Diffstat (limited to 'drivers/gpu/drm')
392 files changed, 16431 insertions, 7737 deletions
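The changes below convert the amdgpu/KFD interop interfaces from the opaque struct kgd_dev * handle to a direct struct amdgpu_device * pointer, removing the cast each callback performed and dropping the trivial accessor wrappers that only hid that cast. As a minimal before/after sketch of the calling convention (illustrative only: example_get_gpu_clock_counter is a made-up name modelled on amdgpu_amdkfd_get_gpu_clock_counter() in the diff, and the snippet assumes the usual amdgpu driver headers):

/* Before: the KFD-facing helper took an opaque handle and cast it back. */
uint64_t example_get_gpu_clock_counter(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	if (adev->gfx.funcs->get_gpu_clock_counter)
		return adev->gfx.funcs->get_gpu_clock_counter(adev);
	return 0;
}

/* After: callers pass struct amdgpu_device * directly, so no cast is needed. */
uint64_t example_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	if (adev->gfx.funcs->get_gpu_clock_counter)
		return adev->gfx.funcs->get_gpu_clock_counter(adev);
	return 0;
}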
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 653726588956..7fedbb725e17 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -45,7 +45,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 	amdgpu_atombios.o atombios_crtc.o amdgpu_connectors.o \
 	atom.o amdgpu_fence.o amdgpu_ttm.o amdgpu_object.o amdgpu_gart.o \
 	amdgpu_encoders.o amdgpu_display.o amdgpu_i2c.o \
-	amdgpu_fb.o amdgpu_gem.o amdgpu_ring.o \
+	amdgpu_gem.o amdgpu_ring.o \
 	amdgpu_cs.o amdgpu_bios.o amdgpu_benchmark.o amdgpu_test.o \
 	atombios_dp.o amdgpu_afmt.o amdgpu_trace_points.o \
 	atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 7077f21f0021..46cf48b3904a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -72,7 +72,7 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
 	if (!kfd_initialized)
 		return;
 
-	adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev, vf);
+	adev->kfd.dev = kgd2kfd_probe(adev, vf);
 
 	if (adev->kfd.dev)
 		amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
@@ -233,19 +233,16 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
 	return r;
 }
 
-void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
+void amdgpu_amdkfd_gpu_reset(struct amdgpu_device *adev)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
 	if (amdgpu_device_should_recover_gpu(adev))
 		amdgpu_device_gpu_recover(adev, NULL);
 }
 
-int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
+int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
 			void **mem_obj, uint64_t *gpu_addr,
 			void **cpu_ptr, bool cp_mqd_gfx9)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 	struct amdgpu_bo *bo = NULL;
 	struct amdgpu_bo_param bp;
 	int r;
@@ -314,7 +311,7 @@ allocate_mem_reserve_bo_failed:
 	return r;
 }
 
-void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
+void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj)
 {
 	struct amdgpu_bo *bo = (struct amdgpu_bo *) mem_obj;
 
@@ -325,10 +322,9 @@ void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
 	amdgpu_bo_unref(&(bo));
 }
 
-int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size,
+int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
 				void **mem_obj)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 	struct amdgpu_bo *bo = NULL;
 	struct amdgpu_bo_user *ubo;
 	struct amdgpu_bo_param bp;
@@ -355,18 +351,16 @@ int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size,
 	return 0;
 }
 
-void amdgpu_amdkfd_free_gws(struct kgd_dev *kgd, void *mem_obj)
+void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj)
 {
 	struct amdgpu_bo *bo = (struct amdgpu_bo *)mem_obj;
 
 	amdgpu_bo_unref(&bo);
 }
 
-uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
+uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev,
 				      enum kgd_engine_type type)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
 	switch (type) {
 	case KGD_ENGINE_PFP:
 		return adev->gfx.pfp_fw_version;
@@ -399,11 +393,9 @@ uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
 	return 0;
 }
 
-void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
+void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
 				      struct kfd_local_mem_info *mem_info)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
 	memset(mem_info, 0, sizeof(*mem_info));
mem_info->local_mem_size_public = adev->gmc.visible_vram_size; @@ -428,19 +420,15 @@ void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd, mem_info->mem_clk_max = 100; } -uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd) +uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - if (adev->gfx.funcs->get_gpu_clock_counter) return adev->gfx.funcs->get_gpu_clock_counter(adev); return 0; } -uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd) +uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - /* the sclk is in quantas of 10kHz */ if (amdgpu_sriov_vf(adev)) return adev->clock.default_sclk / 100; @@ -450,9 +438,8 @@ uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd) return 100; } -void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info) +void amdgpu_amdkfd_get_cu_info(struct amdgpu_device *adev, struct kfd_cu_info *cu_info) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; struct amdgpu_cu_info acu_info = adev->gfx.cu_info; memset(cu_info, 0, sizeof(*cu_info)); @@ -473,13 +460,12 @@ void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info) cu_info->lds_size = acu_info.lds_size; } -int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd, - struct kgd_dev **dma_buf_kgd, +int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd, + struct amdgpu_device **dmabuf_adev, uint64_t *bo_size, void *metadata_buffer, size_t buffer_size, uint32_t *metadata_size, uint32_t *flags) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; struct dma_buf *dma_buf; struct drm_gem_object *obj; struct amdgpu_bo *bo; @@ -507,8 +493,8 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd, goto out_put; r = 0; - if (dma_buf_kgd) - *dma_buf_kgd = (struct kgd_dev *)adev; + if (dmabuf_adev) + *dmabuf_adev = adev; if (bo_size) *bo_size = amdgpu_bo_size(bo); if (metadata_buffer) @@ -528,32 +514,18 @@ out_put: return r; } -uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd) +uint64_t amdgpu_amdkfd_get_vram_usage(struct amdgpu_device *adev) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); return amdgpu_vram_mgr_usage(vram_man); } -uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - - return adev->gmc.xgmi.hive_id; -} - -uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - - return adev->unique_id; -} - -uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src) +uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst, + struct amdgpu_device *src) { - struct amdgpu_device *peer_adev = (struct amdgpu_device *)src; - struct amdgpu_device *adev = (struct amdgpu_device *)dst; + struct amdgpu_device *peer_adev = src; + struct amdgpu_device *adev = dst; int ret = amdgpu_xgmi_get_hops_count(adev, peer_adev); if (ret < 0) { @@ -565,16 +537,18 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s return (uint8_t)ret; } -int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct kgd_dev *dst, struct kgd_dev *src, bool is_min) +int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device 
*dst, + struct amdgpu_device *src, + bool is_min) { - struct amdgpu_device *adev = (struct amdgpu_device *)dst, *peer_adev; + struct amdgpu_device *adev = dst, *peer_adev; int num_links; if (adev->asic_type != CHIP_ALDEBARAN) return 0; if (src) - peer_adev = (struct amdgpu_device *)src; + peer_adev = src; /* num links returns 0 for indirect peers since indirect route is unknown. */ num_links = is_min ? 1 : amdgpu_xgmi_get_num_links(adev, peer_adev); @@ -589,9 +563,8 @@ int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct kgd_dev *dst, struct kgd_dev return (num_links * 16 * 25000)/BITS_PER_BYTE; } -int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min) +int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min) { - struct amdgpu_device *adev = (struct amdgpu_device *)dev; int num_lanes_shift = (is_min ? ffs(adev->pm.pcie_mlw_mask) : fls(adev->pm.pcie_mlw_mask)) - 1; int gen_speed_shift = (is_min ? ffs(adev->pm.pcie_gen_mask & @@ -647,39 +620,11 @@ int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min) return (num_lanes_factor * gen_speed_mbits_factor)/BITS_PER_BYTE; } -uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - - return adev->rmmio_remap.bus_addr; -} - -uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - - return adev->gds.gws_size; -} - -uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - - return adev->rev_id; -} - -int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - - return adev->gmc.noretry; -} - -int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine, +int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev, + enum kgd_engine_type engine, uint32_t vmid, uint64_t gpu_addr, uint32_t *ib_cmd, uint32_t ib_len) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; struct amdgpu_job *job; struct amdgpu_ib *ib; struct amdgpu_ring *ring; @@ -730,10 +675,8 @@ err: return ret; } -void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle) +void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_COMPUTE, !idle); @@ -747,10 +690,9 @@ bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid) return false; } -int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid) +int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev, + uint16_t vmid) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - if (adev->family == AMDGPU_FAMILY_AI) { int i; @@ -763,10 +705,9 @@ int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid) return 0; } -int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid, - enum TLB_FLUSH_TYPE flush_type) +int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev, + uint16_t pasid, enum TLB_FLUSH_TYPE flush_type) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; bool all_hub = false; if (adev->family == AMDGPU_FAMILY_AI) @@ -775,21 +716,18 @@ int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid, return amdgpu_gmc_flush_gpu_tlb_pasid(adev, pasid, flush_type, all_hub); } -bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd) 
+bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - return adev->have_atomics_support; } -void amdgpu_amdkfd_ras_poison_consumption_handler(struct kgd_dev *kgd) +void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; struct ras_err_data err_data = {0, 0, 0, NULL}; /* CPU MCA will handle page retirement if connected_to_cpu is 1 */ if (!adev->gmc.xgmi.connected_to_cpu) amdgpu_umc_process_ras_data_cb(adev, &err_data, NULL); else - amdgpu_amdkfd_gpu_reset(kgd); + amdgpu_amdkfd_gpu_reset(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index a15a4787c7ee..fcbc8a9c9e06 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -144,14 +144,16 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev, void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev); void amdgpu_amdkfd_device_init(struct amdgpu_device *adev); void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev); -int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine, +int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev, + enum kgd_engine_type engine, uint32_t vmid, uint64_t gpu_addr, uint32_t *ib_cmd, uint32_t ib_len); -void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle); -bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd); -int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid); -int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid, - enum TLB_FLUSH_TYPE flush_type); +void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle); +bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev); +int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev, + uint16_t vmid); +int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev, + uint16_t pasid, enum TLB_FLUSH_TYPE flush_type); bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid); @@ -159,7 +161,7 @@ int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev); int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev); -void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd); +void amdgpu_amdkfd_gpu_reset(struct amdgpu_device *adev); int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev, int queue_bit); @@ -198,37 +200,36 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm) } #endif /* Shared API */ -int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size, +int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size, void **mem_obj, uint64_t *gpu_addr, void **cpu_ptr, bool mqd_gfx9); -void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj); -int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size, void **mem_obj); -void amdgpu_amdkfd_free_gws(struct kgd_dev *kgd, void *mem_obj); +void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj); +int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size, + void **mem_obj); +void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj); int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem); int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem); -uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd, +uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev, enum 
kgd_engine_type type); -void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd, +void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev, struct kfd_local_mem_info *mem_info); -uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd); +uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev); -uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd); -void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info); -int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd, - struct kgd_dev **dmabuf_kgd, +uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev); +void amdgpu_amdkfd_get_cu_info(struct amdgpu_device *adev, + struct kfd_cu_info *cu_info); +int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd, + struct amdgpu_device **dmabuf_adev, uint64_t *bo_size, void *metadata_buffer, size_t buffer_size, uint32_t *metadata_size, uint32_t *flags); -uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd); -uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd); -uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd); -uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd); -uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd); -uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd); -int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd); -uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src); -int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct kgd_dev *dst, struct kgd_dev *src, bool is_min); -int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min); +uint64_t amdgpu_amdkfd_get_vram_usage(struct amdgpu_device *adev); +uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst, + struct amdgpu_device *src); +int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst, + struct amdgpu_device *src, + bool is_min); +int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min); /* Read user wptr from a specified user address space with page fault * disabled. 
The memory must be pinned and mapped to the hardware when @@ -258,45 +259,54 @@ int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min); (&((struct amdgpu_fpriv *) \ ((struct drm_file *)(drm_priv))->driver_priv)->vm) -int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd, +int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev, struct file *filp, u32 pasid, void **process_info, struct dma_fence **ef); -void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv); +void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev, + void *drm_priv); uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv); int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( - struct kgd_dev *kgd, uint64_t va, uint64_t size, + struct amdgpu_device *adev, uint64_t va, uint64_t size, void *drm_priv, struct kgd_mem **mem, uint64_t *offset, uint32_t flags); int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, + struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv, uint64_t *size); int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, bool *table_freed); + struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv, + bool *table_freed); int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv); + struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv); int amdgpu_amdkfd_gpuvm_sync_memory( - struct kgd_dev *kgd, struct kgd_mem *mem, bool intr); -int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd, + struct amdgpu_device *adev, struct kgd_mem *mem, bool intr); +int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct amdgpu_device *adev, struct kgd_mem *mem, void **kptr, uint64_t *size); -void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_dev *kgd, struct kgd_mem *mem); +void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct amdgpu_device *adev, + struct kgd_mem *mem); int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info, struct dma_fence **ef); -int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd, +int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev, struct kfd_vm_fault_info *info); -int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, +int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev, struct dma_buf *dmabuf, uint64_t va, void *drm_priv, struct kgd_mem **mem, uint64_t *size, uint64_t *mmap_offset); -int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd, +int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev, struct tile_config *config); -void amdgpu_amdkfd_ras_poison_consumption_handler(struct kgd_dev *kgd); +void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev); #if IS_ENABLED(CONFIG_HSA_AMD) void amdgpu_amdkfd_gpuvm_init_mem_limits(void); void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, struct amdgpu_vm *vm); + +/** + * @amdgpu_amdkfd_release_notify() - Notify KFD when GEM object is released + * + * Allows KFD to release its resources associated with the GEM object. 
+ */ void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo); void amdgpu_amdkfd_reserve_system_mem(uint64_t size); #else @@ -324,7 +334,7 @@ int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm, #if IS_ENABLED(CONFIG_HSA_AMD) int kgd2kfd_init(void); void kgd2kfd_exit(void); -struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, bool vf); +struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf); bool kgd2kfd_device_init(struct kfd_dev *kfd, struct drm_device *ddev, const struct kgd2kfd_shared_resources *gpu_resources); @@ -348,7 +358,7 @@ static inline void kgd2kfd_exit(void) } static inline -struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, bool vf) +struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) { return NULL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c index 5a7f680bcb3f..abe93b3ff765 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c @@ -57,11 +57,6 @@ (*dump)[i++][1] = RREG32(addr); \ } while (0) -static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) -{ - return (struct amdgpu_device *)kgd; -} - static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd) { return (struct v9_sdma_mqd *)mqd; @@ -123,10 +118,9 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev, return sdma_rlc_reg_offset; } -int kgd_arcturus_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, +int kgd_arcturus_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, uint32_t __user *wptr, struct mm_struct *mm) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; unsigned long end_jiffies; @@ -193,11 +187,10 @@ int kgd_arcturus_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, return 0; } -int kgd_arcturus_hqd_sdma_dump(struct kgd_dev *kgd, +int kgd_arcturus_hqd_sdma_dump(struct amdgpu_device *adev, uint32_t engine_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, engine_id, queue_id); uint32_t i = 0, reg; @@ -225,9 +218,9 @@ int kgd_arcturus_hqd_sdma_dump(struct kgd_dev *kgd, return 0; } -bool kgd_arcturus_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) +bool kgd_arcturus_hqd_sdma_is_occupied(struct amdgpu_device *adev, + void *mqd) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; uint32_t sdma_rlc_rb_cntl; @@ -244,10 +237,9 @@ bool kgd_arcturus_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) return false; } -int kgd_arcturus_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, +int kgd_arcturus_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd, unsigned int utimeout) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; uint32_t temp; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h index ce08131b7b5f..756c1a5679c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h @@ -20,11 +20,12 @@ * OTHER DEALINGS IN THE SOFTWARE. 
*/ -int kgd_arcturus_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, +int kgd_arcturus_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, uint32_t __user *wptr, struct mm_struct *mm); -int kgd_arcturus_hqd_sdma_dump(struct kgd_dev *kgd, +int kgd_arcturus_hqd_sdma_dump(struct amdgpu_device *adev, uint32_t engine_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs); -bool kgd_arcturus_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd); -int kgd_arcturus_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, +bool kgd_arcturus_hqd_sdma_is_occupied(struct amdgpu_device *adev, + void *mqd); +int kgd_arcturus_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd, unsigned int utimeout); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index 960acf68150a..7b7f4b2764c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -39,37 +39,26 @@ enum hqd_dequeue_request_type { SAVE_WAVES }; -static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) -{ - return (struct amdgpu_device *)kgd; -} - -static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe, +static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe, uint32_t queue, uint32_t vmid) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - mutex_lock(&adev->srbm_mutex); nv_grbm_select(adev, mec, pipe, queue, vmid); } -static void unlock_srbm(struct kgd_dev *kgd) +static void unlock_srbm(struct amdgpu_device *adev) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - nv_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); } -static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id, +static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - lock_srbm(kgd, mec, pipe, queue_id, 0); + lock_srbm(adev, mec, pipe, queue_id, 0); } static uint64_t get_queue_mask(struct amdgpu_device *adev, @@ -81,33 +70,29 @@ static uint64_t get_queue_mask(struct amdgpu_device *adev, return 1ull << bit; } -static void release_queue(struct kgd_dev *kgd) +static void release_queue(struct amdgpu_device *adev) { - unlock_srbm(kgd); + unlock_srbm(adev); } -static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, +static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid, uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - - lock_srbm(kgd, 0, 0, 0, vmid); + lock_srbm(adev, 0, 0, 0, vmid); WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config); WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases); /* APE1 no longer exists on GFX9 */ - unlock_srbm(kgd); + unlock_srbm(adev); } -static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid, +static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, unsigned int vmid) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - /* * We have to assume that there is no outstanding mapping. 
* The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because @@ -150,22 +135,21 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid, * but still works */ -static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) +static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t mec; uint32_t pipe; mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - lock_srbm(kgd, mec, pipe, 0, 0); + lock_srbm(adev, mec, pipe, 0, 0); WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK | CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK); - unlock_srbm(kgd); + unlock_srbm(adev); return 0; } @@ -218,12 +202,11 @@ static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd) return (struct v10_sdma_mqd *)mqd; } -static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, - uint32_t queue_id, uint32_t __user *wptr, - uint32_t wptr_shift, uint32_t wptr_mask, - struct mm_struct *mm) +static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + uint32_t __user *wptr, uint32_t wptr_shift, + uint32_t wptr_mask, struct mm_struct *mm) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v10_compute_mqd *m; uint32_t *mqd_hqd; uint32_t reg, hqd_base, data; @@ -231,7 +214,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, m = get_mqd(mqd); pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id); - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */ mqd_hqd = &m->cp_mqd_base_addr_lo; @@ -296,16 +279,15 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, data); - release_queue(kgd); + release_queue(adev); return 0; } -static int kgd_hiq_mqd_load(struct kgd_dev *kgd, void *mqd, +static int kgd_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t doorbell_off) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; struct v10_compute_mqd *m; uint32_t mec, pipe; @@ -313,7 +295,7 @@ static int kgd_hiq_mqd_load(struct kgd_dev *kgd, void *mqd, m = get_mqd(mqd); - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); @@ -349,16 +331,15 @@ static int kgd_hiq_mqd_load(struct kgd_dev *kgd, void *mqd, out_unlock: spin_unlock(&adev->gfx.kiq.ring_lock); - release_queue(kgd); + release_queue(adev); return r; } -static int kgd_hqd_dump(struct kgd_dev *kgd, +static int kgd_hqd_dump(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t i = 0, reg; #define HQD_N_REGS 56 #define DUMP_REG(addr) do { \ @@ -372,13 +353,13 @@ static int kgd_hqd_dump(struct kgd_dev *kgd, if (*dump == NULL) return -ENOMEM; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR); reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++) DUMP_REG(reg); - release_queue(kgd); + release_queue(adev); WARN_ON_ONCE(i != HQD_N_REGS); 
*n_regs = i; @@ -386,10 +367,9 @@ static int kgd_hqd_dump(struct kgd_dev *kgd, return 0; } -static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, +static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, uint32_t __user *wptr, struct mm_struct *mm) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v10_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; unsigned long end_jiffies; @@ -456,11 +436,10 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, return 0; } -static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, +static int kgd_hqd_sdma_dump(struct amdgpu_device *adev, uint32_t engine_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, engine_id, queue_id); uint32_t i = 0, reg; @@ -488,15 +467,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, return 0; } -static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, - uint32_t pipe_id, uint32_t queue_id) +static bool kgd_hqd_is_occupied(struct amdgpu_device *adev, + uint64_t queue_address, uint32_t pipe_id, + uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t act; bool retval = false; uint32_t low, high; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE); if (act) { low = lower_32_bits(queue_address >> 8); @@ -506,13 +485,12 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI)) retval = true; } - release_queue(kgd); + release_queue(adev); return retval; } -static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) +static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v10_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; uint32_t sdma_rlc_rb_cntl; @@ -529,12 +507,11 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) return false; } -static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, +static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); enum hqd_dequeue_request_type type; unsigned long end_jiffies; uint32_t temp; @@ -548,7 +525,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, int retry; #endif - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); if (m->cp_hqd_vmid == 0) WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0); @@ -633,20 +610,19 @@ loop: break; if (time_after(jiffies, end_jiffies)) { pr_err("cp queue preemption time out.\n"); - release_queue(kgd); + release_queue(adev); return -ETIME; } usleep_range(500, 1000); } - release_queue(kgd); + release_queue(adev); return 0; } -static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, +static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd, unsigned int utimeout) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v10_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; uint32_t temp; @@ -683,11 +659,10 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, return 0; } -static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, +static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, uint8_t vmid, uint16_t *p_pasid) { uint32_t value; - struct 
amdgpu_device *adev = (struct amdgpu_device *) kgd; value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid); @@ -696,12 +671,12 @@ static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); } -static int kgd_address_watch_disable(struct kgd_dev *kgd) +static int kgd_address_watch_disable(struct amdgpu_device *adev) { return 0; } -static int kgd_address_watch_execute(struct kgd_dev *kgd, +static int kgd_address_watch_execute(struct amdgpu_device *adev, unsigned int watch_point_id, uint32_t cntl_val, uint32_t addr_hi, @@ -710,11 +685,10 @@ static int kgd_address_watch_execute(struct kgd_dev *kgd, return 0; } -static int kgd_wave_control_execute(struct kgd_dev *kgd, +static int kgd_wave_control_execute(struct amdgpu_device *adev, uint32_t gfx_index_val, uint32_t sq_cmd) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t data = 0; mutex_lock(&adev->grbm_idx_mutex); @@ -735,18 +709,16 @@ static int kgd_wave_control_execute(struct kgd_dev *kgd, return 0; } -static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd, +static uint32_t kgd_address_watch_get_offset(struct amdgpu_device *adev, unsigned int watch_point_id, unsigned int reg_offset) { return 0; } -static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, - uint64_t page_table_base) +static void set_vm_context_page_table_base(struct amdgpu_device *adev, + uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) { pr_err("trying to set page table base for wrong VMID %u\n", vmid); @@ -757,12 +729,10 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base); } -static void program_trap_handler_settings(struct kgd_dev *kgd, +static void program_trap_handler_settings(struct amdgpu_device *adev, uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - - lock_srbm(kgd, 0, 0, 0, vmid); + lock_srbm(adev, 0, 0, 0, vmid); /* * Program TBA registers @@ -781,7 +751,7 @@ static void program_trap_handler_settings(struct kgd_dev *kgd, WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI), upper_32_bits(tma_addr >> 8)); - unlock_srbm(kgd); + unlock_srbm(adev); } const struct kfd2kgd_calls gfx_v10_kfd2kgd = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c index dac0d751d5af..1f37d3574001 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c @@ -38,37 +38,26 @@ enum hqd_dequeue_request_type { SAVE_WAVES }; -static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) -{ - return (struct amdgpu_device *)kgd; -} - -static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe, +static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe, uint32_t queue, uint32_t vmid) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - mutex_lock(&adev->srbm_mutex); nv_grbm_select(adev, mec, pipe, queue, vmid); } -static void unlock_srbm(struct kgd_dev *kgd) +static void unlock_srbm(struct amdgpu_device *adev) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - nv_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); } -static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id, +static void acquire_queue(struct 
amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - lock_srbm(kgd, mec, pipe, queue_id, 0); + lock_srbm(adev, mec, pipe, queue_id, 0); } static uint64_t get_queue_mask(struct amdgpu_device *adev, @@ -80,34 +69,30 @@ static uint64_t get_queue_mask(struct amdgpu_device *adev, return 1ull << bit; } -static void release_queue(struct kgd_dev *kgd) +static void release_queue(struct amdgpu_device *adev) { - unlock_srbm(kgd); + unlock_srbm(adev); } -static void program_sh_mem_settings_v10_3(struct kgd_dev *kgd, uint32_t vmid, +static void program_sh_mem_settings_v10_3(struct amdgpu_device *adev, uint32_t vmid, uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - - lock_srbm(kgd, 0, 0, 0, vmid); + lock_srbm(adev, 0, 0, 0, vmid); WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config); WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases); /* APE1 no longer exists on GFX9 */ - unlock_srbm(kgd); + unlock_srbm(adev); } /* ATC is defeatured on Sienna_Cichlid */ -static int set_pasid_vmid_mapping_v10_3(struct kgd_dev *kgd, unsigned int pasid, +static int set_pasid_vmid_mapping_v10_3(struct amdgpu_device *adev, unsigned int pasid, unsigned int vmid) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t value = pasid << IH_VMID_0_LUT__PASID__SHIFT; /* Mapping vmid to pasid also for IH block */ @@ -118,22 +103,21 @@ static int set_pasid_vmid_mapping_v10_3(struct kgd_dev *kgd, unsigned int pasid, return 0; } -static int init_interrupts_v10_3(struct kgd_dev *kgd, uint32_t pipe_id) +static int init_interrupts_v10_3(struct amdgpu_device *adev, uint32_t pipe_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t mec; uint32_t pipe; mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - lock_srbm(kgd, mec, pipe, 0, 0); + lock_srbm(adev, mec, pipe, 0, 0); WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK | CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK); - unlock_srbm(kgd); + unlock_srbm(adev); return 0; } @@ -188,12 +172,11 @@ static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd) return (struct v10_sdma_mqd *)mqd; } -static int hqd_load_v10_3(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, - uint32_t queue_id, uint32_t __user *wptr, - uint32_t wptr_shift, uint32_t wptr_mask, - struct mm_struct *mm) +static int hqd_load_v10_3(struct amdgpu_device *adev, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + uint32_t __user *wptr, uint32_t wptr_shift, + uint32_t wptr_mask, struct mm_struct *mm) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v10_compute_mqd *m; uint32_t *mqd_hqd; uint32_t reg, hqd_base, data; @@ -201,7 +184,7 @@ static int hqd_load_v10_3(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, m = get_mqd(mqd); pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id); - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); /* HIQ is set during driver init period with vmid set to 0*/ if (m->cp_hqd_vmid == 0) { @@ -281,16 +264,15 @@ static int hqd_load_v10_3(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, data); - release_queue(kgd); + release_queue(adev); 
return 0; } -static int hiq_mqd_load_v10_3(struct kgd_dev *kgd, void *mqd, +static int hiq_mqd_load_v10_3(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t doorbell_off) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; struct v10_compute_mqd *m; uint32_t mec, pipe; @@ -298,7 +280,7 @@ static int hiq_mqd_load_v10_3(struct kgd_dev *kgd, void *mqd, m = get_mqd(mqd); - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); @@ -334,16 +316,15 @@ static int hiq_mqd_load_v10_3(struct kgd_dev *kgd, void *mqd, out_unlock: spin_unlock(&adev->gfx.kiq.ring_lock); - release_queue(kgd); + release_queue(adev); return r; } -static int hqd_dump_v10_3(struct kgd_dev *kgd, +static int hqd_dump_v10_3(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t i = 0, reg; #define HQD_N_REGS 56 #define DUMP_REG(addr) do { \ @@ -357,13 +338,13 @@ static int hqd_dump_v10_3(struct kgd_dev *kgd, if (*dump == NULL) return -ENOMEM; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR); reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++) DUMP_REG(reg); - release_queue(kgd); + release_queue(adev); WARN_ON_ONCE(i != HQD_N_REGS); *n_regs = i; @@ -371,10 +352,9 @@ static int hqd_dump_v10_3(struct kgd_dev *kgd, return 0; } -static int hqd_sdma_load_v10_3(struct kgd_dev *kgd, void *mqd, +static int hqd_sdma_load_v10_3(struct amdgpu_device *adev, void *mqd, uint32_t __user *wptr, struct mm_struct *mm) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v10_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; unsigned long end_jiffies; @@ -441,11 +421,10 @@ static int hqd_sdma_load_v10_3(struct kgd_dev *kgd, void *mqd, return 0; } -static int hqd_sdma_dump_v10_3(struct kgd_dev *kgd, +static int hqd_sdma_dump_v10_3(struct amdgpu_device *adev, uint32_t engine_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, engine_id, queue_id); uint32_t i = 0, reg; @@ -473,15 +452,15 @@ static int hqd_sdma_dump_v10_3(struct kgd_dev *kgd, return 0; } -static bool hqd_is_occupied_v10_3(struct kgd_dev *kgd, uint64_t queue_address, - uint32_t pipe_id, uint32_t queue_id) +static bool hqd_is_occupied_v10_3(struct amdgpu_device *adev, + uint64_t queue_address, uint32_t pipe_id, + uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t act; bool retval = false; uint32_t low, high; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE); if (act) { low = lower_32_bits(queue_address >> 8); @@ -491,13 +470,13 @@ static bool hqd_is_occupied_v10_3(struct kgd_dev *kgd, uint64_t queue_address, high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI)) retval = true; } - release_queue(kgd); + release_queue(adev); return retval; } -static bool hqd_sdma_is_occupied_v10_3(struct kgd_dev *kgd, void *mqd) +static bool hqd_sdma_is_occupied_v10_3(struct amdgpu_device *adev, + void *mqd) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v10_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; uint32_t sdma_rlc_rb_cntl; @@ 
-514,18 +493,17 @@ static bool hqd_sdma_is_occupied_v10_3(struct kgd_dev *kgd, void *mqd) return false; } -static int hqd_destroy_v10_3(struct kgd_dev *kgd, void *mqd, +static int hqd_destroy_v10_3(struct amdgpu_device *adev, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); enum hqd_dequeue_request_type type; unsigned long end_jiffies; uint32_t temp; struct v10_compute_mqd *m = get_mqd(mqd); - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); if (m->cp_hqd_vmid == 0) WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0); @@ -555,20 +533,19 @@ static int hqd_destroy_v10_3(struct kgd_dev *kgd, void *mqd, if (time_after(jiffies, end_jiffies)) { pr_err("cp queue pipe %d queue %d preemption failed\n", pipe_id, queue_id); - release_queue(kgd); + release_queue(adev); return -ETIME; } usleep_range(500, 1000); } - release_queue(kgd); + release_queue(adev); return 0; } -static int hqd_sdma_destroy_v10_3(struct kgd_dev *kgd, void *mqd, +static int hqd_sdma_destroy_v10_3(struct amdgpu_device *adev, void *mqd, unsigned int utimeout) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v10_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; uint32_t temp; @@ -606,12 +583,12 @@ static int hqd_sdma_destroy_v10_3(struct kgd_dev *kgd, void *mqd, } -static int address_watch_disable_v10_3(struct kgd_dev *kgd) +static int address_watch_disable_v10_3(struct amdgpu_device *adev) { return 0; } -static int address_watch_execute_v10_3(struct kgd_dev *kgd, +static int address_watch_execute_v10_3(struct amdgpu_device *adev, unsigned int watch_point_id, uint32_t cntl_val, uint32_t addr_hi, @@ -620,11 +597,10 @@ static int address_watch_execute_v10_3(struct kgd_dev *kgd, return 0; } -static int wave_control_execute_v10_3(struct kgd_dev *kgd, +static int wave_control_execute_v10_3(struct amdgpu_device *adev, uint32_t gfx_index_val, uint32_t sq_cmd) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t data = 0; mutex_lock(&adev->grbm_idx_mutex); @@ -645,28 +621,24 @@ static int wave_control_execute_v10_3(struct kgd_dev *kgd, return 0; } -static uint32_t address_watch_get_offset_v10_3(struct kgd_dev *kgd, +static uint32_t address_watch_get_offset_v10_3(struct amdgpu_device *adev, unsigned int watch_point_id, unsigned int reg_offset) { return 0; } -static void set_vm_context_page_table_base_v10_3(struct kgd_dev *kgd, uint32_t vmid, - uint64_t page_table_base) +static void set_vm_context_page_table_base_v10_3(struct amdgpu_device *adev, + uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - /* SDMA is on gfxhub as well for Navi1* series */ adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base); } -static void program_trap_handler_settings_v10_3(struct kgd_dev *kgd, +static void program_trap_handler_settings_v10_3(struct amdgpu_device *adev, uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - - lock_srbm(kgd, 0, 0, 0, vmid); + lock_srbm(adev, 0, 0, 0, vmid); /* * Program TBA registers @@ -685,15 +657,14 @@ static void program_trap_handler_settings_v10_3(struct kgd_dev *kgd, WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI), upper_32_bits(tma_addr >> 8)); - unlock_srbm(kgd); + unlock_srbm(adev); } #if 0 -uint32_t enable_debug_trap_v10_3(struct kgd_dev *kgd, +uint32_t enable_debug_trap_v10_3(struct amdgpu_device *adev, uint32_t 
trap_debug_wave_launch_mode, uint32_t vmid) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t data = 0; uint32_t orig_wave_cntl_value; uint32_t orig_stall_vmid; @@ -720,10 +691,8 @@ uint32_t enable_debug_trap_v10_3(struct kgd_dev *kgd, return 0; } -uint32_t disable_debug_trap_v10_3(struct kgd_dev *kgd) +uint32_t disable_debug_trap_v10_3(struct amdgpu_device *adev) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - mutex_lock(&adev->grbm_idx_mutex); WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0); @@ -733,11 +702,10 @@ uint32_t disable_debug_trap_v10_3(struct kgd_dev *kgd) return 0; } -uint32_t set_wave_launch_trap_override_v10_3(struct kgd_dev *kgd, +uint32_t set_wave_launch_trap_override_v10_3(struct amdgpu_device *adev, uint32_t trap_override, uint32_t trap_mask) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t data = 0; mutex_lock(&adev->grbm_idx_mutex); @@ -762,11 +730,10 @@ uint32_t set_wave_launch_trap_override_v10_3(struct kgd_dev *kgd, return 0; } -uint32_t set_wave_launch_mode_v10_3(struct kgd_dev *kgd, +uint32_t set_wave_launch_mode_v10_3(struct amdgpu_device *adev, uint8_t wave_launch_mode, uint32_t vmid) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t data = 0; bool is_stall_mode; bool is_mode_set; @@ -805,16 +772,14 @@ uint32_t set_wave_launch_mode_v10_3(struct kgd_dev *kgd, * sem_rearm_wait_time -- Wait Count for Semaphore re-arm. * deq_retry_wait_time -- Wait Count for Global Wave Syncs. */ -void get_iq_wait_times_v10_3(struct kgd_dev *kgd, +void get_iq_wait_times_v10_3(struct amdgpu_device *adev, uint32_t *wait_times) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - *wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2)); } -void build_grace_period_packet_info_v10_3(struct kgd_dev *kgd, +void build_grace_period_packet_info_v10_3(struct amdgpu_device *adev, uint32_t wait_times, uint32_t grace_period, uint32_t *reg_offset, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index b91d27e39bad..36528dad7684 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -82,68 +82,54 @@ union TCP_WATCH_CNTL_BITS { float f32All; }; -static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) -{ - return (struct amdgpu_device *)kgd; -} - -static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe, +static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe, uint32_t queue, uint32_t vmid) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue); mutex_lock(&adev->srbm_mutex); WREG32(mmSRBM_GFX_CNTL, value); } -static void unlock_srbm(struct kgd_dev *kgd) +static void unlock_srbm(struct amdgpu_device *adev) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - WREG32(mmSRBM_GFX_CNTL, 0); mutex_unlock(&adev->srbm_mutex); } -static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id, +static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - lock_srbm(kgd, mec, pipe, queue_id, 0); + lock_srbm(adev, mec, pipe, queue_id, 0); } -static void release_queue(struct kgd_dev *kgd) +static void release_queue(struct amdgpu_device *adev) { - 
unlock_srbm(kgd); + unlock_srbm(adev); } -static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, +static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid, uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - - lock_srbm(kgd, 0, 0, 0, vmid); + lock_srbm(adev, 0, 0, 0, vmid); WREG32(mmSH_MEM_CONFIG, sh_mem_config); WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base); WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit); WREG32(mmSH_MEM_BASES, sh_mem_bases); - unlock_srbm(kgd); + unlock_srbm(adev); } -static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid, +static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, unsigned int vmid) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - /* * We have to assume that there is no outstanding mapping. * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because @@ -165,21 +151,20 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid, return 0; } -static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) +static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t mec; uint32_t pipe; mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - lock_srbm(kgd, mec, pipe, 0, 0); + lock_srbm(adev, mec, pipe, 0, 0); WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK | CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK); - unlock_srbm(kgd); + unlock_srbm(adev); return 0; } @@ -207,12 +192,11 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd) return (struct cik_sdma_rlc_registers *)mqd; } -static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, - uint32_t queue_id, uint32_t __user *wptr, - uint32_t wptr_shift, uint32_t wptr_mask, - struct mm_struct *mm) +static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + uint32_t __user *wptr, uint32_t wptr_shift, + uint32_t wptr_mask, struct mm_struct *mm) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct cik_mqd *m; uint32_t *mqd_hqd; uint32_t reg, wptr_val, data; @@ -220,7 +204,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, m = get_mqd(mqd); - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); /* HQD registers extend from CP_MQD_BASE_ADDR to CP_MQD_CONTROL. */ mqd_hqd = &m->cp_mqd_base_addr_lo; @@ -239,25 +223,24 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, * release srbm_mutex to avoid circular dependency between * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex. 
*/ - release_queue(kgd); + release_queue(adev); valid_wptr = read_user_wptr(mm, wptr, wptr_val); - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); if (valid_wptr) WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask); data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); WREG32(mmCP_HQD_ACTIVE, data); - release_queue(kgd); + release_queue(adev); return 0; } -static int kgd_hqd_dump(struct kgd_dev *kgd, +static int kgd_hqd_dump(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t i = 0, reg; #define HQD_N_REGS (35+4) #define DUMP_REG(addr) do { \ @@ -271,7 +254,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd, if (*dump == NULL) return -ENOMEM; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0); DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1); @@ -281,7 +264,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd, for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++) DUMP_REG(reg); - release_queue(kgd); + release_queue(adev); WARN_ON_ONCE(i != HQD_N_REGS); *n_regs = i; @@ -289,10 +272,9 @@ static int kgd_hqd_dump(struct kgd_dev *kgd, return 0; } -static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, +static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, uint32_t __user *wptr, struct mm_struct *mm) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct cik_sdma_rlc_registers *m; unsigned long end_jiffies; uint32_t sdma_rlc_reg_offset; @@ -345,11 +327,10 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, return 0; } -static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, +static int kgd_hqd_sdma_dump(struct amdgpu_device *adev, uint32_t engine_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET + queue_id * KFD_CIK_SDMA_QUEUE_OFFSET; uint32_t i = 0, reg; @@ -372,15 +353,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, return 0; } -static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, - uint32_t pipe_id, uint32_t queue_id) +static bool kgd_hqd_is_occupied(struct amdgpu_device *adev, + uint64_t queue_address, uint32_t pipe_id, + uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t act; bool retval = false; uint32_t low, high; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); act = RREG32(mmCP_HQD_ACTIVE); if (act) { low = lower_32_bits(queue_address >> 8); @@ -390,13 +371,12 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, high == RREG32(mmCP_HQD_PQ_BASE_HI)) retval = true; } - release_queue(kgd); + release_queue(adev); return retval; } -static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) +static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct cik_sdma_rlc_registers *m; uint32_t sdma_rlc_reg_offset; uint32_t sdma_rlc_rb_cntl; @@ -412,12 +392,11 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) return false; } -static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, +static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, uint32_t queue_id) { - struct amdgpu_device 
*adev = get_amdgpu_device(kgd); uint32_t temp; enum hqd_dequeue_request_type type; unsigned long flags, end_jiffies; @@ -426,7 +405,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, if (amdgpu_in_reset(adev)) return -EIO; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0); switch (reset_type) { @@ -504,20 +483,19 @@ loop: break; if (time_after(jiffies, end_jiffies)) { pr_err("cp queue preemption time out\n"); - release_queue(kgd); + release_queue(adev); return -ETIME; } usleep_range(500, 1000); } - release_queue(kgd); + release_queue(adev); return 0; } -static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, +static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd, unsigned int utimeout) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct cik_sdma_rlc_registers *m; uint32_t sdma_rlc_reg_offset; uint32_t temp; @@ -551,9 +529,8 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, return 0; } -static int kgd_address_watch_disable(struct kgd_dev *kgd) +static int kgd_address_watch_disable(struct amdgpu_device *adev) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); union TCP_WATCH_CNTL_BITS cntl; unsigned int i; @@ -571,13 +548,12 @@ static int kgd_address_watch_disable(struct kgd_dev *kgd) return 0; } -static int kgd_address_watch_execute(struct kgd_dev *kgd, +static int kgd_address_watch_execute(struct amdgpu_device *adev, unsigned int watch_point_id, uint32_t cntl_val, uint32_t addr_hi, uint32_t addr_lo) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); union TCP_WATCH_CNTL_BITS cntl; cntl.u32All = cntl_val; @@ -602,11 +578,10 @@ static int kgd_address_watch_execute(struct kgd_dev *kgd, return 0; } -static int kgd_wave_control_execute(struct kgd_dev *kgd, +static int kgd_wave_control_execute(struct amdgpu_device *adev, uint32_t gfx_index_val, uint32_t sq_cmd) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t data; mutex_lock(&adev->grbm_idx_mutex); @@ -627,18 +602,17 @@ static int kgd_wave_control_execute(struct kgd_dev *kgd, return 0; } -static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd, +static uint32_t kgd_address_watch_get_offset(struct amdgpu_device *adev, unsigned int watch_point_id, unsigned int reg_offset) { return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset]; } -static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, +static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, uint8_t vmid, uint16_t *p_pasid) { uint32_t value; - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK; @@ -646,21 +620,17 @@ static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); } -static void set_scratch_backing_va(struct kgd_dev *kgd, +static void set_scratch_backing_va(struct amdgpu_device *adev, uint64_t va, uint32_t vmid) { - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - - lock_srbm(kgd, 0, 0, 0, vmid); + lock_srbm(adev, 0, 0, 0, vmid); WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va); - unlock_srbm(kgd); + unlock_srbm(adev); } -static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, - uint64_t page_table_base) +static void set_vm_context_page_table_base(struct amdgpu_device *adev, + uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); 
- if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) { pr_err("trying to set page table base for wrong VMID\n"); return; @@ -676,10 +646,8 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, * @vmid: vmid pointer * read vmid from register (CIK). */ -static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd) +static uint32_t read_vmid_from_vmfault_reg(struct amdgpu_device *adev) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index 5ce0ce704a21..52832cd69a93 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -39,68 +39,54 @@ enum hqd_dequeue_request_type { RESET_WAVES }; -static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) -{ - return (struct amdgpu_device *)kgd; -} - -static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe, +static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe, uint32_t queue, uint32_t vmid) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue); mutex_lock(&adev->srbm_mutex); WREG32(mmSRBM_GFX_CNTL, value); } -static void unlock_srbm(struct kgd_dev *kgd) +static void unlock_srbm(struct amdgpu_device *adev) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - WREG32(mmSRBM_GFX_CNTL, 0); mutex_unlock(&adev->srbm_mutex); } -static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id, +static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - lock_srbm(kgd, mec, pipe, queue_id, 0); + lock_srbm(adev, mec, pipe, queue_id, 0); } -static void release_queue(struct kgd_dev *kgd) +static void release_queue(struct amdgpu_device *adev) { - unlock_srbm(kgd); + unlock_srbm(adev); } -static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, +static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid, uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - - lock_srbm(kgd, 0, 0, 0, vmid); + lock_srbm(adev, 0, 0, 0, vmid); WREG32(mmSH_MEM_CONFIG, sh_mem_config); WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base); WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit); WREG32(mmSH_MEM_BASES, sh_mem_bases); - unlock_srbm(kgd); + unlock_srbm(adev); } -static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid, +static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, unsigned int vmid) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - /* * We have to assume that there is no outstanding mapping. 
* The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because @@ -123,21 +109,20 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid, return 0; } -static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) +static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t mec; uint32_t pipe; mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - lock_srbm(kgd, mec, pipe, 0, 0); + lock_srbm(adev, mec, pipe, 0, 0); WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK | CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK); - unlock_srbm(kgd); + unlock_srbm(adev); return 0; } @@ -165,12 +150,11 @@ static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd) return (struct vi_sdma_mqd *)mqd; } -static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, - uint32_t queue_id, uint32_t __user *wptr, - uint32_t wptr_shift, uint32_t wptr_mask, - struct mm_struct *mm) +static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + uint32_t __user *wptr, uint32_t wptr_shift, + uint32_t wptr_mask, struct mm_struct *mm) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct vi_mqd *m; uint32_t *mqd_hqd; uint32_t reg, wptr_val, data; @@ -178,7 +162,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, m = get_mqd(mqd); - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); /* HIQ is set during driver init period with vmid set to 0*/ if (m->cp_hqd_vmid == 0) { @@ -206,7 +190,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, * on ASICs that do not support context-save. * EOP writes/reads can start anywhere in the ring. */ - if (get_amdgpu_device(kgd)->asic_type != CHIP_TONGA) { + if (adev->asic_type != CHIP_TONGA) { WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr); WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr); WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem); @@ -226,25 +210,24 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, * release srbm_mutex to avoid circular dependency between * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex. 
*/ - release_queue(kgd); + release_queue(adev); valid_wptr = read_user_wptr(mm, wptr, wptr_val); - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); if (valid_wptr) WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask); data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); WREG32(mmCP_HQD_ACTIVE, data); - release_queue(kgd); + release_queue(adev); return 0; } -static int kgd_hqd_dump(struct kgd_dev *kgd, +static int kgd_hqd_dump(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t i = 0, reg; #define HQD_N_REGS (54+4) #define DUMP_REG(addr) do { \ @@ -258,7 +241,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd, if (*dump == NULL) return -ENOMEM; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0); DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1); @@ -268,7 +251,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd, for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_DONES; reg++) DUMP_REG(reg); - release_queue(kgd); + release_queue(adev); WARN_ON_ONCE(i != HQD_N_REGS); *n_regs = i; @@ -276,10 +259,9 @@ static int kgd_hqd_dump(struct kgd_dev *kgd, return 0; } -static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, +static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, uint32_t __user *wptr, struct mm_struct *mm) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct vi_sdma_mqd *m; unsigned long end_jiffies; uint32_t sdma_rlc_reg_offset; @@ -331,11 +313,10 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, return 0; } -static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, +static int kgd_hqd_sdma_dump(struct amdgpu_device *adev, uint32_t engine_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET + queue_id * KFD_VI_SDMA_QUEUE_OFFSET; uint32_t i = 0, reg; @@ -367,15 +348,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, return 0; } -static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, - uint32_t pipe_id, uint32_t queue_id) +static bool kgd_hqd_is_occupied(struct amdgpu_device *adev, + uint64_t queue_address, uint32_t pipe_id, + uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t act; bool retval = false; uint32_t low, high; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); act = RREG32(mmCP_HQD_ACTIVE); if (act) { low = lower_32_bits(queue_address >> 8); @@ -385,13 +366,12 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, high == RREG32(mmCP_HQD_PQ_BASE_HI)) retval = true; } - release_queue(kgd); + release_queue(adev); return retval; } -static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) +static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct vi_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; uint32_t sdma_rlc_rb_cntl; @@ -407,12 +387,11 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) return false; } -static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, +static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, uint32_t queue_id) { - struct amdgpu_device *adev = 
get_amdgpu_device(kgd); uint32_t temp; enum hqd_dequeue_request_type type; unsigned long flags, end_jiffies; @@ -422,7 +401,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, if (amdgpu_in_reset(adev)) return -EIO; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); if (m->cp_hqd_vmid == 0) WREG32_FIELD(RLC_CP_SCHEDULERS, scheduler1, 0); @@ -502,20 +481,19 @@ loop: break; if (time_after(jiffies, end_jiffies)) { pr_err("cp queue preemption time out.\n"); - release_queue(kgd); + release_queue(adev); return -ETIME; } usleep_range(500, 1000); } - release_queue(kgd); + release_queue(adev); return 0; } -static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, +static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd, unsigned int utimeout) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct vi_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; uint32_t temp; @@ -549,11 +527,10 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, return 0; } -static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, +static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, uint8_t vmid, uint16_t *p_pasid) { uint32_t value; - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK; @@ -561,12 +538,12 @@ static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); } -static int kgd_address_watch_disable(struct kgd_dev *kgd) +static int kgd_address_watch_disable(struct amdgpu_device *adev) { return 0; } -static int kgd_address_watch_execute(struct kgd_dev *kgd, +static int kgd_address_watch_execute(struct amdgpu_device *adev, unsigned int watch_point_id, uint32_t cntl_val, uint32_t addr_hi, @@ -575,11 +552,10 @@ static int kgd_address_watch_execute(struct kgd_dev *kgd, return 0; } -static int kgd_wave_control_execute(struct kgd_dev *kgd, +static int kgd_wave_control_execute(struct amdgpu_device *adev, uint32_t gfx_index_val, uint32_t sq_cmd) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t data = 0; mutex_lock(&adev->grbm_idx_mutex); @@ -600,28 +576,24 @@ static int kgd_wave_control_execute(struct kgd_dev *kgd, return 0; } -static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd, +static uint32_t kgd_address_watch_get_offset(struct amdgpu_device *adev, unsigned int watch_point_id, unsigned int reg_offset) { return 0; } -static void set_scratch_backing_va(struct kgd_dev *kgd, +static void set_scratch_backing_va(struct amdgpu_device *adev, uint64_t va, uint32_t vmid) { - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - - lock_srbm(kgd, 0, 0, 0, vmid); + lock_srbm(adev, 0, 0, 0, vmid); WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va); - unlock_srbm(kgd); + unlock_srbm(adev); } -static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, - uint64_t page_table_base) +static void set_vm_context_page_table_base(struct amdgpu_device *adev, + uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) { pr_err("trying to set page table base for wrong VMID\n"); return; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index bcc1cbeb8799..ddfe7aff919d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -46,37 +46,26 @@ enum hqd_dequeue_request_type { SAVE_WAVES }; -static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) -{ - return (struct amdgpu_device *)kgd; -} - -static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe, +static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe, uint32_t queue, uint32_t vmid) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - mutex_lock(&adev->srbm_mutex); soc15_grbm_select(adev, mec, pipe, queue, vmid); } -static void unlock_srbm(struct kgd_dev *kgd) +static void unlock_srbm(struct amdgpu_device *adev) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - soc15_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); } -static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id, +static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - lock_srbm(kgd, mec, pipe, queue_id, 0); + lock_srbm(adev, mec, pipe, queue_id, 0); } static uint64_t get_queue_mask(struct amdgpu_device *adev, @@ -88,33 +77,29 @@ static uint64_t get_queue_mask(struct amdgpu_device *adev, return 1ull << bit; } -static void release_queue(struct kgd_dev *kgd) +static void release_queue(struct amdgpu_device *adev) { - unlock_srbm(kgd); + unlock_srbm(adev); } -void kgd_gfx_v9_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, +void kgd_gfx_v9_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid, uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - - lock_srbm(kgd, 0, 0, 0, vmid); + lock_srbm(adev, 0, 0, 0, vmid); WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config); WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases); /* APE1 no longer exists on GFX9 */ - unlock_srbm(kgd); + unlock_srbm(adev); } -int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid, +int kgd_gfx_v9_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, unsigned int vmid) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - /* * We have to assume that there is no outstanding mapping. 
* The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because @@ -171,22 +156,21 @@ int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid, * but still works */ -int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) +int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t mec; uint32_t pipe; mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - lock_srbm(kgd, mec, pipe, 0, 0); + lock_srbm(adev, mec, pipe, 0, 0); WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL), CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK | CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK); - unlock_srbm(kgd); + unlock_srbm(adev); return 0; } @@ -233,19 +217,18 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd) return (struct v9_sdma_mqd *)mqd; } -int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, - uint32_t queue_id, uint32_t __user *wptr, - uint32_t wptr_shift, uint32_t wptr_mask, - struct mm_struct *mm) +int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + uint32_t __user *wptr, uint32_t wptr_shift, + uint32_t wptr_mask, struct mm_struct *mm) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_mqd *m; uint32_t *mqd_hqd; uint32_t reg, hqd_base, data; m = get_mqd(mqd); - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */ mqd_hqd = &m->cp_mqd_base_addr_lo; @@ -308,16 +291,15 @@ int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data); - release_queue(kgd); + release_queue(adev); return 0; } -int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd, +int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t doorbell_off) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; struct v9_mqd *m; uint32_t mec, pipe; @@ -325,7 +307,7 @@ int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd, m = get_mqd(mqd); - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); @@ -361,16 +343,15 @@ int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd, out_unlock: spin_unlock(&adev->gfx.kiq.ring_lock); - release_queue(kgd); + release_queue(adev); return r; } -int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd, +int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t i = 0, reg; #define HQD_N_REGS 56 #define DUMP_REG(addr) do { \ @@ -384,13 +365,13 @@ int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd, if (*dump == NULL) return -ENOMEM; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR); reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++) DUMP_REG(reg); - release_queue(kgd); + release_queue(adev); WARN_ON_ONCE(i != HQD_N_REGS); *n_regs = i; @@ -398,10 +379,9 @@ int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd, return 0; } -static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void 
*mqd, +static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, uint32_t __user *wptr, struct mm_struct *mm) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; unsigned long end_jiffies; @@ -468,11 +448,10 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, return 0; } -static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, +static int kgd_hqd_sdma_dump(struct amdgpu_device *adev, uint32_t engine_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, engine_id, queue_id); uint32_t i = 0, reg; @@ -500,15 +479,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, return 0; } -bool kgd_gfx_v9_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, - uint32_t pipe_id, uint32_t queue_id) +bool kgd_gfx_v9_hqd_is_occupied(struct amdgpu_device *adev, + uint64_t queue_address, uint32_t pipe_id, + uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t act; bool retval = false; uint32_t low, high; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE)); if (act) { low = lower_32_bits(queue_address >> 8); @@ -518,13 +497,12 @@ bool kgd_gfx_v9_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, high == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI))) retval = true; } - release_queue(kgd); + release_queue(adev); return retval; } -static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) +static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; uint32_t sdma_rlc_rb_cntl; @@ -541,12 +519,11 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) return false; } -int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd, +int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, uint32_t queue_id) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); enum hqd_dequeue_request_type type; unsigned long end_jiffies; uint32_t temp; @@ -555,7 +532,7 @@ int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd, if (amdgpu_in_reset(adev)) return -EIO; - acquire_queue(kgd, pipe_id, queue_id); + acquire_queue(adev, pipe_id, queue_id); if (m->cp_hqd_vmid == 0) WREG32_FIELD15_RLC(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0); @@ -584,20 +561,19 @@ int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd, break; if (time_after(jiffies, end_jiffies)) { pr_err("cp queue preemption time out.\n"); - release_queue(kgd); + release_queue(adev); return -ETIME; } usleep_range(500, 1000); } - release_queue(kgd); + release_queue(adev); return 0; } -static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, +static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd, unsigned int utimeout) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; uint32_t sdma_rlc_reg_offset; uint32_t temp; @@ -634,11 +610,10 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, return 0; } -bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, +bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, uint8_t vmid, uint16_t *p_pasid) { uint32_t value; - struct amdgpu_device *adev = (struct 
amdgpu_device *) kgd; value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid); @@ -647,12 +622,12 @@ bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); } -int kgd_gfx_v9_address_watch_disable(struct kgd_dev *kgd) +int kgd_gfx_v9_address_watch_disable(struct amdgpu_device *adev) { return 0; } -int kgd_gfx_v9_address_watch_execute(struct kgd_dev *kgd, +int kgd_gfx_v9_address_watch_execute(struct amdgpu_device *adev, unsigned int watch_point_id, uint32_t cntl_val, uint32_t addr_hi, @@ -661,11 +636,10 @@ int kgd_gfx_v9_address_watch_execute(struct kgd_dev *kgd, return 0; } -int kgd_gfx_v9_wave_control_execute(struct kgd_dev *kgd, +int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev, uint32_t gfx_index_val, uint32_t sq_cmd) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); uint32_t data = 0; mutex_lock(&adev->grbm_idx_mutex); @@ -686,18 +660,16 @@ int kgd_gfx_v9_wave_control_execute(struct kgd_dev *kgd, return 0; } -uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd, +uint32_t kgd_gfx_v9_address_watch_get_offset(struct amdgpu_device *adev, unsigned int watch_point_id, unsigned int reg_offset) { return 0; } -void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, +void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) { pr_err("trying to set page table base for wrong VMID %u\n", vmid); @@ -804,7 +776,7 @@ static void get_wave_count(struct amdgpu_device *adev, int queue_idx, * * Reading registers referenced above involves programming GRBM appropriately */ -void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid, +void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, int *pasid_wave_cnt, int *max_waves_per_cu) { int qidx; @@ -818,10 +790,8 @@ void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid, int pasid_tmp; int max_queue_cnt; int vmid_wave_cnt = 0; - struct amdgpu_device *adev; DECLARE_BITMAP(cp_queue_bitmap, KGD_MAX_QUEUES); - adev = get_amdgpu_device(kgd); lock_spi_csq_mutexes(adev); soc15_grbm_select(adev, 1, 0, 0, 0); @@ -882,12 +852,10 @@ void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid, adev->gfx.cu_info.max_waves_per_simd; } -void kgd_gfx_v9_program_trap_handler_settings(struct kgd_dev *kgd, +void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev, uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); - - lock_srbm(kgd, 0, 0, 0, vmid); + lock_srbm(adev, 0, 0, 0, vmid); /* * Program TBA registers @@ -905,7 +873,7 @@ void kgd_gfx_v9_program_trap_handler_settings(struct kgd_dev *kgd, WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI), upper_32_bits(tma_addr >> 8)); - unlock_srbm(kgd); + unlock_srbm(adev); } const struct kfd2kgd_calls gfx_v9_kfd2kgd = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h index c63591106879..24be49df26fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h @@ -22,48 +22,49 @@ -void kgd_gfx_v9_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, +void kgd_gfx_v9_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid, uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, 
uint32_t sh_mem_bases); -int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid, +int kgd_gfx_v9_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, unsigned int vmid); -int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id); -int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, +int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id); +int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, uint32_t wptr_mask, struct mm_struct *mm); -int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd, +int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t doorbell_off); -int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd, +int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs); -bool kgd_gfx_v9_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, - uint32_t pipe_id, uint32_t queue_id); -int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd, +bool kgd_gfx_v9_hqd_is_occupied(struct amdgpu_device *adev, + uint64_t queue_address, uint32_t pipe_id, + uint32_t queue_id); +int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, uint32_t queue_id); -int kgd_gfx_v9_address_watch_disable(struct kgd_dev *kgd); -int kgd_gfx_v9_address_watch_execute(struct kgd_dev *kgd, +int kgd_gfx_v9_address_watch_disable(struct amdgpu_device *adev); +int kgd_gfx_v9_address_watch_execute(struct amdgpu_device *adev, unsigned int watch_point_id, uint32_t cntl_val, uint32_t addr_hi, uint32_t addr_lo); -int kgd_gfx_v9_wave_control_execute(struct kgd_dev *kgd, +int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev, uint32_t gfx_index_val, uint32_t sq_cmd); -uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd, +uint32_t kgd_gfx_v9_address_watch_get_offset(struct amdgpu_device *adev, unsigned int watch_point_id, unsigned int reg_offset); -bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, +bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, uint8_t vmid, uint16_t *p_pasid); -void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, +void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base); -void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid, +void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, int *pasid_wave_cnt, int *max_waves_per_cu); -void kgd_gfx_v9_program_trap_handler_settings(struct kgd_dev *kgd, +void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev, uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 71acd577803e..5df89a295177 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -60,12 +60,6 @@ static const char * const domain_bit_to_string[] = { static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work); - -static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) -{ - return (struct amdgpu_device *)kgd; -} - static bool kfd_mem_is_attached(struct amdgpu_vm *avm, struct kgd_mem *mem) { @@ -126,8 +120,19 @@ static size_t 
amdgpu_amdkfd_acc_size(uint64_t size) PAGE_ALIGN(size); } +/** + * amdgpu_amdkfd_reserve_mem_limit() - Decrease available memory by the size + * of the buffer, including any memory reserved for control structures + * + * @adev: Device to which the allocated BO belongs + * @size: Size of buffer, in bytes, encapsulated by BO. This should be + * equivalent to amdgpu_bo_size(BO) + * @alloc_flag: Flags used when allocating the BO + * + * Return: -ENOMEM in case of error, 0 otherwise + */ static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, - uint64_t size, u32 domain, bool sg) + uint64_t size, u32 alloc_flag) { uint64_t reserved_for_pt = ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size); @@ -137,20 +142,24 @@ static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, acc_size = amdgpu_amdkfd_acc_size(size); vram_needed = 0; - if (domain == AMDGPU_GEM_DOMAIN_GTT) { - /* TTM GTT memory */ + if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) { system_mem_needed = acc_size + size; ttm_mem_needed = acc_size + size; - } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) { - /* Userptr */ + } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { + system_mem_needed = acc_size; + ttm_mem_needed = acc_size; + vram_needed = size; + } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) { system_mem_needed = acc_size + size; ttm_mem_needed = acc_size; - } else { - /* VRAM and SG */ + } else if (alloc_flag & + (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | + KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) { system_mem_needed = acc_size; ttm_mem_needed = acc_size; - if (domain == AMDGPU_GEM_DOMAIN_VRAM) - vram_needed = size; + } else { + pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag); + return -ENOMEM; } spin_lock(&kfd_mem_limit.mem_limit_lock); @@ -166,64 +175,72 @@ static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, (adev->kfd.vram_used + vram_needed > adev->gmc.real_vram_size - reserved_for_pt)) { ret = -ENOMEM; - } else { - kfd_mem_limit.system_mem_used += system_mem_needed; - kfd_mem_limit.ttm_mem_used += ttm_mem_needed; - adev->kfd.vram_used += vram_needed; + goto release; } + /* Update memory accounting by decreasing available system + * memory, TTM memory and GPU memory as computed above + */ + adev->kfd.vram_used += vram_needed; + kfd_mem_limit.system_mem_used += system_mem_needed; + kfd_mem_limit.ttm_mem_used += ttm_mem_needed; + +release: spin_unlock(&kfd_mem_limit.mem_limit_lock); return ret; } static void unreserve_mem_limit(struct amdgpu_device *adev, - uint64_t size, u32 domain, bool sg) + uint64_t size, u32 alloc_flag) { size_t acc_size; acc_size = amdgpu_amdkfd_acc_size(size); spin_lock(&kfd_mem_limit.mem_limit_lock); - if (domain == AMDGPU_GEM_DOMAIN_GTT) { + + if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_GTT) { kfd_mem_limit.system_mem_used -= (acc_size + size); kfd_mem_limit.ttm_mem_used -= (acc_size + size); - } else if (domain == AMDGPU_GEM_DOMAIN_CPU && !sg) { + } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { + kfd_mem_limit.system_mem_used -= acc_size; + kfd_mem_limit.ttm_mem_used -= acc_size; + adev->kfd.vram_used -= size; + } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) { kfd_mem_limit.system_mem_used -= (acc_size + size); kfd_mem_limit.ttm_mem_used -= acc_size; - } else { + } else if (alloc_flag & + (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | + KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) { kfd_mem_limit.system_mem_used -= acc_size; kfd_mem_limit.ttm_mem_used -= acc_size; - if (domain == AMDGPU_GEM_DOMAIN_VRAM) { - adev->kfd.vram_used -= size; -
WARN_ONCE(adev->kfd.vram_used < 0, - "kfd VRAM memory accounting unbalanced"); - } + } + pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag); + goto release; } - WARN_ONCE(kfd_mem_limit.system_mem_used < 0, - "kfd system memory accounting unbalanced"); + + WARN_ONCE(adev->kfd.vram_used < 0, + "KFD VRAM memory accounting unbalanced"); WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0, - "kfd TTM memory accounting unbalanced"); + "KFD TTM memory accounting unbalanced"); + WARN_ONCE(kfd_mem_limit.system_mem_used < 0, + "KFD system memory accounting unbalanced"); +release: spin_unlock(&kfd_mem_limit.mem_limit_lock); } void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - u32 domain = bo->preferred_domains; - bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU); - - if (bo->flags & AMDGPU_AMDKFD_CREATE_USERPTR_BO) { - domain = AMDGPU_GEM_DOMAIN_CPU; - sg = false; - } + u32 alloc_flags = bo->kfd_bo->alloc_flags; + u64 size = amdgpu_bo_size(bo); - unreserve_mem_limit(adev, amdgpu_bo_size(bo), domain, sg); + unreserve_mem_limit(adev, size, alloc_flags); kfree(bo->kfd_bo); } - /* amdgpu_amdkfd_remove_eviction_fence - Removes eviction fence from BO's * reservation object. * @@ -646,12 +663,6 @@ kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem, if (IS_ERR(gobj)) return PTR_ERR(gobj); - /* Import takes an extra reference on the dmabuf. Drop it now to - * avoid leaking it. We only need the one reference in - * kgd_mem->dmabuf. - */ - dma_buf_put(mem->dmabuf); - *bo = gem_to_amdgpu_bo(gobj); (*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE; (*bo)->parent = amdgpu_bo_ref(mem->bo); @@ -1278,12 +1289,60 @@ create_evict_fence_fail: return ret; } -int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd, +/** + * amdgpu_amdkfd_gpuvm_pin_bo() - Pins a BO using the following criteria + * @bo: Handle of buffer object being pinned + * @domain: Domain into which BO should be pinned + * + * - USERPTR BOs are UNPINNABLE and will return an error + * - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their + * PIN count incremented. It is valid to PIN a BO multiple times + * + * Return: 0 if successful in pinning, non-zero in case of error. + */ +static int amdgpu_amdkfd_gpuvm_pin_bo(struct amdgpu_bo *bo, u32 domain) +{ + int ret = 0; + + ret = amdgpu_bo_reserve(bo, false); + if (unlikely(ret)) + return ret; + + ret = amdgpu_bo_pin_restricted(bo, domain, 0, 0); + if (ret) + pr_err("Error in Pinning BO to domain: %d\n", domain); + + amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false); + amdgpu_bo_unreserve(bo); + + return ret; +} + +/** + * amdgpu_amdkfd_gpuvm_unpin_bo() - Unpins a BO using the following criteria + * @bo: Handle of buffer object being unpinned + * + * - Is an illegal request for USERPTR BOs and is ignored + * - All other BO types (GTT, VRAM, MMIO and DOORBELL) will have their + * PIN count decremented.
Calls to UNPIN must balance calls to PIN + */ +static void amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo) +{ + int ret = 0; + + ret = amdgpu_bo_reserve(bo, false); + if (unlikely(ret)) + return; + + amdgpu_bo_unpin(bo); + amdgpu_bo_unreserve(bo); +} + +int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev, struct file *filp, u32 pasid, void **process_info, struct dma_fence **ef) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct amdgpu_fpriv *drv_priv; struct amdgpu_vm *avm; int ret; @@ -1359,12 +1418,12 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, } } -void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv) +void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev, + void *drm_priv) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct amdgpu_vm *avm; - if (WARN_ON(!kgd || !drm_priv)) + if (WARN_ON(!adev || !drm_priv)) return; avm = drm_priv_to_vm(drm_priv); @@ -1392,17 +1451,16 @@ uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv) } int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( - struct kgd_dev *kgd, uint64_t va, uint64_t size, + struct amdgpu_device *adev, uint64_t va, uint64_t size, void *drm_priv, struct kgd_mem **mem, uint64_t *offset, uint32_t flags) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); enum ttm_bo_type bo_type = ttm_bo_type_device; struct sg_table *sg = NULL; uint64_t user_addr = 0; struct amdgpu_bo *bo; - struct drm_gem_object *gobj; + struct drm_gem_object *gobj = NULL; u32 domain, alloc_domain; u64 alloc_flags; int ret; @@ -1460,7 +1518,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( amdgpu_sync_create(&(*mem)->sync); - ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg); + ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, flags); if (ret) { pr_debug("Insufficient memory\n"); goto err_reserve_limit; @@ -1506,20 +1564,34 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( if (offset) *offset = amdgpu_bo_mmap_offset(bo); + if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | + KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) { + ret = amdgpu_amdkfd_gpuvm_pin_bo(bo, AMDGPU_GEM_DOMAIN_GTT); + if (ret) { + pr_err("Pinning MMIO/DOORBELL BO during ALLOC FAILED\n"); + goto err_pin_bo; + } + bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; + bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT; + } + return 0; allocate_init_user_pages_failed: remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); +err_pin_bo: drm_vma_node_revoke(&gobj->vma_node, drm_priv); err_node_allow: - drm_gem_object_put(gobj); /* Don't unreserve system mem limit twice */ goto err_reserve_limit; err_bo_create: - unreserve_mem_limit(adev, size, alloc_domain, !!sg); + unreserve_mem_limit(adev, size, flags); err_reserve_limit: mutex_destroy(&(*mem)->lock); - kfree(*mem); + if (gobj) + drm_gem_object_put(gobj); + else + kfree(*mem); err: if (sg) { sg_free_table(sg); @@ -1529,7 +1601,7 @@ err: } int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, + struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv, uint64_t *size) { struct amdkfd_process_info *process_info = mem->process_info; @@ -1542,6 +1614,14 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( bool is_imported = false; mutex_lock(&mem->lock); + + /* Unpin MMIO/DOORBELL BOs that were pinned during allocation */ + if (mem->alloc_flags & + (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL | + KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) { +
amdgpu_amdkfd_gpuvm_unpin_bo(mem->bo); + } + mapped_to_gpu_memory = mem->mapped_to_gpu_memory; is_imported = mem->is_imported; mutex_unlock(&mem->lock); @@ -1621,10 +1701,9 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( } int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, + struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv, bool *table_freed) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); int ret; struct amdgpu_bo *bo; @@ -1751,7 +1830,7 @@ out: } int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv) + struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv) { struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); struct amdkfd_process_info *process_info = avm->process_info; @@ -1812,7 +1891,7 @@ out: } int amdgpu_amdkfd_gpuvm_sync_memory( - struct kgd_dev *kgd, struct kgd_mem *mem, bool intr) + struct amdgpu_device *adev, struct kgd_mem *mem, bool intr) { struct amdgpu_sync sync; int ret; @@ -1828,7 +1907,7 @@ int amdgpu_amdkfd_gpuvm_sync_memory( return ret; } -int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd, +int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct amdgpu_device *adev, struct kgd_mem *mem, void **kptr, uint64_t *size) { int ret; @@ -1884,7 +1963,8 @@ bo_reserve_failed: return ret; } -void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_dev *kgd, struct kgd_mem *mem) +void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct amdgpu_device *adev, + struct kgd_mem *mem) { struct amdgpu_bo *bo = mem->bo; @@ -1894,12 +1974,9 @@ void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_dev *kgd, struct kg amdgpu_bo_unreserve(bo); } -int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd, - struct kfd_vm_fault_info *mem) +int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev, + struct kfd_vm_fault_info *mem) { - struct amdgpu_device *adev; - - adev = (struct amdgpu_device *)kgd; if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) { *mem = *adev->gmc.vm_fault_info; mb(); @@ -1908,13 +1985,12 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd, return 0; } -int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, +int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev, struct dma_buf *dma_buf, uint64_t va, void *drm_priv, struct kgd_mem **mem, uint64_t *size, uint64_t *mmap_offset) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); struct drm_gem_object *obj; struct amdgpu_bo *bo; @@ -2541,11 +2617,9 @@ int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem) } /* Returns GPU-specific tiling mode information */ -int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd, +int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev, struct tile_config *config) { - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; - config->gb_addr_config = adev->gfx.config.gb_addr_config; config->tile_config_ptr = adev->gfx.config.tile_mode_array; config->num_tile_configs = diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 96b7bb13a2dd..12a6b1c99c93 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -1569,6 +1569,18 @@ void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev, WREG32(adev->bios_scratch_reg_offset + 3, tmp); } +void 
amdgpu_atombios_scratch_regs_set_backlight_level(struct amdgpu_device *adev, + u32 backlight_level) +{ + u32 tmp = RREG32(adev->bios_scratch_reg_offset + 2); + + tmp &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK; + tmp |= (backlight_level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT) & + ATOM_S2_CURRENT_BL_LEVEL_MASK; + + WREG32(adev->bios_scratch_reg_offset + 2, tmp); +} + bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev) { u32 tmp = RREG32(adev->bios_scratch_reg_offset + 7); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h index 8cc0222dba19..27e74b1fc260 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h @@ -185,6 +185,8 @@ bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev); void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock); void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev, bool hung); +void amdgpu_atombios_scratch_regs_set_backlight_level(struct amdgpu_device *adev, + u32 backlight_level); bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev); void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index b9c11c2b2885..0de66f59adb8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -827,6 +827,7 @@ static int amdgpu_connector_vga_get_modes(struct drm_connector *connector) amdgpu_connector_get_edid(connector); ret = amdgpu_connector_ddc_get_modes(connector); + amdgpu_get_native_mode(connector); return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 5625f7736e37..90d22a376632 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3509,6 +3509,9 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->rmmio_size = pci_resource_len(adev->pdev, 2); } + for (i = 0; i < AMD_IP_BLOCK_TYPE_NUM; i++) + atomic_set(&adev->pm.pwr_state[i], POWER_STATE_UNKNOWN); + adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size); if (adev->rmmio == NULL) { return -ENOMEM; @@ -3684,8 +3687,6 @@ fence_driver_init: /* Get a log2 for easy divisions. 
*/ adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps)); - amdgpu_fbdev_init(adev); - r = amdgpu_pm_sysfs_init(adev); if (r) { adev->pm_sysfs_en = false; @@ -3830,7 +3831,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) /* disable all interrupts */ amdgpu_irq_disable_all(adev); if (adev->mode_info.mode_config_initialized){ - if (!amdgpu_device_has_dc_support(adev)) + if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev))) drm_helper_force_disable_all(adev_to_drm(adev)); else drm_atomic_helper_shutdown(adev_to_drm(adev)); @@ -3843,8 +3844,6 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev) amdgpu_ucode_sysfs_fini(adev); sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes); - amdgpu_fbdev_fini(adev); - amdgpu_device_ip_fini_early(adev); amdgpu_irq_fini_hw(adev); @@ -3939,7 +3938,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) drm_kms_helper_poll_disable(dev); if (fbcon) - amdgpu_fbdev_set_suspend(adev, 1); + drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true); cancel_delayed_work_sync(&adev->delayed_init_work); @@ -4016,7 +4015,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon) flush_delayed_work(&adev->delayed_init_work); if (fbcon) - amdgpu_fbdev_set_suspend(adev, 0); + drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false); drm_kms_helper_poll_enable(dev); @@ -4286,6 +4285,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, { int r; + amdgpu_amdkfd_pre_reset(adev); + if (from_hypervisor) r = amdgpu_virt_request_full_gpu(adev, true); else @@ -4313,7 +4314,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev, amdgpu_irq_gpu_reset_resume_helper(adev); r = amdgpu_ib_ring_tests(adev); - amdgpu_amdkfd_post_reset(adev); error: if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) { @@ -4646,7 +4646,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, if (r) goto out; - amdgpu_fbdev_set_suspend(tmp_adev, 0); + drm_fb_helper_set_suspend_unlocked(adev_to_drm(tmp_adev)->fb_helper, false); /* * The GPU enters bad state once faulty pages @@ -5028,7 +5028,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, cancel_delayed_work_sync(&tmp_adev->delayed_init_work); - amdgpu_amdkfd_pre_reset(tmp_adev); + if (!amdgpu_sriov_vf(tmp_adev)) + amdgpu_amdkfd_pre_reset(tmp_adev); /* * Mark these ASICs to be reseted as untracked first @@ -5036,7 +5037,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, */ amdgpu_unregister_gpu_instance(tmp_adev); - amdgpu_fbdev_set_suspend(tmp_adev, 1); + drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true); /* disable ras on ALL IPs */ if (!need_emergency_restart && @@ -5086,7 +5087,7 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */ tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter)); /* Actual ASIC resets if needed.*/ - /* TODO Implement XGMI hive reset logic for SRIOV */ + /* Host driver will handle XGMI hive reset for SRIOV */ if (amdgpu_sriov_vf(adev)) { r = amdgpu_device_reset_sriov(adev, job ? 
false : true); if (r) @@ -5127,7 +5128,7 @@ skip_hw_reset: drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res); } - if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) { + if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) { drm_helper_resume_force_mode(adev_to_drm(tmp_adev)); } @@ -5148,7 +5149,7 @@ skip_sched_resume: list_for_each_entry(tmp_adev, device_list_handle, reset_list) { /* unlock kfd: SRIOV would do it separately */ if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev)) - amdgpu_amdkfd_post_reset(tmp_adev); + amdgpu_amdkfd_post_reset(tmp_adev); /* kfd_post_reset will do nothing if kfd device is not initialized, * need to bring up kfd here if it's not be initialized before diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index ff70bc233489..ea00090b3fb3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -157,6 +157,8 @@ static int hw_id_map[MAX_HWIP] = { [HDP_HWIP] = HDP_HWID, [SDMA0_HWIP] = SDMA0_HWID, [SDMA1_HWIP] = SDMA1_HWID, + [SDMA2_HWIP] = SDMA2_HWID, + [SDMA3_HWIP] = SDMA3_HWID, [MMHUB_HWIP] = MMHUB_HWID, [ATHUB_HWIP] = ATHUB_HWID, [NBIO_HWIP] = NBIF_HWID, @@ -248,8 +250,8 @@ get_from_vram: offset = offsetof(struct binary_header, binary_checksum) + sizeof(bhdr->binary_checksum); - size = bhdr->binary_size - offset; - checksum = bhdr->binary_checksum; + size = le16_to_cpu(bhdr->binary_size) - offset; + checksum = le16_to_cpu(bhdr->binary_checksum); if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, size, checksum)) { @@ -270,7 +272,7 @@ get_from_vram: } if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, - ihdr->size, checksum)) { + le16_to_cpu(ihdr->size), checksum)) { DRM_ERROR("invalid ip discovery data table checksum\n"); r = -EINVAL; goto out; @@ -282,7 +284,7 @@ get_from_vram: ghdr = (struct gpu_info_header *)(adev->mman.discovery_bin + offset); if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset, - ghdr->size, checksum)) { + le32_to_cpu(ghdr->size), checksum)) { DRM_ERROR("invalid gc data table checksum\n"); r = -EINVAL; goto out; @@ -489,10 +491,10 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev) le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset)); for (i = 0; i < 32; i++) { - if (le32_to_cpu(harvest_info->list[i].hw_id) == 0) + if (le16_to_cpu(harvest_info->list[i].hw_id) == 0) break; - switch (le32_to_cpu(harvest_info->list[i].hw_id)) { + switch (le16_to_cpu(harvest_info->list[i].hw_id)) { case VCN_HWID: vcn_harvest_count++; if (harvest_info->list[i].number_instance == 0) @@ -587,6 +589,9 @@ static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &nv_common_ip_block); break; default: + dev_err(adev->dev, + "Failed to add common ip block(GC_HWIP:0x%x)\n", + adev->ip_versions[GC_HWIP][0]); return -EINVAL; } return 0; @@ -619,6 +624,9 @@ static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block); break; default: + dev_err(adev->dev, + "Failed to add gmc ip block(GC_HWIP:0x%x)\n", + adev->ip_versions[GC_HWIP][0]); return -EINVAL; } return 0; @@ -648,6 +656,9 @@ static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); break; default: + dev_err(adev->dev, + "Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n", + 
adev->ip_versions[OSSSYS_HWIP][0]); return -EINVAL; } return 0; @@ -688,6 +699,9 @@ static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block); break; default: + dev_err(adev->dev, + "Failed to add psp ip block(MP0_HWIP:0x%x)\n", + adev->ip_versions[MP0_HWIP][0]); return -EINVAL; } return 0; @@ -726,6 +740,9 @@ static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block); break; default: + dev_err(adev->dev, + "Failed to add smu ip block(MP1_HWIP:0x%x)\n", + adev->ip_versions[MP1_HWIP][0]); return -EINVAL; } return 0; @@ -753,6 +770,9 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &dm_ip_block); break; default: + dev_err(adev->dev, + "Failed to add dm ip block(DCE_HWIP:0x%x)\n", + adev->ip_versions[DCE_HWIP][0]); return -EINVAL; } } else if (adev->ip_versions[DCI_HWIP][0]) { @@ -763,6 +783,9 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &dm_ip_block); break; default: + dev_err(adev->dev, + "Failed to add dm ip block(DCI_HWIP:0x%x)\n", + adev->ip_versions[DCI_HWIP][0]); return -EINVAL; } #endif @@ -796,6 +819,9 @@ static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); break; default: + dev_err(adev->dev, + "Failed to add gfx ip block(GC_HWIP:0x%x)\n", + adev->ip_versions[GC_HWIP][0]); return -EINVAL; } return 0; @@ -829,6 +855,9 @@ static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block); break; default: + dev_err(adev->dev, + "Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n", + adev->ip_versions[SDMA0_HWIP][0]); return -EINVAL; } return 0; @@ -845,6 +874,9 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block); break; default: + dev_err(adev->dev, + "Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n", + adev->ip_versions[UVD_HWIP][0]); return -EINVAL; } switch (adev->ip_versions[VCE_HWIP][0]) { @@ -855,6 +887,9 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block); break; default: + dev_err(adev->dev, + "Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n", + adev->ip_versions[VCE_HWIP][0]); return -EINVAL; } } else { @@ -885,6 +920,7 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) case IP_VERSION(3, 0, 64): case IP_VERSION(3, 1, 1): case IP_VERSION(3, 0, 2): + case IP_VERSION(3, 0, 192): amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); if (!amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block); @@ -893,6 +929,9 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block); break; default: + dev_err(adev->dev, + "Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n", + adev->ip_versions[UVD_HWIP][0]); return -EINVAL; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 68108f151dad..18cc7155e667 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -1599,13 +1599,10 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev) continue; } robj = gem_to_amdgpu_bo(fb->obj[0]); - /* don't unpin kernel fb objects 
*/ - if (!amdgpu_fbdev_robj_is_fb(adev, robj)) { - r = amdgpu_bo_reserve(robj, true); - if (r == 0) { - amdgpu_bo_unpin(robj); - amdgpu_bo_unreserve(robj); - } + r = amdgpu_bo_reserve(robj, true); + if (r == 0) { + amdgpu_bo_unpin(robj); + amdgpu_bo_unreserve(robj); } } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 438468b82eb6..3a6f125c6dc9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -2001,6 +2001,19 @@ retry_init: goto err_pci; } + /* + * 1. don't init fbdev on hw without DCE + * 2. don't init fbdev if there are no connectors + */ + if (adev->mode_info.mode_config_initialized && + !list_empty(&adev_to_drm(adev)->mode_config.connector_list)) { + /* select 8 bpp console on low vram cards */ + if (adev->gmc.real_vram_size <= (32*1024*1024)) + drm_fbdev_generic_setup(adev_to_drm(adev), 8); + else + drm_fbdev_generic_setup(adev_to_drm(adev), 32); + } + ret = amdgpu_debugfs_init(adev); if (ret) DRM_ERROR("Creating debugfs files failed (%d).\n", ret); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c deleted file mode 100644 index cd0acbea75da..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ /dev/null @@ -1,388 +0,0 @@ -/* - * Copyright © 2007 David Airlie - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - * - * Authors: - * David Airlie - */ - -#include <linux/module.h> -#include <linux/pm_runtime.h> -#include <linux/slab.h> -#include <linux/vga_switcheroo.h> - -#include <drm/amdgpu_drm.h> -#include <drm/drm_crtc.h> -#include <drm/drm_crtc_helper.h> -#include <drm/drm_fb_helper.h> -#include <drm/drm_fourcc.h> - -#include "amdgpu.h" -#include "cikd.h" -#include "amdgpu_gem.h" - -#include "amdgpu_display.h" - -/* object hierarchy - - this contains a helper + a amdgpu fb - the helper contains a pointer to amdgpu framebuffer baseclass. 
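The amdgpu_drv.c hunk above hands fbdev emulation to drm_fbdev_generic_setup() (the driver's own amdgpu_fb.c is deleted below); it only runs when KMS mode config is initialized and at least one connector exists, and low-VRAM boards get an 8 bpp console. The policy reduced to a pure function, with simplified, hypothetical field names:

#include <stdbool.h>
#include <stdint.h>

struct gpu {                    /* stand-in for the relevant adev state */
        bool mode_config_initialized;
        bool has_connectors;
        uint64_t vram_size;
};

/* Returns the console depth to request, or 0 to skip fbdev entirely
 * (headless or display-less parts). */
static unsigned int fbdev_bpp(const struct gpu *g)
{
        if (!g->mode_config_initialized || !g->has_connectors)
                return 0;
        return g->vram_size <= 32ull * 1024 * 1024 ? 8 : 32;
}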
-*/ - -static int -amdgpufb_open(struct fb_info *info, int user) -{ - struct drm_fb_helper *fb_helper = info->par; - int ret = pm_runtime_get_sync(fb_helper->dev->dev); - if (ret < 0 && ret != -EACCES) { - pm_runtime_mark_last_busy(fb_helper->dev->dev); - pm_runtime_put_autosuspend(fb_helper->dev->dev); - return ret; - } - return 0; -} - -static int -amdgpufb_release(struct fb_info *info, int user) -{ - struct drm_fb_helper *fb_helper = info->par; - - pm_runtime_mark_last_busy(fb_helper->dev->dev); - pm_runtime_put_autosuspend(fb_helper->dev->dev); - return 0; -} - -static const struct fb_ops amdgpufb_ops = { - .owner = THIS_MODULE, - DRM_FB_HELPER_DEFAULT_OPS, - .fb_open = amdgpufb_open, - .fb_release = amdgpufb_release, - .fb_fillrect = drm_fb_helper_cfb_fillrect, - .fb_copyarea = drm_fb_helper_cfb_copyarea, - .fb_imageblit = drm_fb_helper_cfb_imageblit, -}; - - -int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int cpp, bool tiled) -{ - int aligned = width; - int pitch_mask = 0; - - switch (cpp) { - case 1: - pitch_mask = 255; - break; - case 2: - pitch_mask = 127; - break; - case 3: - case 4: - pitch_mask = 63; - break; - } - - aligned += pitch_mask; - aligned &= ~pitch_mask; - return aligned * cpp; -} - -static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj) -{ - struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj); - int ret; - - ret = amdgpu_bo_reserve(abo, true); - if (likely(ret == 0)) { - amdgpu_bo_kunmap(abo); - amdgpu_bo_unpin(abo); - amdgpu_bo_unreserve(abo); - } - drm_gem_object_put(gobj); -} - -static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev, - struct drm_mode_fb_cmd2 *mode_cmd, - struct drm_gem_object **gobj_p) -{ - const struct drm_format_info *info; - struct amdgpu_device *adev = rfbdev->adev; - struct drm_gem_object *gobj = NULL; - struct amdgpu_bo *abo = NULL; - bool fb_tiled = false; /* useful for testing */ - u32 tiling_flags = 0, domain; - int ret; - int aligned_size, size; - int height = mode_cmd->height; - u32 cpp; - u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | - AMDGPU_GEM_CREATE_VRAM_CLEARED; - - info = drm_get_format_info(adev_to_drm(adev), mode_cmd); - cpp = info->cpp[0]; - - /* need to align pitch with crtc limits */ - mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp, - fb_tiled); - domain = amdgpu_display_supported_domains(adev, flags); - height = ALIGN(mode_cmd->height, 8); - size = mode_cmd->pitches[0] * height; - aligned_size = ALIGN(size, PAGE_SIZE); - ret = amdgpu_gem_object_create(adev, aligned_size, 0, domain, flags, - ttm_bo_type_device, NULL, &gobj); - if (ret) { - pr_err("failed to allocate framebuffer (%d)\n", aligned_size); - return -ENOMEM; - } - abo = gem_to_amdgpu_bo(gobj); - - if (fb_tiled) - tiling_flags = AMDGPU_TILING_SET(ARRAY_MODE, GRPH_ARRAY_2D_TILED_THIN1); - - ret = amdgpu_bo_reserve(abo, false); - if (unlikely(ret != 0)) - goto out_unref; - - if (tiling_flags) { - ret = amdgpu_bo_set_tiling_flags(abo, - tiling_flags); - if (ret) - dev_err(adev->dev, "FB failed to set tiling flags\n"); - } - - ret = amdgpu_bo_pin(abo, domain); - if (ret) { - amdgpu_bo_unreserve(abo); - goto out_unref; - } - - ret = amdgpu_ttm_alloc_gart(&abo->tbo); - if (ret) { - amdgpu_bo_unreserve(abo); - dev_err(adev->dev, "%p bind failed\n", abo); - goto out_unref; - } - - ret = amdgpu_bo_kmap(abo, NULL); - amdgpu_bo_unreserve(abo); - if (ret) { - goto out_unref; - } - - *gobj_p = gobj; - return 0; -out_unref: - amdgpufb_destroy_pinned_object(gobj); - 
*gobj_p = NULL; - return ret; -} - -static int amdgpufb_create(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes) -{ - struct amdgpu_fbdev *rfbdev = (struct amdgpu_fbdev *)helper; - struct amdgpu_device *adev = rfbdev->adev; - struct fb_info *info; - struct drm_framebuffer *fb = NULL; - struct drm_mode_fb_cmd2 mode_cmd; - struct drm_gem_object *gobj = NULL; - struct amdgpu_bo *abo = NULL; - int ret; - - memset(&mode_cmd, 0, sizeof(mode_cmd)); - mode_cmd.width = sizes->surface_width; - mode_cmd.height = sizes->surface_height; - - if (sizes->surface_bpp == 24) - sizes->surface_bpp = 32; - - mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, - sizes->surface_depth); - - ret = amdgpufb_create_pinned_object(rfbdev, &mode_cmd, &gobj); - if (ret) { - DRM_ERROR("failed to create fbcon object %d\n", ret); - return ret; - } - - abo = gem_to_amdgpu_bo(gobj); - - /* okay we have an object now allocate the framebuffer */ - info = drm_fb_helper_alloc_fbi(helper); - if (IS_ERR(info)) { - ret = PTR_ERR(info); - goto out; - } - - ret = amdgpu_display_gem_fb_init(adev_to_drm(adev), &rfbdev->rfb, - &mode_cmd, gobj); - if (ret) { - DRM_ERROR("failed to initialize framebuffer %d\n", ret); - goto out; - } - - fb = &rfbdev->rfb.base; - - /* setup helper */ - rfbdev->helper.fb = fb; - - info->fbops = &amdgpufb_ops; - - info->fix.smem_start = amdgpu_gmc_vram_cpu_pa(adev, abo); - info->fix.smem_len = amdgpu_bo_size(abo); - info->screen_base = amdgpu_bo_kptr(abo); - info->screen_size = amdgpu_bo_size(abo); - - drm_fb_helper_fill_info(info, &rfbdev->helper, sizes); - - /* setup aperture base/size for vesafb takeover */ - info->apertures->ranges[0].base = adev_to_drm(adev)->mode_config.fb_base; - info->apertures->ranges[0].size = adev->gmc.aper_size; - - /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ - - if (info->screen_base == NULL) { - ret = -ENOSPC; - goto out; - } - - DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); - DRM_INFO("vram apper at 0x%lX\n", (unsigned long)adev->gmc.aper_base); - DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo)); - DRM_INFO("fb depth is %d\n", fb->format->depth); - DRM_INFO(" pitch is %d\n", fb->pitches[0]); - - vga_switcheroo_client_fb_set(adev->pdev, info); - return 0; - -out: - if (fb && ret) { - drm_gem_object_put(gobj); - drm_framebuffer_unregister_private(fb); - drm_framebuffer_cleanup(fb); - kfree(fb); - } - return ret; -} - -static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfbdev) -{ - struct amdgpu_framebuffer *rfb = &rfbdev->rfb; - int i; - - drm_fb_helper_unregister_fbi(&rfbdev->helper); - - if (rfb->base.obj[0]) { - for (i = 0; i < rfb->base.format->num_planes; i++) - drm_gem_object_put(rfb->base.obj[0]); - amdgpufb_destroy_pinned_object(rfb->base.obj[0]); - rfb->base.obj[0] = NULL; - drm_framebuffer_unregister_private(&rfb->base); - drm_framebuffer_cleanup(&rfb->base); - } - drm_fb_helper_fini(&rfbdev->helper); - - return 0; -} - -static const struct drm_fb_helper_funcs amdgpu_fb_helper_funcs = { - .fb_probe = amdgpufb_create, -}; - -int amdgpu_fbdev_init(struct amdgpu_device *adev) -{ - struct amdgpu_fbdev *rfbdev; - int bpp_sel = 32; - int ret; - - /* don't init fbdev on hw without DCE */ - if (!adev->mode_info.mode_config_initialized) - return 0; - - /* don't init fbdev if there are no connectors */ - if (list_empty(&adev_to_drm(adev)->mode_config.connector_list)) - return 0; - - /* select 8 bpp console on low vram cards */ - if (adev->gmc.real_vram_size <= 
(32*1024*1024)) - bpp_sel = 8; - - rfbdev = kzalloc(sizeof(struct amdgpu_fbdev), GFP_KERNEL); - if (!rfbdev) - return -ENOMEM; - - rfbdev->adev = adev; - adev->mode_info.rfbdev = rfbdev; - - drm_fb_helper_prepare(adev_to_drm(adev), &rfbdev->helper, - &amdgpu_fb_helper_funcs); - - ret = drm_fb_helper_init(adev_to_drm(adev), &rfbdev->helper); - if (ret) { - kfree(rfbdev); - return ret; - } - - /* disable all the possible outputs/crtcs before entering KMS mode */ - if (!amdgpu_device_has_dc_support(adev) && !amdgpu_virtual_display) - drm_helper_disable_unused_functions(adev_to_drm(adev)); - - drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel); - return 0; -} - -void amdgpu_fbdev_fini(struct amdgpu_device *adev) -{ - if (!adev->mode_info.rfbdev) - return; - - amdgpu_fbdev_destroy(adev_to_drm(adev), adev->mode_info.rfbdev); - kfree(adev->mode_info.rfbdev); - adev->mode_info.rfbdev = NULL; -} - -void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state) -{ - if (adev->mode_info.rfbdev) - drm_fb_helper_set_suspend_unlocked(&adev->mode_info.rfbdev->helper, - state); -} - -int amdgpu_fbdev_total_size(struct amdgpu_device *adev) -{ - struct amdgpu_bo *robj; - int size = 0; - - if (!adev->mode_info.rfbdev) - return 0; - - robj = gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0]); - size += amdgpu_bo_size(robj); - return size; -} - -bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj) -{ - if (!adev->mode_info.rfbdev) - return false; - if (robj == gem_to_amdgpu_bo(adev->mode_info.rfbdev->rfb.base.obj[0])) - return true; - return false; -} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index a1e63ba4c54a..c0d8f40a5b45 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -877,6 +877,32 @@ out: return r; } +static int amdgpu_gem_align_pitch(struct amdgpu_device *adev, + int width, + int cpp, + bool tiled) +{ + int aligned = width; + int pitch_mask = 0; + + switch (cpp) { + case 1: + pitch_mask = 255; + break; + case 2: + pitch_mask = 127; + break; + case 3: + case 4: + pitch_mask = 63; + break; + } + + aligned += pitch_mask; + aligned &= ~pitch_mask; + return aligned * cpp; +} + int amdgpu_mode_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args) @@ -885,7 +911,8 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, struct drm_gem_object *gobj; uint32_t handle; u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_CPU_GTT_USWC; + AMDGPU_GEM_CREATE_CPU_GTT_USWC | + AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; u32 domain; int r; @@ -897,8 +924,8 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv, if (adev->mman.buffer_funcs_enabled) flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED; - args->pitch = amdgpu_align_pitch(adev, args->width, - DIV_ROUND_UP(args->bpp, 8), 0); + args->pitch = amdgpu_gem_align_pitch(adev, args->width, + DIV_ROUND_UP(args->bpp, 8), 0); args->size = (u64)args->pitch * args->height; args->size = ALIGN(args->size, PAGE_SIZE); domain = amdgpu_bo_get_preferred_domain(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c index f3d62e196901..0c7963dfacad 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c @@ -223,7 +223,7 @@ int amdgpu_ih_wait_on_checkpoint_process(struct amdgpu_device *adev, */ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) { - unsigned int count = 
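The amdgpu_gem_align_pitch() helper added above rounds the scanline width up to a power-of-two pixel boundary that depends on bytes-per-pixel and then converts it to a byte pitch. The same arithmetic as a standalone sketch (the default case is simplified), with a worked example in the comment:

#include <stdint.h>

/* Round a scanline up so the pixel count is a multiple of the
 * alignment implied by the pixel size, then return the pitch in
 * bytes.  E.g. width = 1366, cpp = 4 -> 1408 pixels -> 5632 bytes. */
static int align_pitch(int width, int cpp)
{
        int pitch_mask;

        switch (cpp) {
        case 1:
                pitch_mask = 255;
                break;
        case 2:
                pitch_mask = 127;
                break;
        default:        /* 3 or 4 bytes per pixel */
                pitch_mask = 63;
                break;
        }

        return ((width + pitch_mask) & ~pitch_mask) * cpp;
}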
AMDGPU_IH_MAX_NUM_IVS; + unsigned int count; u32 wptr; if (!ih->enabled || adev->shutdown) @@ -232,6 +232,7 @@ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) wptr = amdgpu_ih_get_wptr(adev, ih); restart_ih: + count = AMDGPU_IH_MAX_NUM_IVS; DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr); /* Order reading of wptr vs. reading of IH ring data */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index cc2e0c9cfe0a..4f3c62adccbd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -333,7 +333,6 @@ int amdgpu_irq_init(struct amdgpu_device *adev) if (!amdgpu_device_has_dc_support(adev)) { if (!adev->enable_virtual_display) /* Disable vblank IRQs aggressively for power-saving */ - /* XXX: can this be enabled for DC? */ adev_to_drm(adev)->vblank_disable_immediate = true; r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 89fb372ed49c..6043bf6fd414 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -232,8 +232,6 @@ struct amdgpu_i2c_chan { struct mutex mutex; }; -struct amdgpu_fbdev; - struct amdgpu_afmt { bool enabled; int offset; @@ -309,13 +307,6 @@ struct amdgpu_framebuffer { uint64_t address; }; -struct amdgpu_fbdev { - struct drm_fb_helper helper; - struct amdgpu_framebuffer rfb; - struct list_head fbdev_list; - struct amdgpu_device *adev; -}; - struct amdgpu_mode_info { struct atom_context *atom_context; struct card_info *atom_card_info; @@ -341,8 +332,6 @@ struct amdgpu_mode_info { struct edid *bios_hardcoded_edid; int bios_hardcoded_edid_size; - /* pointer to fbdev info structure */ - struct amdgpu_fbdev *rfbdev; /* firmware flags */ u32 firmware_flags; /* pointer to backlight encoder */ @@ -631,15 +620,6 @@ bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc, int *hpos, ktime_t *stime, ktime_t *etime, const struct drm_display_mode *mode); -/* fbdev layer */ -int amdgpu_fbdev_init(struct amdgpu_device *adev); -void amdgpu_fbdev_fini(struct amdgpu_device *adev); -void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state); -int amdgpu_fbdev_total_size(struct amdgpu_device *adev); -bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj); - -int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled); - /* amdgpu_display.c */ void amdgpu_display_print_display_setup(struct drm_device *dev); int amdgpu_display_modeset_create_props(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 4fcfc2313b8c..3a7b56e57cec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1032,9 +1032,14 @@ int amdgpu_bo_init(struct amdgpu_device *adev) /* On A+A platform, VRAM can be mapped as WB */ if (!adev->gmc.xgmi.connected_to_cpu) { /* reserve PAT memory space to WC for VRAM */ - arch_io_reserve_memtype_wc(adev->gmc.aper_base, + int r = arch_io_reserve_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size); + if (r) { + DRM_ERROR("Unable to set WC memtype for the aperture base\n"); + return r; + } + /* Add an MTRR for the VRAM */ adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base, adev->gmc.aper_size); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 
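The amdgpu_ih.c hunk above moves the IV budget initialization below the restart_ih label, so every pass over the ring gets the full AMDGPU_IH_MAX_NUM_IVS budget instead of whatever was left from the previous pass. Schematically, with a generic ring and hypothetical callbacks:

#include <stdint.h>

#define MAX_IVS_PER_PASS 32     /* stands in for AMDGPU_IH_MAX_NUM_IVS */

struct ring {
        uint32_t rptr;
        uint32_t (*get_wptr)(struct ring *r);   /* hypothetical */
        void (*handle_one)(struct ring *r);     /* advances rptr */
};

static void process_ring(struct ring *r)
{
        uint32_t wptr = r->get_wptr(r);
        unsigned int budget;

restart:
        budget = MAX_IVS_PER_PASS;      /* refill on every pass (the fix) */

        while (r->rptr != wptr && --budget)
                r->handle_one(r);

        /* New entries may have arrived while draining; pick them up. */
        wptr = r->get_wptr(r);
        if (wptr != r->rptr)
                goto restart;
}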
08133de21fdd..46910e7b2927 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -892,6 +892,38 @@ void amdgpu_ras_mca_query_error_status(struct amdgpu_device *adev, } } +static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data) +{ + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + int ret = 0; + + /* + * choosing right query method according to + * whether smu support query error information + */ + ret = smu_get_ecc_info(&adev->smu, (void *)&(ras->umc_ecc)); + if (ret == -EOPNOTSUPP) { + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->query_ras_error_count) + adev->umc.ras_funcs->query_ras_error_count(adev, err_data); + + /* umc query_ras_error_address is also responsible for clearing + * error status + */ + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->query_ras_error_address) + adev->umc.ras_funcs->query_ras_error_address(adev, err_data); + } else if (!ret) { + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->ecc_info_query_ras_error_count) + adev->umc.ras_funcs->ecc_info_query_ras_error_count(adev, err_data); + + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->ecc_info_query_ras_error_address) + adev->umc.ras_funcs->ecc_info_query_ras_error_address(adev, err_data); + } +} + /* query/inject/cure begin */ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info) @@ -905,15 +937,7 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, switch (info->head.block) { case AMDGPU_RAS_BLOCK__UMC: - if (adev->umc.ras_funcs && - adev->umc.ras_funcs->query_ras_error_count) - adev->umc.ras_funcs->query_ras_error_count(adev, &err_data); - /* umc query_ras_error_address is also responsible for clearing - * error status - */ - if (adev->umc.ras_funcs && - adev->umc.ras_funcs->query_ras_error_address) - adev->umc.ras_funcs->query_ras_error_address(adev, &err_data); + amdgpu_ras_get_ecc_info(adev, &err_data); break; case AMDGPU_RAS_BLOCK__SDMA: if (adev->sdma.funcs->query_ras_error_count) { @@ -1935,9 +1959,11 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev) if (!con || !con->eh_data) return 0; + mutex_lock(&con->recovery_lock); control = &con->eeprom_control; data = con->eh_data; save_count = data->count - control->ras_num_recs; + mutex_unlock(&con->recovery_lock); /* only new entries are saved */ if (save_count > 0) { if (amdgpu_ras_eeprom_append(control, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index e36f4de9fa55..1c708122d492 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -319,6 +319,19 @@ struct ras_common_if { char name[32]; }; +#define MAX_UMC_CHANNEL_NUM 32 + +struct ecc_info_per_ch { + uint16_t ce_count_lo_chip; + uint16_t ce_count_hi_chip; + uint64_t mca_umc_status; + uint64_t mca_umc_addr; +}; + +struct umc_ecc_info { + struct ecc_info_per_ch ecc[MAX_UMC_CHANNEL_NUM]; +}; + struct amdgpu_ras { /* ras infrastructure */ /* for ras itself. 
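The new amdgpu_ras_get_ecc_info() above keys off the return value of smu_get_ecc_info(): -EOPNOTSUPP means the SMU firmware cannot report ECC, so the driver falls back to reading the UMC registers directly, while 0 means the firmware-provided copy is authoritative. The control flow, reduced to a sketch with hypothetical callbacks:

#include <errno.h>

struct dev;

struct ecc_ops {        /* hypothetical dispatch table */
        int  (*query_via_smu)(struct dev *d);    /* may return -EOPNOTSUPP */
        void (*query_via_regs)(struct dev *d);   /* legacy register path */
        void (*query_from_smu_copy)(struct dev *d);
};

static int query_ecc(struct dev *d, const struct ecc_ops *ops)
{
        int ret = ops->query_via_smu(d);

        if (ret == -EOPNOTSUPP) {
                /* Firmware interface absent: old register-based path. */
                ops->query_via_regs(d);
                return 0;
        }
        if (ret)
                return ret;     /* real failure, propagate */

        /* Firmware filled the shared copy; parse that instead. */
        ops->query_from_smu_copy(d);
        return 0;
}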
*/ @@ -358,6 +371,9 @@ struct amdgpu_ras { struct delayed_work ras_counte_delay_work; atomic_t ras_ue_count; atomic_t ras_ce_count; + + /* record umc error info queried from smu */ + struct umc_ecc_info umc_ecc; }; struct ras_fs_data { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index c15687ce67c4..fb0d8bffdce2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -913,11 +913,6 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev, ttm->num_pages, bo_mem, ttm); } - if (bo_mem->mem_type == AMDGPU_PL_GDS || - bo_mem->mem_type == AMDGPU_PL_GWS || - bo_mem->mem_type == AMDGPU_PL_OA) - return -EINVAL; - if (bo_mem->mem_type != TTM_PL_TT || !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) { gtt->offset = AMDGPU_BO_INVALID_OFFSET; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index a90029ee9733..6e4bea012ea4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -94,30 +94,58 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev, { struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + int ret = 0; kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); - if (adev->umc.ras_funcs && - adev->umc.ras_funcs->query_ras_error_count) - adev->umc.ras_funcs->query_ras_error_count(adev, ras_error_status); - - if (adev->umc.ras_funcs && - adev->umc.ras_funcs->query_ras_error_address && - adev->umc.max_ras_err_cnt_per_query) { - err_data->err_addr = - kcalloc(adev->umc.max_ras_err_cnt_per_query, - sizeof(struct eeprom_table_record), GFP_KERNEL); - - /* still call query_ras_error_address to clear error status - * even NOMEM error is encountered - */ - if(!err_data->err_addr) - dev_warn(adev->dev, "Failed to alloc memory for " - "umc error address record!\n"); - - /* umc query_ras_error_address is also responsible for clearing - * error status - */ - adev->umc.ras_funcs->query_ras_error_address(adev, ras_error_status); + ret = smu_get_ecc_info(&adev->smu, (void *)&(con->umc_ecc)); + if (ret == -EOPNOTSUPP) { + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->query_ras_error_count) + adev->umc.ras_funcs->query_ras_error_count(adev, ras_error_status); + + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->query_ras_error_address && + adev->umc.max_ras_err_cnt_per_query) { + err_data->err_addr = + kcalloc(adev->umc.max_ras_err_cnt_per_query, + sizeof(struct eeprom_table_record), GFP_KERNEL); + + /* still call query_ras_error_address to clear error status + * even NOMEM error is encountered + */ + if(!err_data->err_addr) + dev_warn(adev->dev, "Failed to alloc memory for " + "umc error address record!\n"); + + /* umc query_ras_error_address is also responsible for clearing + * error status + */ + adev->umc.ras_funcs->query_ras_error_address(adev, ras_error_status); + } + } else if (!ret) { + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->ecc_info_query_ras_error_count) + adev->umc.ras_funcs->ecc_info_query_ras_error_count(adev, ras_error_status); + + if (adev->umc.ras_funcs && + adev->umc.ras_funcs->ecc_info_query_ras_error_address && + adev->umc.max_ras_err_cnt_per_query) { + err_data->err_addr = + kcalloc(adev->umc.max_ras_err_cnt_per_query, + sizeof(struct eeprom_table_record), GFP_KERNEL); + + /* still call query_ras_error_address to clear error status + * even NOMEM error is encountered + */ + if(!err_data->err_addr) + dev_warn(adev->dev, "Failed 
to alloc memory for " + "umc error address record!\n"); + + /* umc query_ras_error_address is also responsible for clearing + * error status + */ + adev->umc.ras_funcs->ecc_info_query_ras_error_address(adev, ras_error_status); + } } /* only uncorrectable error needs gpu reset */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index 1f5fe2315236..9e40bade0a68 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -49,6 +49,10 @@ struct amdgpu_umc_ras_funcs { void (*query_ras_error_address)(struct amdgpu_device *adev, void *ras_error_status); bool (*query_ras_poison_mode)(struct amdgpu_device *adev); + void (*ecc_info_query_ras_error_count)(struct amdgpu_device *adev, + void *ras_error_status); + void (*ecc_info_query_ras_error_address)(struct amdgpu_device *adev, + void *ras_error_status); }; struct amdgpu_umc_funcs { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 4f7c70845785..585961c2f5f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -135,6 +135,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) break; case IP_VERSION(3, 0, 0): case IP_VERSION(3, 0, 64): + case IP_VERSION(3, 0, 192): if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) fw_name = FIRMWARE_SIENNA_CICHLID; else diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 04cf9b207e62..3fc49823f527 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -283,17 +283,15 @@ static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev) *data = kmalloc(sizeof(struct amdgpu_virt_ras_err_handler_data), GFP_KERNEL); if (!*data) - return -ENOMEM; + goto data_failure; bps = kmalloc_array(align_space, sizeof((*data)->bps), GFP_KERNEL); - bps_bo = kmalloc_array(align_space, sizeof((*data)->bps_bo), GFP_KERNEL); + if (!bps) + goto bps_failure; - if (!bps || !bps_bo) { - kfree(bps); - kfree(bps_bo); - kfree(*data); - return -ENOMEM; - } + bps_bo = kmalloc_array(align_space, sizeof((*data)->bps_bo), GFP_KERNEL); + if (!bps_bo) + goto bps_bo_failure; (*data)->bps = bps; (*data)->bps_bo = bps_bo; @@ -303,6 +301,13 @@ static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev) virt->ras_init_done = true; return 0; + +bps_bo_failure: + kfree(bps); +bps_failure: + kfree(*data); +data_failure: + return -ENOMEM; } static void amdgpu_virt_ras_release_bp(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index ce982afeff91..ac9a8cd21c4b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -504,8 +504,8 @@ static int amdgpu_vkms_sw_fini(void *handle) int i = 0; for (i = 0; i < adev->mode_info.num_crtc; i++) - if (adev->mode_info.crtcs[i]) - hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer); + if (adev->amdgpu_vkms_output[i].vblank_hrtimer.function) + hrtimer_cancel(&adev->amdgpu_vkms_output[i].vblank_hrtimer); kfree(adev->mode_info.bios_hardcoded_edid); kfree(adev->amdgpu_vkms_output); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 0fad2bf854ae..567df2db23ac 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -386,6 +386,7 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct 
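The amdgpu_virt.c rewrite above replaces the "allocate everything, then test everything" pattern with the usual staged-goto unwind, so each failure path frees only what was set up before it. The shape of that idiom in plain C (allocation sizes are placeholders):

#include <stdlib.h>

struct handler_data {
        void *bps;
        void *bps_bo;
};

static int init_handler_data(struct handler_data **out, size_t n)
{
        struct handler_data *d;

        d = calloc(1, sizeof(*d));
        if (!d)
                goto data_failure;

        d->bps = calloc(n, sizeof(long));
        if (!d->bps)
                goto bps_failure;

        d->bps_bo = calloc(n, sizeof(void *));
        if (!d->bps_bo)
                goto bps_bo_failure;

        *out = d;
        return 0;

bps_bo_failure:
        free(d->bps);
bps_failure:
        free(d);
data_failure:
        return -1;      /* the kernel code returns -ENOMEM here */
}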
amdgpu_device *adev) "%s", "xgmi_hive_info"); if (ret) { dev_err(adev->dev, "XGMI: failed initializing kobject for xgmi hive\n"); + kobject_put(&hive->kobj); kfree(hive); hive = NULL; goto pro_end; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index b200b9e722d9..8318ee8339f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -2092,22 +2092,18 @@ static int dce_v8_0_pick_dig_encoder(struct drm_encoder *encoder) return 1; else return 0; - break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: if (dig->linkb) return 3; else return 2; - break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: if (dig->linkb) return 5; else return 4; - break; case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: return 6; - break; default: DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index e7dfeb466a0e..dbe7442fb25c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -7707,8 +7707,19 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev) switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(10, 3, 1): case IP_VERSION(10, 3, 3): - clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh) | - ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh) << 32ULL); + preempt_disable(); + clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh); + clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh); + hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh); + /* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over + * roughly every 42 seconds. + */ + if (hi_check != clock_hi) { + clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh); + clock_hi = hi_check; + } + preempt_enable(); + clock = clock_lo | (clock_hi << 32ULL); break; default: preempt_disable(); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index b4b80f27b894..b305fd39874f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -140,6 +140,11 @@ MODULE_FIRMWARE("amdgpu/aldebaran_rlc.bin"); #define mmTCP_CHAN_STEER_5_ARCT 0x0b0c #define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX 0 +#define mmGOLDEN_TSC_COUNT_UPPER_Renoir 0x0025 +#define mmGOLDEN_TSC_COUNT_UPPER_Renoir_BASE_IDX 1 +#define mmGOLDEN_TSC_COUNT_LOWER_Renoir 0x0026 +#define mmGOLDEN_TSC_COUNT_LOWER_Renoir_BASE_IDX 1 + enum ta_ras_gfx_subblock { /*CPC*/ TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0, @@ -4055,9 +4060,10 @@ static int gfx_v9_0_hw_fini(void *handle) gfx_v9_0_cp_enable(adev, false); - /* Skip suspend with A+A reset */ - if (adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) { - dev_dbg(adev->dev, "Device in reset. 
Skipping RLC halt\n"); + /* Skip stopping RLC with A+A reset or when RLC controls GFX clock */ + if ((adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) || + (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2))) { + dev_dbg(adev->dev, "Skipping RLC halt\n"); return 0; } @@ -4238,19 +4244,38 @@ failed_kiq_read: static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev) { - uint64_t clock; + uint64_t clock, clock_lo, clock_hi, hi_check; - amdgpu_gfx_off_ctrl(adev, false); - mutex_lock(&adev->gfx.gpu_clock_mutex); - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 0, 1) && amdgpu_sriov_runtime(adev)) { - clock = gfx_v9_0_kiq_read_clock(adev); - } else { - WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); - clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | - ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 3, 0): + preempt_disable(); + clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir); + clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir); + hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir); + /* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over + * roughly every 42 seconds. + */ + if (hi_check != clock_hi) { + clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir); + clock_hi = hi_check; + } + preempt_enable(); + clock = clock_lo | (clock_hi << 32ULL); + break; + default: + amdgpu_gfx_off_ctrl(adev, false); + mutex_lock(&adev->gfx.gpu_clock_mutex); + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 0, 1) && amdgpu_sriov_runtime(adev)) { + clock = gfx_v9_0_kiq_read_clock(adev); + } else { + WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); + clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | + ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + } + mutex_unlock(&adev->gfx.gpu_clock_mutex); + amdgpu_gfx_off_ctrl(adev, true); + break; } - mutex_unlock(&adev->gfx.gpu_clock_mutex); - amdgpu_gfx_off_ctrl(adev, true); return clock; } diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c index 1d8414c3fadb..38241cf0e1f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c @@ -160,6 +160,7 @@ static int navi10_ih_toggle_ring_interrupts(struct amdgpu_device *adev, tmp = RREG32(ih_regs->ih_rb_cntl); tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0)); + tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1); /* enable_intr field is only valid in ring0 */ if (ih == &adev->irq.ih) tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 
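Both the gfx v10 (Vangogh) and gfx v9 (Renoir) hunks above switch the clock read to an upper/lower/upper sequence: if the high half changed while the low half was being read, a 32-bit carry happened in between and the low read is retried. At 100 MHz the carry lands roughly every 43 seconds (2^32 cycles). A generic sketch of the pattern, with callbacks standing in for the register reads:

#include <stdint.h>

/* Compose a 64-bit free-running counter from two 32-bit halves that
 * cannot be read atomically.  Re-read the high half to detect a carry
 * during the sequence. */
static uint64_t read_counter64(uint32_t (*read_hi)(void),
                               uint32_t (*read_lo)(void))
{
        uint32_t hi = read_hi();
        uint32_t lo = read_lo();
        uint32_t hi2 = read_hi();

        if (hi2 != hi) {
                /* The low word wrapped between the two reads; the value
                 * in 'lo' belongs to an unknown epoch, so take a fresh
                 * one that is guaranteed to pair with hi2. */
                lo = read_lo();
                hi = hi2;
        }

        return ((uint64_t)hi << 32) | lo;
}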
1 : 0)); @@ -275,10 +276,8 @@ static int navi10_ih_enable_ring(struct amdgpu_device *adev, tmp = navi10_ih_rb_cntl(ih, tmp); if (ih == &adev->irq.ih) tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled); - if (ih == &adev->irq.ih1) { - tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0); + if (ih == &adev->irq.ih1) tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1); - } if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) { if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) { @@ -319,7 +318,6 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev) { struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2}; u32 ih_chicken; - u32 tmp; int ret; int i; @@ -363,15 +361,6 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev) adev->nbio.funcs->ih_doorbell_range(adev, ih[0]->use_doorbell, ih[0]->doorbell_index); - tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL); - tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL, - CLIENT18_IS_STORM_CLIENT, 1); - WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp); - - tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL); - tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1); - WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp); - pci_set_master(adev->pdev); /* enable interrupts */ @@ -420,12 +409,19 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev, u32 wptr, tmp; struct amdgpu_ih_regs *ih_regs; - wptr = le32_to_cpu(*ih->wptr_cpu); - ih_regs = &ih->ih_regs; + if (ih == &adev->irq.ih) { + /* Only ring0 supports writeback. On other rings fall back + * to register-based code with overflow checking below. + */ + wptr = le32_to_cpu(*ih->wptr_cpu); - if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) - goto out; + if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) + goto out; + } + ih_regs = &ih->ih_regs; + + /* Double check that the overflow wasn't already cleared. 
*/ wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr); if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) goto out; @@ -513,15 +509,11 @@ static int navi10_ih_self_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - uint32_t wptr = cpu_to_le32(entry->src_data[0]); - switch (entry->ring_id) { case 1: - *adev->irq.ih1.wptr_cpu = wptr; schedule_work(&adev->irq.ih1_work); break; case 2: - *adev->irq.ih2.wptr_cpu = wptr; schedule_work(&adev->irq.ih2_work); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c index 4ecd2b5808ce..ee7cab37dfd5 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c @@ -359,6 +359,10 @@ static void nbio_v2_3_init_registers(struct amdgpu_device *adev) if (def != data) WREG32_PCIE(smnPCIE_CONFIG_CNTL, data); + + if (amdgpu_sriov_vf(adev)) + adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2; } #define NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT 0x00000000 // off by default, no gains over L1 diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c index 0d2d629e2d6a..4bbacf1be25a 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c @@ -276,6 +276,10 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev) if (def != data) WREG32_PCIE(smnPCIE_CI_CNTL, data); + + if (amdgpu_sriov_vf(adev)) + adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2; } static void nbio_v6_1_program_ltr(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c index 3c00666a13e1..37a4039fdfc5 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c @@ -273,7 +273,9 @@ const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = { static void nbio_v7_0_init_registers(struct amdgpu_device *adev) { - + if (amdgpu_sriov_vf(adev)) + adev->rmmio_remap.reg_offset = + SOC15_REG_OFFSET(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL) << 2; } const struct amdgpu_nbio_funcs nbio_v7_0_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c index 8f2a315e7c73..3444332ea110 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c @@ -371,6 +371,10 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev) if (def != data) WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL), data); } + + if (amdgpu_sriov_vf(adev)) + adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0, + regBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2; } const struct amdgpu_nbio_funcs nbio_v7_2_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index b8bd03d16dba..dc5e93756fea 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -362,7 +362,9 @@ const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg_ald = { static void nbio_v7_4_init_registers(struct amdgpu_device *adev) { - + if (amdgpu_sriov_vf(adev)) + adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2; } static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev) @@ -692,6 +694,9 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev) { 
uint32_t def, data; + if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4)) + return; + def = data = RREG32_PCIE(smnPCIE_LC_CNTL); data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK; data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 59eafa31c626..2ec1ffb36b1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -183,6 +183,7 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode, switch (adev->ip_versions[UVD_HWIP][0]) { case IP_VERSION(3, 0, 0): case IP_VERSION(3, 0, 64): + case IP_VERSION(3, 0, 192): if (amdgpu_sriov_vf(adev)) { if (encode) *codecs = &sriov_sc_video_codecs_encode; @@ -731,8 +732,10 @@ static int nv_common_early_init(void *handle) #define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE) struct amdgpu_device *adev = (struct amdgpu_device *)handle; - adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET; - adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET; + if (!amdgpu_sriov_vf(adev)) { + adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET; + adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET; + } adev->smc_rreg = NULL; adev->smc_wreg = NULL; adev->pcie_rreg = &nv_pcie_rreg; @@ -1032,7 +1035,7 @@ static int nv_common_hw_init(void *handle) * for the purpose of expose those registers * to process space */ - if (adev->nbio.funcs->remap_hdp_registers) + if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev)) adev->nbio.funcs->remap_hdp_registers(adev); /* enable the doorbell aperture */ nv_enable_doorbell_aperture(adev, true); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 0c316a2d42ed..de9b55383e9f 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -971,8 +971,10 @@ static int soc15_common_early_init(void *handle) #define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE) struct amdgpu_device *adev = (struct amdgpu_device *)handle; - adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET; - adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET; + if (!amdgpu_sriov_vf(adev)) { + adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET; + adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET; + } adev->smc_rreg = NULL; adev->smc_wreg = NULL; adev->pcie_rreg = &soc15_pcie_rreg; @@ -1285,7 +1287,7 @@ static int soc15_common_hw_init(void *handle) * for the purpose of expose those registers * to process space */ - if (adev->nbio.funcs->remap_hdp_registers) + if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev)) adev->nbio.funcs->remap_hdp_registers(adev); /* enable the doorbell aperture */ diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c index f7ec3fe134e5..6dd1e19e8d43 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c @@ -50,6 +50,165 @@ static inline uint32_t get_umc_v6_7_reg_offset(struct amdgpu_device *adev, return adev->umc.channel_offs * ch_inst + UMC_V6_7_INST_DIST * umc_inst; } +static inline uint32_t get_umc_v6_7_channel_index(struct amdgpu_device *adev, + uint32_t umc_inst, + uint32_t ch_inst) +{ + return adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst]; +} + +static void umc_v6_7_ecc_info_query_correctable_error_count(struct amdgpu_device *adev, + uint32_t channel_index, + unsigned long *error_count) +{ + uint32_t ecc_err_cnt; + uint64_t mc_umc_status; + struct amdgpu_ras 
*ras = amdgpu_ras_get_context(adev); + + /* + * select the lower chip and check the error count + * skip add error count, calc error counter only from mca_umc_status + */ + ecc_err_cnt = ras->umc_ecc.ecc[channel_index].ce_count_lo_chip; + + /* + * select the higher chip and check the err counter + * skip add error count, calc error counter only from mca_umc_status + */ + ecc_err_cnt = ras->umc_ecc.ecc[channel_index].ce_count_hi_chip; + + /* check for SRAM correctable error + MCUMC_STATUS is a 64 bit register */ + mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status; + if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) + *error_count += 1; +} + +static void umc_v6_7_ecc_info_querry_uncorrectable_error_count(struct amdgpu_device *adev, + uint32_t channel_index, + unsigned long *error_count) +{ + uint64_t mc_umc_status; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + + /* check the MCUMC_STATUS */ + mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status; + if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)) + *error_count += 1; +} + +static void umc_v6_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + + uint32_t umc_inst = 0; + uint32_t ch_inst = 0; + uint32_t umc_reg_offset = 0; + uint32_t channel_index = 0; + + /*TODO: driver needs to toggle DF Cstate to ensure + * safe access of UMC registers. 
Will add the protection */ + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { + umc_reg_offset = get_umc_v6_7_reg_offset(adev, + umc_inst, + ch_inst); + channel_index = get_umc_v6_7_channel_index(adev, + umc_inst, + ch_inst); + umc_v6_7_ecc_info_query_correctable_error_count(adev, + channel_index, + &(err_data->ce_count)); + umc_v6_7_ecc_info_querry_uncorrectable_error_count(adev, + channel_index, + &(err_data->ue_count)); + } +} + +static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev, + struct ras_err_data *err_data, + uint32_t umc_reg_offset, + uint32_t ch_inst, + uint32_t umc_inst) +{ + uint64_t mc_umc_status, err_addr, retired_page; + struct eeprom_table_record *err_rec; + uint32_t channel_index; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + + channel_index = + adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst]; + + mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status; + + if (mc_umc_status == 0) + return; + + if (!err_data->err_addr) + return; + + err_rec = &err_data->err_addr[err_data->err_addr_cnt]; + + /* calculate error address if ue/ce error is detected */ + if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) { + + err_addr = ras->umc_ecc.ecc[channel_index].mca_umc_addr; + err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr); + + /* translate umc channel address to soc pa, 3 parts are included */ + retired_page = ADDR_OF_8KB_BLOCK(err_addr) | + ADDR_OF_256B_BLOCK(channel_index) | + OFFSET_IN_256B_BLOCK(err_addr); + + /* we only save ue error information currently, ce is skipped */ + if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) + == 1) { + err_rec->address = err_addr; + /* page frame address is saved */ + err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT; + err_rec->ts = (uint64_t)ktime_get_real_seconds(); + err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE; + err_rec->cu = 0; + err_rec->mem_channel = channel_index; + err_rec->mcumc_id = umc_inst; + + err_data->err_addr_cnt++; + } + } +} + +static void umc_v6_7_ecc_info_query_ras_error_address(struct amdgpu_device *adev, + void *ras_error_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + + uint32_t umc_inst = 0; + uint32_t ch_inst = 0; + uint32_t umc_reg_offset = 0; + + /*TODO: driver needs to toggle DF Cstate to ensure + * safe access of UMC resgisters. 
Will add the protection + * when firmware interface is ready */ + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { + umc_reg_offset = get_umc_v6_7_reg_offset(adev, + umc_inst, + ch_inst); + umc_v6_7_ecc_info_query_error_address(adev, + err_data, + umc_reg_offset, + ch_inst, + umc_inst); + } +} + static void umc_v6_7_query_correctable_error_count(struct amdgpu_device *adev, uint32_t umc_reg_offset, unsigned long *error_count) @@ -327,4 +486,6 @@ const struct amdgpu_umc_ras_funcs umc_v6_7_ras_funcs = { .query_ras_error_count = umc_v6_7_query_ras_error_count, .query_ras_error_address = umc_v6_7_query_ras_error_address, .query_ras_poison_mode = umc_v6_7_query_ras_poison_mode, + .ecc_info_query_ras_error_count = umc_v6_7_ecc_info_query_ras_error_count, + .ecc_info_query_ras_error_address = umc_v6_7_ecc_info_query_ras_error_address, }; diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c index f6233019f042..d60576ce10cd 100644 --- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c +++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c @@ -43,15 +43,15 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev, */ if ((ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT || ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) && - dev->device_info->asic_family == CHIP_HAWAII) { + dev->adev->asic_type == CHIP_HAWAII) { struct cik_ih_ring_entry *tmp_ihre = (struct cik_ih_ring_entry *)patched_ihre; *patched_flag = true; *tmp_ihre = *ihre; - vmid = f2g->read_vmid_from_vmfault_reg(dev->kgd); - ret = f2g->get_atc_vmid_pasid_mapping_info(dev->kgd, vmid, &pasid); + vmid = f2g->read_vmid_from_vmfault_reg(dev->adev); + ret = f2g->get_atc_vmid_pasid_mapping_info(dev->adev, vmid, &pasid); tmp_ihre->ring_id &= 0x000000ff; tmp_ihre->ring_id |= vmid << 8; @@ -113,7 +113,7 @@ static void cik_event_interrupt_wq(struct kfd_dev *dev, kfd_process_vm_fault(dev->dqm, pasid); memset(&info, 0, sizeof(info)); - amdgpu_amdkfd_gpuvm_get_vm_fault_info(dev->kgd, &info); + amdgpu_amdkfd_gpuvm_get_vm_fault_info(dev->adev, &info); if (!info.page_addr && !info.status) return; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 24ebd61395d8..4bfc0c8ab764 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -321,7 +321,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, /* Return gpu_id as doorbell offset for mmap usage */ args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL; args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id); - if (KFD_IS_SOC15(dev->device_info->asic_family)) + if (KFD_IS_SOC15(dev)) /* On SOC15 ASICs, include the doorbell offset within the * process doorbell frame, which is 2 pages. 
*/ @@ -580,7 +580,7 @@ static int kfd_ioctl_dbg_register(struct file *filep, if (!dev) return -EINVAL; - if (dev->device_info->asic_family == CHIP_CARRIZO) { + if (dev->adev->asic_type == CHIP_CARRIZO) { pr_debug("kfd_ioctl_dbg_register not supported on CZ\n"); return -EINVAL; } @@ -631,7 +631,7 @@ static int kfd_ioctl_dbg_unregister(struct file *filep, if (!dev || !dev->dbgmgr) return -EINVAL; - if (dev->device_info->asic_family == CHIP_CARRIZO) { + if (dev->adev->asic_type == CHIP_CARRIZO) { pr_debug("kfd_ioctl_dbg_unregister not supported on CZ\n"); return -EINVAL; } @@ -676,7 +676,7 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep, if (!dev) return -EINVAL; - if (dev->device_info->asic_family == CHIP_CARRIZO) { + if (dev->adev->asic_type == CHIP_CARRIZO) { pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n"); return -EINVAL; } @@ -784,7 +784,7 @@ static int kfd_ioctl_dbg_wave_control(struct file *filep, if (!dev) return -EINVAL; - if (dev->device_info->asic_family == CHIP_CARRIZO) { + if (dev->adev->asic_type == CHIP_CARRIZO) { pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n"); return -EINVAL; } @@ -851,7 +851,7 @@ static int kfd_ioctl_get_clock_counters(struct file *filep, dev = kfd_device_by_id(args->gpu_id); if (dev) /* Reading GPU clock counter from KGD */ - args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(dev->kgd); + args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(dev->adev); else /* Node without GPU resource */ args->gpu_clock_counter = 0; @@ -1041,7 +1041,7 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p, goto out_unlock; } - err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kfd->kgd, + err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kfd->adev, mem, &kern_addr, &size); if (err) { pr_err("Failed to map event page to kernel\n"); @@ -1051,7 +1051,7 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p, err = kfd_event_page_set(p, kern_addr, size); if (err) { pr_err("Failed to set event page\n"); - amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(kfd->kgd, mem); + amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(kfd->adev, mem); goto out_unlock; } @@ -1137,7 +1137,7 @@ static int kfd_ioctl_set_scratch_backing_va(struct file *filep, if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS && pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va) dev->kfd2kgd->set_scratch_backing_va( - dev->kgd, args->va_addr, pdd->qpd.vmid); + dev->adev, args->va_addr, pdd->qpd.vmid); return 0; @@ -1158,7 +1158,7 @@ static int kfd_ioctl_get_tile_config(struct file *filep, if (!dev) return -EINVAL; - amdgpu_amdkfd_get_tile_config(dev->kgd, &config); + amdgpu_amdkfd_get_tile_config(dev->adev, &config); args->gb_addr_config = config.gb_addr_config; args->num_banks = config.num_banks; @@ -1244,7 +1244,7 @@ bool kfd_dev_is_large_bar(struct kfd_dev *dev) if (dev->use_iommu_v2) return false; - amdgpu_amdkfd_get_local_mem_info(dev->kgd, &mem_info); + amdgpu_amdkfd_get_local_mem_info(dev->adev, &mem_info); if (mem_info.local_mem_size_private == 0 && mem_info.local_mem_size_public > 0) return true; @@ -1313,7 +1313,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, err = -EINVAL; goto err_unlock; } - offset = amdgpu_amdkfd_get_mmio_remap_phys_addr(dev->kgd); + offset = dev->adev->rmmio_remap.bus_addr; if (!offset) { err = -ENOMEM; goto err_unlock; @@ -1321,7 +1321,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, } err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( - dev->kgd, 
args->va_addr, args->size, + dev->adev, args->va_addr, args->size, pdd->drm_priv, (struct kgd_mem **) &mem, &offset, flags); @@ -1353,7 +1353,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, return 0; err_free: - amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, + amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, (struct kgd_mem *)mem, pdd->drm_priv, NULL); err_unlock: mutex_unlock(&p->mutex); @@ -1399,7 +1399,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep, goto err_unlock; } - ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, + ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, (struct kgd_mem *)mem, pdd->drm_priv, &size); /* If freeing the buffer failed, leave the handle in place for @@ -1484,7 +1484,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep, goto get_mem_obj_from_handle_failed; } err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu( - peer->kgd, (struct kgd_mem *)mem, + peer->adev, (struct kgd_mem *)mem, peer_pdd->drm_priv, &table_freed); if (err) { pr_err("Failed to map to gpu %d/%d\n", @@ -1496,7 +1496,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep, mutex_unlock(&p->mutex); - err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, (struct kgd_mem *) mem, true); + err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true); if (err) { pr_debug("Sync memory failed, wait interrupted by user signal\n"); goto sync_memory_failed; @@ -1593,7 +1593,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep, goto get_mem_obj_from_handle_failed; } err = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( - peer->kgd, (struct kgd_mem *)mem, peer_pdd->drm_priv); + peer->adev, (struct kgd_mem *)mem, peer_pdd->drm_priv); if (err) { pr_err("Failed to unmap from gpu %d/%d\n", i, args->n_devices); @@ -1603,8 +1603,8 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep, } mutex_unlock(&p->mutex); - if (dev->device_info->asic_family == CHIP_ALDEBARAN) { - err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, + if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) { + err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true); if (err) { pr_debug("Sync memory failed, wait interrupted by user signal\n"); @@ -1680,7 +1680,7 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep, { struct kfd_ioctl_get_dmabuf_info_args *args = data; struct kfd_dev *dev = NULL; - struct kgd_dev *dma_buf_kgd; + struct amdgpu_device *dmabuf_adev; void *metadata_buffer = NULL; uint32_t flags; unsigned int i; @@ -1700,15 +1700,15 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep, } /* Get dmabuf info from KGD */ - r = amdgpu_amdkfd_get_dmabuf_info(dev->kgd, args->dmabuf_fd, - &dma_buf_kgd, &args->size, + r = amdgpu_amdkfd_get_dmabuf_info(dev->adev, args->dmabuf_fd, + &dmabuf_adev, &args->size, metadata_buffer, args->metadata_size, &args->metadata_size, &flags); if (r) goto exit; /* Reverse-lookup gpu_id from kgd pointer */ - dev = kfd_device_by_kgd(dma_buf_kgd); + dev = kfd_device_by_adev(dmabuf_adev); if (!dev) { r = -EINVAL; goto exit; @@ -1758,7 +1758,7 @@ static int kfd_ioctl_import_dmabuf(struct file *filep, goto err_unlock; } - r = amdgpu_amdkfd_gpuvm_import_dmabuf(dev->kgd, dmabuf, + r = amdgpu_amdkfd_gpuvm_import_dmabuf(dev->adev, dmabuf, args->va_addr, pdd->drm_priv, (struct kgd_mem **)&mem, &size, NULL); @@ -1779,7 +1779,7 @@ static int kfd_ioctl_import_dmabuf(struct file *filep, return 0; err_free: - amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, + 
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, (struct kgd_mem *)mem, pdd->drm_priv, NULL); err_unlock: mutex_unlock(&p->mutex); @@ -2066,7 +2066,7 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process, if (vma->vm_end - vma->vm_start != PAGE_SIZE) return -EINVAL; - address = amdgpu_amdkfd_get_mmio_remap_phys_addr(dev->kgd); + address = dev->adev->rmmio_remap.bus_addr; vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index cfedfb1e8596..f187596faf66 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -1340,7 +1340,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev, int ret; unsigned int num_cu_shared; - switch (kdev->device_info->asic_family) { + switch (kdev->adev->asic_type) { case CHIP_KAVERI: pcache_info = kaveri_cache_info; num_of_cache_types = ARRAY_SIZE(kaveri_cache_info); @@ -1377,67 +1377,71 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev, pcache_info = vegam_cache_info; num_of_cache_types = ARRAY_SIZE(vegam_cache_info); break; - case CHIP_VEGA10: - pcache_info = vega10_cache_info; - num_of_cache_types = ARRAY_SIZE(vega10_cache_info); - break; - case CHIP_VEGA12: - pcache_info = vega12_cache_info; - num_of_cache_types = ARRAY_SIZE(vega12_cache_info); - break; - case CHIP_VEGA20: - case CHIP_ARCTURUS: - pcache_info = vega20_cache_info; - num_of_cache_types = ARRAY_SIZE(vega20_cache_info); - break; - case CHIP_ALDEBARAN: - pcache_info = aldebaran_cache_info; - num_of_cache_types = ARRAY_SIZE(aldebaran_cache_info); - break; - case CHIP_RAVEN: - pcache_info = raven_cache_info; - num_of_cache_types = ARRAY_SIZE(raven_cache_info); - break; - case CHIP_RENOIR: - pcache_info = renoir_cache_info; - num_of_cache_types = ARRAY_SIZE(renoir_cache_info); - break; - case CHIP_NAVI10: - case CHIP_NAVI12: - case CHIP_CYAN_SKILLFISH: - pcache_info = navi10_cache_info; - num_of_cache_types = ARRAY_SIZE(navi10_cache_info); - break; - case CHIP_NAVI14: - pcache_info = navi14_cache_info; - num_of_cache_types = ARRAY_SIZE(navi14_cache_info); - break; - case CHIP_SIENNA_CICHLID: - pcache_info = sienna_cichlid_cache_info; - num_of_cache_types = ARRAY_SIZE(sienna_cichlid_cache_info); - break; - case CHIP_NAVY_FLOUNDER: - pcache_info = navy_flounder_cache_info; - num_of_cache_types = ARRAY_SIZE(navy_flounder_cache_info); - break; - case CHIP_DIMGREY_CAVEFISH: - pcache_info = dimgrey_cavefish_cache_info; - num_of_cache_types = ARRAY_SIZE(dimgrey_cavefish_cache_info); - break; - case CHIP_VANGOGH: - pcache_info = vangogh_cache_info; - num_of_cache_types = ARRAY_SIZE(vangogh_cache_info); - break; - case CHIP_BEIGE_GOBY: - pcache_info = beige_goby_cache_info; - num_of_cache_types = ARRAY_SIZE(beige_goby_cache_info); - break; - case CHIP_YELLOW_CARP: - pcache_info = yellow_carp_cache_info; - num_of_cache_types = ARRAY_SIZE(yellow_carp_cache_info); - break; default: - return -EINVAL; + switch(KFD_GC_VERSION(kdev)) { + case IP_VERSION(9, 0, 1): + pcache_info = vega10_cache_info; + num_of_cache_types = ARRAY_SIZE(vega10_cache_info); + break; + case IP_VERSION(9, 2, 1): + pcache_info = vega12_cache_info; + num_of_cache_types = ARRAY_SIZE(vega12_cache_info); + break; + case IP_VERSION(9, 4, 0): + case IP_VERSION(9, 4, 1): + pcache_info = vega20_cache_info; + num_of_cache_types = ARRAY_SIZE(vega20_cache_info); + break; + case IP_VERSION(9, 4, 2): + pcache_info = aldebaran_cache_info; + 
num_of_cache_types = ARRAY_SIZE(aldebaran_cache_info); + break; + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 2, 2): + pcache_info = raven_cache_info; + num_of_cache_types = ARRAY_SIZE(raven_cache_info); + break; + case IP_VERSION(9, 3, 0): + pcache_info = renoir_cache_info; + num_of_cache_types = ARRAY_SIZE(renoir_cache_info); + break; + case IP_VERSION(10, 1, 10): + case IP_VERSION(10, 1, 2): + case IP_VERSION(10, 1, 3): + pcache_info = navi10_cache_info; + num_of_cache_types = ARRAY_SIZE(navi10_cache_info); + break; + case IP_VERSION(10, 1, 1): + pcache_info = navi14_cache_info; + num_of_cache_types = ARRAY_SIZE(navi14_cache_info); + break; + case IP_VERSION(10, 3, 0): + pcache_info = sienna_cichlid_cache_info; + num_of_cache_types = ARRAY_SIZE(sienna_cichlid_cache_info); + break; + case IP_VERSION(10, 3, 2): + pcache_info = navy_flounder_cache_info; + num_of_cache_types = ARRAY_SIZE(navy_flounder_cache_info); + break; + case IP_VERSION(10, 3, 4): + pcache_info = dimgrey_cavefish_cache_info; + num_of_cache_types = ARRAY_SIZE(dimgrey_cavefish_cache_info); + break; + case IP_VERSION(10, 3, 1): + pcache_info = vangogh_cache_info; + num_of_cache_types = ARRAY_SIZE(vangogh_cache_info); + break; + case IP_VERSION(10, 3, 5): + pcache_info = beige_goby_cache_info; + num_of_cache_types = ARRAY_SIZE(beige_goby_cache_info); + break; + case IP_VERSION(10, 3, 3): + pcache_info = yellow_carp_cache_info; + num_of_cache_types = ARRAY_SIZE(yellow_carp_cache_info); + break; + default: + return -EINVAL; + } } *size_filled = 0; @@ -1963,8 +1967,6 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size, struct crat_subtype_iolink *sub_type_hdr, uint32_t proximity_domain) { - struct amdgpu_device *adev = (struct amdgpu_device *)kdev->kgd; - *avail_size -= sizeof(struct crat_subtype_iolink); if (*avail_size < 0) return -ENOMEM; @@ -1981,7 +1983,7 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size, /* Fill in IOLINK subtype. 
* TODO: Fill-in other fields of iolink subtype */ - if (adev->gmc.xgmi.connected_to_cpu) { + if (kdev->adev->gmc.xgmi.connected_to_cpu) { /* * with host gpu xgmi link, host can access gpu memory whether * or not pcie bar type is large, so always create bidirectional @@ -1990,19 +1992,19 @@ static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size, sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL; sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI; sub_type_hdr->num_hops_xgmi = 1; - if (adev->asic_type == CHIP_ALDEBARAN) { + if (KFD_GC_VERSION(kdev) == IP_VERSION(9, 4, 2)) { sub_type_hdr->minimum_bandwidth_mbs = amdgpu_amdkfd_get_xgmi_bandwidth_mbytes( - kdev->kgd, NULL, true); + kdev->adev, NULL, true); sub_type_hdr->maximum_bandwidth_mbs = sub_type_hdr->minimum_bandwidth_mbs; } } else { sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS; sub_type_hdr->minimum_bandwidth_mbs = - amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->kgd, true); + amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->adev, true); sub_type_hdr->maximum_bandwidth_mbs = - amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->kgd, false); + amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->adev, false); } sub_type_hdr->proximity_domain_from = proximity_domain; @@ -2044,11 +2046,11 @@ static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size, sub_type_hdr->proximity_domain_from = proximity_domain_from; sub_type_hdr->proximity_domain_to = proximity_domain_to; sub_type_hdr->num_hops_xgmi = - amdgpu_amdkfd_get_xgmi_hops_count(kdev->kgd, peer_kdev->kgd); + amdgpu_amdkfd_get_xgmi_hops_count(kdev->adev, peer_kdev->adev); sub_type_hdr->maximum_bandwidth_mbs = - amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->kgd, peer_kdev->kgd, false); + amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, peer_kdev->adev, false); sub_type_hdr->minimum_bandwidth_mbs = sub_type_hdr->maximum_bandwidth_mbs ? - amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->kgd, NULL, true) : 0; + amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->adev, NULL, true) : 0; return 0; } @@ -2114,7 +2116,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, cu->flags |= CRAT_CU_FLAGS_GPU_PRESENT; cu->proximity_domain = proximity_domain; - amdgpu_amdkfd_get_cu_info(kdev->kgd, &cu_info); + amdgpu_amdkfd_get_cu_info(kdev->adev, &cu_info); cu->num_simd_per_cu = cu_info.simd_per_cu; cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number; cu->max_waves_simd = cu_info.max_waves_per_simd; @@ -2145,7 +2147,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, * report the total FB size (public+private) as a single * private heap. 
*/ - amdgpu_amdkfd_get_local_mem_info(kdev->kgd, &local_mem_info); + amdgpu_amdkfd_get_local_mem_info(kdev->adev, &local_mem_info); sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr + sub_type_hdr->length); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c index 159add0f5aaa..1e30717b5253 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c @@ -41,7 +41,7 @@ static void dbgdev_address_watch_disable_nodiq(struct kfd_dev *dev) { - dev->kfd2kgd->address_watch_disable(dev->kgd); + dev->kfd2kgd->address_watch_disable(dev->adev); } static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev, @@ -322,7 +322,7 @@ static int dbgdev_address_watch_nodiq(struct kfd_dbgdev *dbgdev, pr_debug("\t\t%30s\n", "* * * * * * * * * * * * * * * * * *"); pdd->dev->kfd2kgd->address_watch_execute( - dbgdev->dev->kgd, + dbgdev->dev->adev, i, cntl.u32All, addrHi.u32All, @@ -420,7 +420,7 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev, aw_reg_add_dword = dbgdev->dev->kfd2kgd->address_watch_get_offset( - dbgdev->dev->kgd, + dbgdev->dev->adev, i, ADDRESS_WATCH_REG_CNTL); @@ -431,7 +431,7 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev, aw_reg_add_dword = dbgdev->dev->kfd2kgd->address_watch_get_offset( - dbgdev->dev->kgd, + dbgdev->dev->adev, i, ADDRESS_WATCH_REG_ADDR_HI); @@ -441,7 +441,7 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev, aw_reg_add_dword = dbgdev->dev->kfd2kgd->address_watch_get_offset( - dbgdev->dev->kgd, + dbgdev->dev->adev, i, ADDRESS_WATCH_REG_ADDR_LO); @@ -457,7 +457,7 @@ static int dbgdev_address_watch_diq(struct kfd_dbgdev *dbgdev, aw_reg_add_dword = dbgdev->dev->kfd2kgd->address_watch_get_offset( - dbgdev->dev->kgd, + dbgdev->dev->adev, i, ADDRESS_WATCH_REG_CNTL); @@ -752,7 +752,7 @@ static int dbgdev_wave_control_nodiq(struct kfd_dbgdev *dbgdev, pr_debug("\t\t %30s\n", "* * * * * * * * * * * * * * * * * *"); - return dbgdev->dev->kfd2kgd->wave_control_execute(dbgdev->dev->kgd, + return dbgdev->dev->kfd2kgd->wave_control_execute(dbgdev->dev->adev, reg_gfx_index.u32All, reg_sq_cmd.u32All); } @@ -784,7 +784,7 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p) for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) { status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info - (dev->kgd, vmid, &queried_pasid); + (dev->adev, vmid, &queried_pasid); if (status && queried_pasid == p->pasid) { pr_debug("Killing wave fronts of vmid %d and pasid 0x%x\n", @@ -811,7 +811,7 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p) /* for non DIQ we need to patch the VMID: */ reg_sq_cmd.bits.vm_id = vmid; - dev->kfd2kgd->wave_control_execute(dev->kgd, + dev->kfd2kgd->wave_control_execute(dev->adev, reg_gfx_index.u32All, reg_sq_cmd.u32All); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 3b119db16003..e1294fba0c26 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -55,7 +55,6 @@ extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd; #ifdef KFD_SUPPORT_IOMMU_V2 static const struct kfd_device_info kaveri_device_info = { - .asic_family = CHIP_KAVERI, .asic_name = "kaveri", .gfx_target_version = 70000, .max_pasid_bits = 16, @@ -69,13 +68,10 @@ static const struct kfd_device_info kaveri_device_info = { .supports_cwsr = false, .needs_iommu_device = true, .needs_pci_atomics = false, - .num_sdma_engines = 2, - 
.num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info carrizo_device_info = { - .asic_family = CHIP_CARRIZO, .asic_name = "carrizo", .gfx_target_version = 80001, .max_pasid_bits = 16, @@ -89,13 +85,10 @@ static const struct kfd_device_info carrizo_device_info = { .supports_cwsr = true, .needs_iommu_device = true, .needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info raven_device_info = { - .asic_family = CHIP_RAVEN, .asic_name = "raven", .gfx_target_version = 90002, .max_pasid_bits = 16, @@ -108,15 +101,12 @@ static const struct kfd_device_info raven_device_info = { .supports_cwsr = true, .needs_iommu_device = true, .needs_pci_atomics = true, - .num_sdma_engines = 1, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; #endif #ifdef CONFIG_DRM_AMDGPU_CIK static const struct kfd_device_info hawaii_device_info = { - .asic_family = CHIP_HAWAII, .asic_name = "hawaii", .gfx_target_version = 70001, .max_pasid_bits = 16, @@ -130,14 +120,11 @@ static const struct kfd_device_info hawaii_device_info = { .supports_cwsr = false, .needs_iommu_device = false, .needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; #endif static const struct kfd_device_info tonga_device_info = { - .asic_family = CHIP_TONGA, .asic_name = "tonga", .gfx_target_version = 80002, .max_pasid_bits = 16, @@ -150,13 +137,10 @@ static const struct kfd_device_info tonga_device_info = { .supports_cwsr = false, .needs_iommu_device = false, .needs_pci_atomics = true, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info fiji_device_info = { - .asic_family = CHIP_FIJI, .asic_name = "fiji", .gfx_target_version = 80003, .max_pasid_bits = 16, @@ -169,13 +153,10 @@ static const struct kfd_device_info fiji_device_info = { .supports_cwsr = true, .needs_iommu_device = false, .needs_pci_atomics = true, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info fiji_vf_device_info = { - .asic_family = CHIP_FIJI, .asic_name = "fiji", .gfx_target_version = 80003, .max_pasid_bits = 16, @@ -188,14 +169,11 @@ static const struct kfd_device_info fiji_vf_device_info = { .supports_cwsr = true, .needs_iommu_device = false, .needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info polaris10_device_info = { - .asic_family = CHIP_POLARIS10, .asic_name = "polaris10", .gfx_target_version = 80003, .max_pasid_bits = 16, @@ -208,13 +186,10 @@ static const struct kfd_device_info polaris10_device_info = { .supports_cwsr = true, .needs_iommu_device = false, .needs_pci_atomics = true, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info polaris10_vf_device_info = { - .asic_family = CHIP_POLARIS10, .asic_name = "polaris10", .gfx_target_version = 80003, .max_pasid_bits = 16, @@ -227,13 +202,10 @@ static const struct kfd_device_info polaris10_vf_device_info = { .supports_cwsr = true, .needs_iommu_device = false, .needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info polaris11_device_info = { - .asic_family = CHIP_POLARIS11, .asic_name = 
"polaris11", .gfx_target_version = 80003, .max_pasid_bits = 16, @@ -246,13 +218,10 @@ static const struct kfd_device_info polaris11_device_info = { .supports_cwsr = true, .needs_iommu_device = false, .needs_pci_atomics = true, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info polaris12_device_info = { - .asic_family = CHIP_POLARIS12, .asic_name = "polaris12", .gfx_target_version = 80003, .max_pasid_bits = 16, @@ -265,13 +234,10 @@ static const struct kfd_device_info polaris12_device_info = { .supports_cwsr = true, .needs_iommu_device = false, .needs_pci_atomics = true, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info vegam_device_info = { - .asic_family = CHIP_VEGAM, .asic_name = "vegam", .gfx_target_version = 80003, .max_pasid_bits = 16, @@ -284,13 +250,10 @@ static const struct kfd_device_info vegam_device_info = { .supports_cwsr = true, .needs_iommu_device = false, .needs_pci_atomics = true, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info vega10_device_info = { - .asic_family = CHIP_VEGA10, .asic_name = "vega10", .gfx_target_version = 90000, .max_pasid_bits = 16, @@ -303,13 +266,10 @@ static const struct kfd_device_info vega10_device_info = { .supports_cwsr = true, .needs_iommu_device = false, .needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info vega10_vf_device_info = { - .asic_family = CHIP_VEGA10, .asic_name = "vega10", .gfx_target_version = 90000, .max_pasid_bits = 16, @@ -322,13 +282,10 @@ static const struct kfd_device_info vega10_vf_device_info = { .supports_cwsr = true, .needs_iommu_device = false, .needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info vega12_device_info = { - .asic_family = CHIP_VEGA12, .asic_name = "vega12", .gfx_target_version = 90004, .max_pasid_bits = 16, @@ -341,13 +298,10 @@ static const struct kfd_device_info vega12_device_info = { .supports_cwsr = true, .needs_iommu_device = false, .needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info vega20_device_info = { - .asic_family = CHIP_VEGA20, .asic_name = "vega20", .gfx_target_version = 90006, .max_pasid_bits = 16, @@ -360,13 +314,10 @@ static const struct kfd_device_info vega20_device_info = { .supports_cwsr = true, .needs_iommu_device = false, .needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 8, }; static const struct kfd_device_info arcturus_device_info = { - .asic_family = CHIP_ARCTURUS, .asic_name = "arcturus", .gfx_target_version = 90008, .max_pasid_bits = 16, @@ -379,13 +330,10 @@ static const struct kfd_device_info arcturus_device_info = { .supports_cwsr = true, .needs_iommu_device = false, .needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 6, .num_sdma_queues_per_engine = 8, }; static const struct kfd_device_info aldebaran_device_info = { - .asic_family = CHIP_ALDEBARAN, .asic_name = "aldebaran", .gfx_target_version = 90010, .max_pasid_bits = 16, @@ -398,13 +346,10 @@ static const struct kfd_device_info aldebaran_device_info = { .supports_cwsr = true, .needs_iommu_device = false, 
.needs_pci_atomics = false, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 3, .num_sdma_queues_per_engine = 8, }; static const struct kfd_device_info renoir_device_info = { - .asic_family = CHIP_RENOIR, .asic_name = "renoir", .gfx_target_version = 90012, .max_pasid_bits = 16, @@ -417,13 +362,10 @@ static const struct kfd_device_info renoir_device_info = { .supports_cwsr = true, .needs_iommu_device = false, .needs_pci_atomics = false, - .num_sdma_engines = 1, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info navi10_device_info = { - .asic_family = CHIP_NAVI10, .asic_name = "navi10", .gfx_target_version = 100100, .max_pasid_bits = 16, @@ -437,13 +379,10 @@ static const struct kfd_device_info navi10_device_info = { .supports_cwsr = true, .needs_pci_atomics = true, .no_atomic_fw_version = 145, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 8, }; static const struct kfd_device_info navi12_device_info = { - .asic_family = CHIP_NAVI12, .asic_name = "navi12", .gfx_target_version = 100101, .max_pasid_bits = 16, @@ -457,13 +396,10 @@ static const struct kfd_device_info navi12_device_info = { .supports_cwsr = true, .needs_pci_atomics = true, .no_atomic_fw_version = 145, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 8, }; static const struct kfd_device_info navi14_device_info = { - .asic_family = CHIP_NAVI14, .asic_name = "navi14", .gfx_target_version = 100102, .max_pasid_bits = 16, @@ -477,13 +413,10 @@ static const struct kfd_device_info navi14_device_info = { .supports_cwsr = true, .needs_pci_atomics = true, .no_atomic_fw_version = 145, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 8, }; static const struct kfd_device_info sienna_cichlid_device_info = { - .asic_family = CHIP_SIENNA_CICHLID, .asic_name = "sienna_cichlid", .gfx_target_version = 100300, .max_pasid_bits = 16, @@ -497,13 +430,10 @@ static const struct kfd_device_info sienna_cichlid_device_info = { .supports_cwsr = true, .needs_pci_atomics = true, .no_atomic_fw_version = 92, - .num_sdma_engines = 4, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 8, }; static const struct kfd_device_info navy_flounder_device_info = { - .asic_family = CHIP_NAVY_FLOUNDER, .asic_name = "navy_flounder", .gfx_target_version = 100301, .max_pasid_bits = 16, @@ -517,13 +447,10 @@ static const struct kfd_device_info navy_flounder_device_info = { .supports_cwsr = true, .needs_pci_atomics = true, .no_atomic_fw_version = 92, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 8, }; static const struct kfd_device_info vangogh_device_info = { - .asic_family = CHIP_VANGOGH, .asic_name = "vangogh", .gfx_target_version = 100303, .max_pasid_bits = 16, @@ -537,13 +464,10 @@ static const struct kfd_device_info vangogh_device_info = { .supports_cwsr = true, .needs_pci_atomics = true, .no_atomic_fw_version = 92, - .num_sdma_engines = 1, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info dimgrey_cavefish_device_info = { - .asic_family = CHIP_DIMGREY_CAVEFISH, .asic_name = "dimgrey_cavefish", .gfx_target_version = 100302, .max_pasid_bits = 16, @@ -557,13 +481,10 @@ static const struct kfd_device_info dimgrey_cavefish_device_info = { .supports_cwsr = true, .needs_pci_atomics = true, .no_atomic_fw_version = 92, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 8, }; static const 
struct kfd_device_info beige_goby_device_info = { - .asic_family = CHIP_BEIGE_GOBY, .asic_name = "beige_goby", .gfx_target_version = 100304, .max_pasid_bits = 16, @@ -577,13 +498,10 @@ static const struct kfd_device_info beige_goby_device_info = { .supports_cwsr = true, .needs_pci_atomics = true, .no_atomic_fw_version = 92, - .num_sdma_engines = 1, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 8, }; static const struct kfd_device_info yellow_carp_device_info = { - .asic_family = CHIP_YELLOW_CARP, .asic_name = "yellow_carp", .gfx_target_version = 100305, .max_pasid_bits = 16, @@ -597,13 +515,10 @@ static const struct kfd_device_info yellow_carp_device_info = { .supports_cwsr = true, .needs_pci_atomics = true, .no_atomic_fw_version = 92, - .num_sdma_engines = 1, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 2, }; static const struct kfd_device_info cyan_skillfish_device_info = { - .asic_family = CHIP_CYAN_SKILLFISH, .asic_name = "cyan_skillfish", .gfx_target_version = 100103, .max_pasid_bits = 16, @@ -616,8 +531,6 @@ static const struct kfd_device_info cyan_skillfish_device_info = { .needs_iommu_device = false, .supports_cwsr = true, .needs_pci_atomics = true, - .num_sdma_engines = 2, - .num_xgmi_sdma_engines = 0, .num_sdma_queues_per_engine = 8, }; @@ -627,12 +540,11 @@ static void kfd_gtt_sa_fini(struct kfd_dev *kfd); static int kfd_resume(struct kfd_dev *kfd); -struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, bool vf) +struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) { struct kfd_dev *kfd; const struct kfd_device_info *device_info; const struct kfd2kgd_calls *f2g; - struct amdgpu_device *adev = (struct amdgpu_device *)kgd; struct pci_dev *pdev = adev->pdev; switch (adev->asic_type) { @@ -815,8 +727,12 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, bool vf) } if (!device_info || !f2g) { - dev_err(kfd_device, "%s %s not supported in kfd\n", - amdgpu_asic_name[adev->asic_type], vf ? "VF" : ""); + if (adev->ip_versions[GC_HWIP][0]) + dev_err(kfd_device, "GC IP %06x %s not supported in kfd\n", + adev->ip_versions[GC_HWIP][0], vf ? "VF" : ""); + else + dev_err(kfd_device, "%s %s not supported in kfd\n", + amdgpu_asic_name[adev->asic_type], vf ? 
"VF" : ""); return NULL; } @@ -824,7 +740,7 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, bool vf) if (!kfd) return NULL; - kfd->kgd = kgd; + kfd->adev = adev; kfd->device_info = device_info; kfd->pdev = pdev; kfd->init_complete = false; @@ -845,23 +761,23 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, bool vf) static void kfd_cwsr_init(struct kfd_dev *kfd) { if (cwsr_enable && kfd->device_info->supports_cwsr) { - if (kfd->device_info->asic_family < CHIP_VEGA10) { + if (KFD_GC_VERSION(kfd) < IP_VERSION(9, 0, 1)) { BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE); kfd->cwsr_isa = cwsr_trap_gfx8_hex; kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex); - } else if (kfd->device_info->asic_family == CHIP_ARCTURUS) { + } else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)) { BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex) > PAGE_SIZE); kfd->cwsr_isa = cwsr_trap_arcturus_hex; kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex); - } else if (kfd->device_info->asic_family == CHIP_ALDEBARAN) { + } else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)) { BUILD_BUG_ON(sizeof(cwsr_trap_aldebaran_hex) > PAGE_SIZE); kfd->cwsr_isa = cwsr_trap_aldebaran_hex; kfd->cwsr_isa_size = sizeof(cwsr_trap_aldebaran_hex); - } else if (kfd->device_info->asic_family < CHIP_NAVI10) { + } else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 1, 1)) { BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE); kfd->cwsr_isa = cwsr_trap_gfx9_hex; kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex); - } else if (kfd->device_info->asic_family < CHIP_SIENNA_CICHLID) { + } else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 3, 0)) { BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex) > PAGE_SIZE); kfd->cwsr_isa = cwsr_trap_nv1x_hex; kfd->cwsr_isa_size = sizeof(cwsr_trap_nv1x_hex); @@ -882,18 +798,17 @@ static int kfd_gws_init(struct kfd_dev *kfd) if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) return 0; - if (hws_gws_support - || (kfd->device_info->asic_family == CHIP_VEGA10 - && kfd->mec2_fw_version >= 0x81b3) - || (kfd->device_info->asic_family >= CHIP_VEGA12 - && kfd->device_info->asic_family <= CHIP_RAVEN - && kfd->mec2_fw_version >= 0x1b3) - || (kfd->device_info->asic_family == CHIP_ARCTURUS - && kfd->mec2_fw_version >= 0x30) - || (kfd->device_info->asic_family == CHIP_ALDEBARAN - && kfd->mec2_fw_version >= 0x28)) - ret = amdgpu_amdkfd_alloc_gws(kfd->kgd, - amdgpu_amdkfd_get_num_gws(kfd->kgd), &kfd->gws); + if (hws_gws_support || (KFD_IS_SOC15(kfd) && + ((KFD_GC_VERSION(kfd) == IP_VERSION(9, 0, 1) + && kfd->mec2_fw_version >= 0x81b3) || + (KFD_GC_VERSION(kfd) <= IP_VERSION(9, 4, 0) + && kfd->mec2_fw_version >= 0x1b3) || + (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1) + && kfd->mec2_fw_version >= 0x30) || + (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) + && kfd->mec2_fw_version >= 0x28)))) + ret = amdgpu_amdkfd_alloc_gws(kfd->adev, + kfd->adev->gds.gws_size, &kfd->gws); return ret; } @@ -910,11 +825,11 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, unsigned int size, map_process_packet_size; kfd->ddev = ddev; - kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd, + kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, KGD_ENGINE_MEC1); - kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd, + kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, KGD_ENGINE_MEC2); - kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd, + kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev, KGD_ENGINE_SDMA1); kfd->shared_resources = *gpu_resources; @@ -927,7 +842,7 @@ bool kgd2kfd_device_init(struct kfd_dev 
*kfd, * 32 and 64-bit requests are possible and must be * supported. */ - kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->kgd); + kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->adev); if (!kfd->pci_atomic_requested && kfd->device_info->needs_pci_atomics && (!kfd->device_info->no_atomic_fw_version || @@ -959,10 +874,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, * calculate max size of runlist packet. * There can be only 2 packets at once */ - map_process_packet_size = - kfd->device_info->asic_family == CHIP_ALDEBARAN ? + map_process_packet_size = KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) ? sizeof(struct pm4_mes_map_process_aldebaran) : - sizeof(struct pm4_mes_map_process); + sizeof(struct pm4_mes_map_process); size += (KFD_MAX_NUM_OF_PROCESSES * map_process_packet_size + max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues) + sizeof(struct pm4_mes_runlist)) * 2; @@ -974,7 +888,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, size += 512 * 1024; if (amdgpu_amdkfd_alloc_gtt_mem( - kfd->kgd, size, &kfd->gtt_mem, + kfd->adev, size, &kfd->gtt_mem, &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr, false)) { dev_err(kfd_device, "Could not allocate %d bytes\n", size); @@ -995,9 +909,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, goto kfd_doorbell_error; } - kfd->hive_id = amdgpu_amdkfd_get_hive_id(kfd->kgd); + kfd->hive_id = kfd->adev->gmc.xgmi.hive_id; - kfd->noretry = amdgpu_amdkfd_get_noretry(kfd->kgd); + kfd->noretry = kfd->adev->gmc.noretry; if (kfd_interrupt_init(kfd)) { dev_err(kfd_device, "Error initializing interrupts\n"); @@ -1015,7 +929,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, */ if (kfd_gws_init(kfd)) { dev_err(kfd_device, "Could not allocate %d gws\n", - amdgpu_amdkfd_get_num_gws(kfd->kgd)); + kfd->adev->gds.gws_size); goto gws_error; } @@ -1030,7 +944,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd_cwsr_init(kfd); - svm_migrate_init((struct amdgpu_device *)kfd->kgd); + svm_migrate_init(kfd->adev); if(kgd2kfd_resume_iommu(kfd)) goto device_iommu_error; @@ -1068,10 +982,10 @@ kfd_interrupt_error: kfd_doorbell_error: kfd_gtt_sa_fini(kfd); kfd_gtt_sa_init_error: - amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem); + amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem); alloc_gtt_mem_failure: if (kfd->gws) - amdgpu_amdkfd_free_gws(kfd->kgd, kfd->gws); + amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws); dev_err(kfd_device, "device %x:%x NOT added due to errors\n", kfd->pdev->vendor, kfd->pdev->device); @@ -1088,9 +1002,9 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd) kfd_doorbell_fini(kfd); ida_destroy(&kfd->doorbell_ida); kfd_gtt_sa_fini(kfd); - amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem); + amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem); if (kfd->gws) - amdgpu_amdkfd_free_gws(kfd->kgd, kfd->gws); + amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws); } kfree(kfd); @@ -1526,7 +1440,7 @@ void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd) void kfd_inc_compute_active(struct kfd_dev *kfd) { if (atomic_inc_return(&kfd->compute_profile) == 1) - amdgpu_amdkfd_set_compute_idle(kfd->kgd, false); + amdgpu_amdkfd_set_compute_idle(kfd->adev, false); } void kfd_dec_compute_active(struct kfd_dev *kfd) @@ -1534,7 +1448,7 @@ void kfd_dec_compute_active(struct kfd_dev *kfd) int count = atomic_dec_return(&kfd->compute_profile); if (count == 0) - amdgpu_amdkfd_set_compute_idle(kfd->kgd, true); + amdgpu_amdkfd_set_compute_idle(kfd->adev, true); WARN_ONCE(count < 0, "Compute profile ref. 
count error"); } @@ -1544,6 +1458,26 @@ void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask) kfd_smi_event_update_thermal_throttling(kfd, throttle_bitmask); } +/* kfd_get_num_sdma_engines returns the number of PCIe optimized SDMA and + * kfd_get_num_xgmi_sdma_engines returns the number of XGMI SDMA. + * When the device has more than two engines, we reserve two for PCIe to enable + * full-duplex and the rest are used as XGMI. + */ +unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev) +{ + /* If XGMI is not supported, all SDMA engines are PCIe */ + if (!kdev->adev->gmc.xgmi.supported) + return kdev->adev->sdma.num_instances; + + return min(kdev->adev->sdma.num_instances, 2); +} + +unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev) +{ + /* After reserved for PCIe, the rest of engines are XGMI */ + return kdev->adev->sdma.num_instances - kfd_get_num_sdma_engines(kdev); +} + #if defined(CONFIG_DEBUG_FS) /* This function will send a package to HIQ to hang the HWS diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 003ba6a373ff..2af2b3268171 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -99,38 +99,29 @@ unsigned int get_pipes_per_mec(struct device_queue_manager *dqm) return dqm->dev->shared_resources.num_pipe_per_mec; } -static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm) -{ - return dqm->dev->device_info->num_sdma_engines; -} - -static unsigned int get_num_xgmi_sdma_engines(struct device_queue_manager *dqm) -{ - return dqm->dev->device_info->num_xgmi_sdma_engines; -} - static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm) { - return get_num_sdma_engines(dqm) + get_num_xgmi_sdma_engines(dqm); + return kfd_get_num_sdma_engines(dqm->dev) + + kfd_get_num_xgmi_sdma_engines(dqm->dev); } unsigned int get_num_sdma_queues(struct device_queue_manager *dqm) { - return dqm->dev->device_info->num_sdma_engines - * dqm->dev->device_info->num_sdma_queues_per_engine; + return kfd_get_num_sdma_engines(dqm->dev) * + dqm->dev->device_info->num_sdma_queues_per_engine; } unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm) { - return dqm->dev->device_info->num_xgmi_sdma_engines - * dqm->dev->device_info->num_sdma_queues_per_engine; + return kfd_get_num_xgmi_sdma_engines(dqm->dev) * + dqm->dev->device_info->num_sdma_queues_per_engine; } void program_sh_mem_settings(struct device_queue_manager *dqm, struct qcm_process_device *qpd) { return dqm->dev->kfd2kgd->program_sh_mem_settings( - dqm->dev->kgd, qpd->vmid, + dqm->dev->adev, qpd->vmid, qpd->sh_mem_config, qpd->sh_mem_ape1_base, qpd->sh_mem_ape1_limit, @@ -157,7 +148,7 @@ static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q) { struct kfd_dev *dev = qpd->dqm->dev; - if (!KFD_IS_SOC15(dev->device_info->asic_family)) { + if (!KFD_IS_SOC15(dev)) { /* On pre-SOC15 chips we need to use the queue ID to * preserve the user mode ABI. 
*/ @@ -202,7 +193,7 @@ static void deallocate_doorbell(struct qcm_process_device *qpd, unsigned int old; struct kfd_dev *dev = qpd->dqm->dev; - if (!KFD_IS_SOC15(dev->device_info->asic_family) || + if (!KFD_IS_SOC15(dev) || q->properties.type == KFD_QUEUE_TYPE_SDMA || q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) return; @@ -216,7 +207,7 @@ static void program_trap_handler_settings(struct device_queue_manager *dqm, { if (dqm->dev->kfd2kgd->program_trap_handler_settings) dqm->dev->kfd2kgd->program_trap_handler_settings( - dqm->dev->kgd, qpd->vmid, + dqm->dev->adev, qpd->vmid, qpd->tba_addr, qpd->tma_addr); } @@ -250,21 +241,20 @@ static int allocate_vmid(struct device_queue_manager *dqm, program_sh_mem_settings(dqm, qpd); - if (dqm->dev->device_info->asic_family >= CHIP_VEGA10 && - dqm->dev->cwsr_enabled) + if (KFD_IS_SOC15(dqm->dev) && dqm->dev->cwsr_enabled) program_trap_handler_settings(dqm, qpd); /* qpd->page_table_base is set earlier when register_process() * is called, i.e. when the first queue is created. */ - dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd, + dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->adev, qpd->vmid, qpd->page_table_base); /* invalidate the VM context after pasid and vmid mapping is set up */ kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY); if (dqm->dev->kfd2kgd->set_scratch_backing_va) - dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->kgd, + dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->adev, qpd->sh_hidden_private_base, qpd->vmid); return 0; @@ -283,7 +273,7 @@ static int flush_texture_cache_nocpsch(struct kfd_dev *kdev, if (ret) return ret; - return amdgpu_amdkfd_submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid, + return amdgpu_amdkfd_submit_ib(kdev->adev, KGD_ENGINE_MEC1, qpd->vmid, qpd->ib_base, (uint32_t *)qpd->ib_kaddr, pmf->release_mem_size / sizeof(uint32_t)); } @@ -293,7 +283,7 @@ static void deallocate_vmid(struct device_queue_manager *dqm, struct queue *q) { /* On GFX v7, CP doesn't flush TC at dequeue */ - if (q->device->device_info->asic_family == CHIP_HAWAII) + if (q->device->adev->asic_type == CHIP_HAWAII) if (flush_texture_cache_nocpsch(q->device, qpd)) pr_err("Failed to flush TC\n"); @@ -776,7 +766,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm, if (!list_empty(&qpd->queues_list)) { dqm->dev->kfd2kgd->set_vm_context_page_table_base( - dqm->dev->kgd, + dqm->dev->adev, qpd->vmid, qpd->page_table_base); kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY); @@ -954,7 +944,7 @@ set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid, unsigned int vmid) { return dqm->dev->kfd2kgd->set_pasid_vmid_mapping( - dqm->dev->kgd, pasid, vmid); + dqm->dev->adev, pasid, vmid); } static void init_interrupts(struct device_queue_manager *dqm) @@ -963,7 +953,7 @@ static void init_interrupts(struct device_queue_manager *dqm) for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++) if (is_pipe_enabled(dqm, 0, i)) - dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i); + dqm->dev->kfd2kgd->init_interrupts(dqm->dev->adev, i); } static int initialize_nocpsch(struct device_queue_manager *dqm) @@ -1017,7 +1007,7 @@ static int start_nocpsch(struct device_queue_manager *dqm) pr_info("SW scheduler is used"); init_interrupts(dqm); - if (dqm->dev->device_info->asic_family == CHIP_HAWAII) + if (dqm->dev->adev->asic_type == CHIP_HAWAII) return pm_init(&dqm->packet_mgr, dqm); dqm->sched_running = true; @@ -1026,7 +1016,7 @@ static int start_nocpsch(struct device_queue_manager *dqm) static int stop_nocpsch(struct 
device_queue_manager *dqm) { - if (dqm->dev->device_info->asic_family == CHIP_HAWAII) + if (dqm->dev->adev->asic_type == CHIP_HAWAII) pm_uninit(&dqm->packet_mgr, false); dqm->sched_running = false; @@ -1055,9 +1045,9 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm, dqm->sdma_bitmap &= ~(1ULL << bit); q->sdma_id = bit; q->properties.sdma_engine_id = q->sdma_id % - get_num_sdma_engines(dqm); + kfd_get_num_sdma_engines(dqm->dev); q->properties.sdma_queue_id = q->sdma_id / - get_num_sdma_engines(dqm); + kfd_get_num_sdma_engines(dqm->dev); } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) { if (dqm->xgmi_sdma_bitmap == 0) { pr_err("No more XGMI SDMA queue to allocate\n"); @@ -1072,10 +1062,11 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm, * assumes the first N engines are always * PCIe-optimized ones */ - q->properties.sdma_engine_id = get_num_sdma_engines(dqm) + - q->sdma_id % get_num_xgmi_sdma_engines(dqm); + q->properties.sdma_engine_id = + kfd_get_num_sdma_engines(dqm->dev) + + q->sdma_id % kfd_get_num_xgmi_sdma_engines(dqm->dev); q->properties.sdma_queue_id = q->sdma_id / - get_num_xgmi_sdma_engines(dqm); + kfd_get_num_xgmi_sdma_engines(dqm->dev); } pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id); @@ -1132,7 +1123,7 @@ static int set_sched_resources(struct device_queue_manager *dqm) res.queue_mask |= 1ull << amdgpu_queue_mask_bit_to_set_resource_bit( - (struct amdgpu_device *)dqm->dev->kgd, i); + dqm->dev->adev, i); } res.gws_mask = ~0ull; res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0; @@ -1226,6 +1217,11 @@ static int stop_cpsch(struct device_queue_manager *dqm) bool hanging; dqm_lock(dqm); + if (!dqm->sched_running) { + dqm_unlock(dqm); + return 0; + } + if (!dqm->is_hws_hang) unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); hanging = dqm->is_hws_hang || dqm->is_resetting; @@ -1845,7 +1841,7 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm) dev->device_info->num_sdma_queues_per_engine + dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size; - retval = amdgpu_amdkfd_alloc_gtt_mem(dev->kgd, size, + retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, size, &(mem_obj->gtt_mem), &(mem_obj->gpu_addr), (void *)&(mem_obj->cpu_ptr), false); @@ -1862,7 +1858,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) if (!dqm) return NULL; - switch (dev->device_info->asic_family) { + switch (dev->adev->asic_type) { /* HWS is not available on Hawaii. */ case CHIP_HAWAII: /* HWS depends on CWSR for timely dequeue. 
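/*
 * Condensed restatement (illustrative only, not code from this patch) of
 * how allocate_sdma_queue() above turns a flat sdma_id bit into an
 * (engine, queue) pair, with XGMI engines numbered after the
 * PCIe-optimized ones. Example values use the 2 PCIe + 6 XGMI split from
 * the worked example earlier.
 */
static void example_sdma_id_mapping(unsigned int sdma_id, bool xgmi,
				    unsigned int nsdma, unsigned int nxgmi,
				    unsigned int *engine, unsigned int *queue)
{
	if (!xgmi) {
		*engine = sdma_id % nsdma;		/* sdma_id 5 -> engine 1 */
		*queue = sdma_id / nsdma;		/*             queue  2 */
	} else {
		/* XGMI engines start after the PCIe-optimized ones */
		*engine = nsdma + sdma_id % nxgmi;	/* sdma_id 7 -> engine 3 */
		*queue = sdma_id / nxgmi;		/*             queue  1 */
	}
}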
CWSR is not @@ -1925,7 +1921,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) goto out_free; } - switch (dev->device_info->asic_family) { + switch (dev->adev->asic_type) { case CHIP_CARRIZO: device_queue_manager_init_vi(&dqm->asic_ops); break; @@ -1947,31 +1943,16 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) device_queue_manager_init_vi_tonga(&dqm->asic_ops); break; - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: - case CHIP_RAVEN: - case CHIP_RENOIR: - case CHIP_ARCTURUS: - case CHIP_ALDEBARAN: - device_queue_manager_init_v9(&dqm->asic_ops); - break; - case CHIP_NAVI10: - case CHIP_NAVI12: - case CHIP_NAVI14: - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: - case CHIP_CYAN_SKILLFISH: - device_queue_manager_init_v10_navi10(&dqm->asic_ops); - break; default: - WARN(1, "Unexpected ASIC family %u", - dev->device_info->asic_family); - goto out_free; + if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1)) + device_queue_manager_init_v10_navi10(&dqm->asic_ops); + else if (KFD_GC_VERSION(dev) >= IP_VERSION(9, 0, 1)) + device_queue_manager_init_v9(&dqm->asic_ops); + else { + WARN(1, "Unexpected ASIC family %u", + dev->adev->asic_type); + goto out_free; + } } if (init_mqd_managers(dqm)) @@ -1995,7 +1976,7 @@ static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev, { WARN(!mqd, "No hiq sdma mqd trunk to free"); - amdgpu_amdkfd_free_gtt_mem(dev->kgd, mqd->gtt_mem); + amdgpu_amdkfd_free_gtt_mem(dev->adev, mqd->gtt_mem); } void device_queue_manager_uninit(struct device_queue_manager *dqm) @@ -2026,7 +2007,7 @@ static void kfd_process_hw_exception(struct work_struct *work) { struct device_queue_manager *dqm = container_of(work, struct device_queue_manager, hw_exception_work); - amdgpu_amdkfd_gpu_reset(dqm->dev->kgd); + amdgpu_amdkfd_gpu_reset(dqm->dev->adev); } #if defined(CONFIG_DEBUG_FS) @@ -2065,7 +2046,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) return 0; } - r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd, + r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev, KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE, &dump, &n_regs); if (!r) { @@ -2087,7 +2068,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) continue; r = dqm->dev->kfd2kgd->hqd_dump( - dqm->dev->kgd, pipe, queue, &dump, &n_regs); + dqm->dev->adev, pipe, queue, &dump, &n_regs); if (r) break; @@ -2104,7 +2085,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) queue < dqm->dev->device_info->num_sdma_queues_per_engine; queue++) { r = dqm->dev->kfd2kgd->hqd_sdma_dump( - dqm->dev->kgd, pipe, queue, &dump, &n_regs); + dqm->dev->adev, pipe, queue, &dump, &n_regs); if (r) break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c index b5c3d13643f1..f20434d9980e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c @@ -62,7 +62,7 @@ static int update_qpd_v9(struct device_queue_manager *dqm, SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT; - if (dqm->dev->device_info->asic_family == CHIP_ALDEBARAN) { + if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 2)) { /* Aldebaran can safely support different XNACK modes * per process */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 3eea4edee355..afe72dd11325 100644 --- 
a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -935,8 +935,10 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, u32 pasid, /* Workaround on Raven to not kill the process when memory is freed * before IOMMU is able to finish processing all the excessive PPRs */ - if (dev->device_info->asic_family != CHIP_RAVEN && - dev->device_info->asic_family != CHIP_RENOIR) { + + if (KFD_GC_VERSION(dev) != IP_VERSION(9, 1, 0) && + KFD_GC_VERSION(dev) != IP_VERSION(9, 2, 2) && + KFD_GC_VERSION(dev) != IP_VERSION(9, 3, 0)) { mutex_lock(&p->event_mutex); /* Lookup events by type and signal them */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c index d1388896f9c1..2e2b7ceb71db 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c @@ -394,7 +394,7 @@ int kfd_init_apertures(struct kfd_process *process) pdd->gpuvm_base = pdd->gpuvm_limit = 0; pdd->scratch_base = pdd->scratch_limit = 0; } else { - switch (dev->device_info->asic_family) { + switch (dev->adev->asic_type) { case CHIP_KAVERI: case CHIP_HAWAII: case CHIP_CARRIZO: @@ -406,29 +406,14 @@ int kfd_init_apertures(struct kfd_process *process) case CHIP_VEGAM: kfd_init_apertures_vi(pdd, id); break; - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: - case CHIP_RAVEN: - case CHIP_RENOIR: - case CHIP_ARCTURUS: - case CHIP_ALDEBARAN: - case CHIP_NAVI10: - case CHIP_NAVI12: - case CHIP_NAVI14: - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: - case CHIP_CYAN_SKILLFISH: - kfd_init_apertures_v9(pdd, id); - break; default: - WARN(1, "Unexpected ASIC family %u", - dev->device_info->asic_family); - return -EINVAL; + if (KFD_GC_VERSION(dev) >= IP_VERSION(9, 0, 1)) + kfd_init_apertures_v9(pdd, id); + else { + WARN(1, "Unexpected ASIC family %u", + dev->adev->asic_type); + return -EINVAL; + } } if (!dev->use_iommu_v2) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index 543e7ea75593..20512a4e9a91 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -231,7 +231,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev, if (sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST && sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) { kfd_signal_poison_consumed_event(dev, pasid); - amdgpu_amdkfd_ras_poison_consumption_handler(dev->kgd); + amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev); return; } break; @@ -253,7 +253,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev, kfd_signal_event_interrupt(pasid, context_id0 & 0xfffffff, 28); } else if (source_id == SOC15_INTSRC_SDMA_ECC) { kfd_signal_poison_consumed_event(dev, pasid); - amdgpu_amdkfd_ras_poison_consumption_handler(dev->kgd); + amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev); return; } } else if (client_id == SOC15_IH_CLIENTID_VMC || diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index 64b4ac339904..406479a369a9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -91,7 +91,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev, kq->pq_gpu_addr = kq->pq->gpu_addr; /* For CIK family asics, kq->eop_mem is not needed */ - if (dev->device_info->asic_family > 
CHIP_MULLINS) { + if (dev->adev->asic_type > CHIP_MULLINS) { retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem); if (retval != 0) goto err_eop_allocate_vidmem; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 9b9c2b9bf2ef..d84cec0022b1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -938,7 +938,7 @@ int svm_migrate_init(struct amdgpu_device *adev) void *r; /* Page migration works on Vega10 or newer */ - if (kfddev->device_info->asic_family < CHIP_VEGA10) + if (!KFD_IS_SOC15(kfddev)) return -EINVAL; pgmap = &kfddev->pgmap; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c index c021519af810..7b4118915bf6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c @@ -100,7 +100,7 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm, struct kfd_cu_info cu_info; uint32_t cu_per_sh[KFD_MAX_NUM_SE][KFD_MAX_NUM_SH_PER_SE] = {0}; int i, se, sh, cu; - amdgpu_amdkfd_get_cu_info(mm->dev->kgd, &cu_info); + amdgpu_amdkfd_get_cu_info(mm->dev->adev, &cu_info); if (cu_mask_count > cu_info.cu_active_number) cu_mask_count = cu_info.cu_active_number; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index 8128f4d312f1..e9a8e21e144e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -171,7 +171,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0); uint32_t wptr_mask = (uint32_t)((p->queue_size / 4) - 1); - return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id, + return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id, (uint32_t __user *)p->write_ptr, wptr_shift, wptr_mask, mms); } @@ -180,7 +180,7 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t queue_id, struct queue_properties *p, struct mm_struct *mms) { - return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd, + return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd, (uint32_t __user *)p->write_ptr, mms); } @@ -276,7 +276,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd, unsigned int timeout, uint32_t pipe_id, uint32_t queue_id) { - return mm->dev->kfd2kgd->hqd_destroy(mm->dev->kgd, mqd, type, timeout, + return mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, mqd, type, timeout, pipe_id, queue_id); } @@ -289,7 +289,7 @@ static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd, unsigned int timeout, uint32_t pipe_id, uint32_t queue_id) { - return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout); + return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout); } static bool is_occupied(struct mqd_manager *mm, void *mqd, @@ -297,7 +297,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd, uint32_t queue_id) { - return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address, + return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->adev, queue_address, pipe_id, queue_id); } @@ -306,7 +306,7 @@ static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd, uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id) { - return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd); + return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd); } /* diff --git 
a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index 270160fc401b..d74d8a6ac27a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -148,7 +148,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, /* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */ uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0); - r = mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id, + r = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id, (uint32_t __user *)p->write_ptr, wptr_shift, 0, mms); return r; @@ -158,7 +158,7 @@ static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t queue_id, struct queue_properties *p, struct mm_struct *mms) { - return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->kgd, mqd, pipe_id, + return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, mqd, pipe_id, queue_id, p->doorbell_off); } @@ -239,7 +239,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd, uint32_t queue_id) { return mm->dev->kfd2kgd->hqd_destroy - (mm->dev->kgd, mqd, type, timeout, + (mm->dev->adev, mqd, type, timeout, pipe_id, queue_id); } @@ -254,7 +254,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd, uint32_t queue_id) { return mm->dev->kfd2kgd->hqd_is_occupied( - mm->dev->kgd, queue_address, + mm->dev->adev, queue_address, pipe_id, queue_id); } @@ -320,7 +320,7 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t queue_id, struct queue_properties *p, struct mm_struct *mms) { - return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd, + return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd, (uint32_t __user *)p->write_ptr, mms); } @@ -363,14 +363,14 @@ static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd, unsigned int timeout, uint32_t pipe_id, uint32_t queue_id) { - return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout); + return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout); } static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd, uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id) { - return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd); + return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd); } #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 4e5932f54b5a..326eb2285029 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -108,7 +108,7 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL); if (!mqd_mem_obj) return NULL; - retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd, + retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->adev, ALIGN(q->ctl_stack_size, PAGE_SIZE) + ALIGN(sizeof(struct v9_mqd), PAGE_SIZE), &(mqd_mem_obj->gtt_mem), @@ -199,7 +199,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, /* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */ uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 
4 : 0); - return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id, + return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id, (uint32_t __user *)p->write_ptr, wptr_shift, 0, mms); } @@ -208,7 +208,7 @@ static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t queue_id, struct queue_properties *p, struct mm_struct *mms) { - return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->kgd, mqd, pipe_id, + return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, mqd, pipe_id, queue_id, p->doorbell_off); } @@ -291,7 +291,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd, uint32_t queue_id) { return mm->dev->kfd2kgd->hqd_destroy - (mm->dev->kgd, mqd, type, timeout, + (mm->dev->adev, mqd, type, timeout, pipe_id, queue_id); } @@ -301,7 +301,7 @@ static void free_mqd(struct mqd_manager *mm, void *mqd, struct kfd_dev *kfd = mm->dev; if (mqd_mem_obj->gtt_mem) { - amdgpu_amdkfd_free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem); + amdgpu_amdkfd_free_gtt_mem(kfd->adev, mqd_mem_obj->gtt_mem); kfree(mqd_mem_obj); } else { kfd_gtt_sa_free(mm->dev, mqd_mem_obj); @@ -313,7 +313,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd, uint32_t queue_id) { return mm->dev->kfd2kgd->hqd_is_occupied( - mm->dev->kgd, queue_address, + mm->dev->adev, queue_address, pipe_id, queue_id); } @@ -375,7 +375,7 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t queue_id, struct queue_properties *p, struct mm_struct *mms) { - return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd, + return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd, (uint32_t __user *)p->write_ptr, mms); } @@ -418,14 +418,14 @@ static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd, unsigned int timeout, uint32_t pipe_id, uint32_t queue_id) { - return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout); + return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout); } static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd, uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id) { - return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd); + return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd); } #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index cd9220eb8a7a..d456e950ce1d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -162,7 +162,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 
4 : 0); uint32_t wptr_mask = (uint32_t)((p->queue_size / 4) - 1); - return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id, + return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id, (uint32_t __user *)p->write_ptr, wptr_shift, wptr_mask, mms); } @@ -265,7 +265,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd, uint32_t queue_id) { return mm->dev->kfd2kgd->hqd_destroy - (mm->dev->kgd, mqd, type, timeout, + (mm->dev->adev, mqd, type, timeout, pipe_id, queue_id); } @@ -280,7 +280,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd, uint32_t queue_id) { return mm->dev->kfd2kgd->hqd_is_occupied( - mm->dev->kgd, queue_address, + mm->dev->adev, queue_address, pipe_id, queue_id); } @@ -347,7 +347,7 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t queue_id, struct queue_properties *p, struct mm_struct *mms) { - return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd, + return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd, (uint32_t __user *)p->write_ptr, mms); } @@ -389,14 +389,14 @@ static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd, unsigned int timeout, uint32_t pipe_id, uint32_t queue_id) { - return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout); + return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout); } static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd, uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id) { - return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd); + return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd); } #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index e547f1f8c49f..1439420925a0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -223,7 +223,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm, int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm) { - switch (dqm->dev->device_info->asic_family) { + switch (dqm->dev->adev->asic_type) { case CHIP_KAVERI: case CHIP_HAWAII: /* PM4 packet structures on CIK are the same as on VI */ @@ -236,31 +236,16 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm) case CHIP_VEGAM: pm->pmf = &kfd_vi_pm_funcs; break; - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: - case CHIP_RAVEN: - case CHIP_RENOIR: - case CHIP_ARCTURUS: - case CHIP_NAVI10: - case CHIP_NAVI12: - case CHIP_NAVI14: - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: - case CHIP_CYAN_SKILLFISH: - pm->pmf = &kfd_v9_pm_funcs; - break; - case CHIP_ALDEBARAN: - pm->pmf = &kfd_aldebaran_pm_funcs; - break; default: - WARN(1, "Unexpected ASIC family %u", - dqm->dev->device_info->asic_family); - return -EINVAL; + if (KFD_GC_VERSION(dqm->dev) == IP_VERSION(9, 4, 2)) + pm->pmf = &kfd_aldebaran_pm_funcs; + else if (KFD_GC_VERSION(dqm->dev) >= IP_VERSION(9, 0, 1)) + pm->pmf = &kfd_v9_pm_funcs; + else { + WARN(1, "Unexpected ASIC family %u", + dqm->dev->adev->asic_type); + return -EINVAL; + } } pm->dqm = dqm; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 94e92c0812db..7ea528941951 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -183,7 +183,8 @@ enum cache_policy { 
cache_policy_noncoherent }; -#define KFD_IS_SOC15(chip) ((chip) >= CHIP_VEGA10) +#define KFD_GC_VERSION(dev) ((dev)->adev->ip_versions[GC_HWIP][0]) +#define KFD_IS_SOC15(dev) ((KFD_GC_VERSION(dev)) >= (IP_VERSION(9, 0, 1))) struct kfd_event_interrupt_class { bool (*interrupt_isr)(struct kfd_dev *dev, @@ -194,7 +195,6 @@ struct kfd_event_interrupt_class { }; struct kfd_device_info { - enum amd_asic_type asic_family; const char *asic_name; uint32_t gfx_target_version; const struct kfd_event_interrupt_class *event_interrupt_class; @@ -208,11 +208,12 @@ struct kfd_device_info { bool needs_iommu_device; bool needs_pci_atomics; uint32_t no_atomic_fw_version; - unsigned int num_sdma_engines; - unsigned int num_xgmi_sdma_engines; unsigned int num_sdma_queues_per_engine; }; +unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev); +unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev); + struct kfd_mem_obj { uint32_t range_start; uint32_t range_end; @@ -228,7 +229,7 @@ struct kfd_vmid_info { }; struct kfd_dev { - struct kgd_dev *kgd; + struct amdgpu_device *adev; const struct kfd_device_info *device_info; struct pci_dev *pdev; @@ -766,7 +767,7 @@ struct svm_range_list { struct list_head deferred_range_list; spinlock_t deferred_list_lock; atomic_t evicted_ranges; - bool drain_pagefaults; + atomic_t drain_pagefaults; struct delayed_work restore_work; DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE); struct task_struct *faulting_task; @@ -891,7 +892,7 @@ struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid); struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm); int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id); -int kfd_process_gpuid_from_kgd(struct kfd_process *p, +int kfd_process_gpuid_from_adev(struct kfd_process *p, struct amdgpu_device *adev, uint32_t *gpuid, uint32_t *gpuidx); static inline int kfd_process_gpuid_from_gpuidx(struct kfd_process *p, @@ -984,7 +985,7 @@ struct kfd_topology_device *kfd_topology_device_by_proximity_domain( struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id); struct kfd_dev *kfd_device_by_id(uint32_t gpu_id); struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev); -struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd); +struct kfd_dev *kfd_device_by_adev(const struct amdgpu_device *adev); int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev); int kfd_numa_node_to_apic_id(int numa_node_id); void kfd_double_confirm_iommu_support(struct kfd_dev *gpu); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index b993011cfa64..d4c8a6948a9f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -288,7 +288,7 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer) /* Collect wave count from device if it supports */ wave_cnt = 0; max_waves_per_cu = 0; - dev->kfd2kgd->get_cu_occupancy(dev->kgd, proc->pasid, &wave_cnt, + dev->kfd2kgd->get_cu_occupancy(dev->adev, proc->pasid, &wave_cnt, &max_waves_per_cu); /* Translate wave count to number of compute units */ @@ -692,12 +692,12 @@ static void kfd_process_free_gpuvm(struct kgd_mem *mem, struct kfd_dev *dev = pdd->dev; if (kptr) { - amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(dev->kgd, mem); + amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(dev->adev, mem); kptr = NULL; } - amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->drm_priv); - amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem, 
pdd->drm_priv, + amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->adev, mem, pdd->drm_priv); + amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, mem, pdd->drm_priv, NULL); } @@ -714,24 +714,24 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd, struct kfd_dev *kdev = pdd->dev; int err; - err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size, + err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->adev, gpu_va, size, pdd->drm_priv, mem, NULL, flags); if (err) goto err_alloc_mem; - err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, *mem, + err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->adev, *mem, pdd->drm_priv, NULL); if (err) goto err_map_mem; - err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, *mem, true); + err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->adev, *mem, true); if (err) { pr_debug("Sync memory failed, wait interrupted by user signal\n"); goto sync_memory_failed; } if (kptr) { - err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->kgd, + err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->adev, (struct kgd_mem *)*mem, kptr, NULL); if (err) { pr_debug("Map GTT BO to kernel failed\n"); @@ -742,10 +742,10 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd, return err; sync_memory_failed: - amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kdev->kgd, *mem, pdd->drm_priv); + amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kdev->adev, *mem, pdd->drm_priv); err_map_mem: - amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, *mem, pdd->drm_priv, + amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->adev, *mem, pdd->drm_priv, NULL); err_alloc_mem: *mem = NULL; @@ -940,10 +940,10 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd) if (!peer_pdd->drm_priv) continue; amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( - peer_pdd->dev->kgd, mem, peer_pdd->drm_priv); + peer_pdd->dev->adev, mem, peer_pdd->drm_priv); } - amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem, + amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, mem, pdd->drm_priv, NULL); kfd_process_device_remove_obj_handle(pdd, id); } @@ -974,7 +974,7 @@ static void kfd_process_kunmap_signal_bo(struct kfd_process *p) if (!mem) goto out; - amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(kdev->kgd, mem); + amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(kdev->adev, mem); out: mutex_unlock(&p->mutex); @@ -1003,7 +1003,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p) if (pdd->drm_file) { amdgpu_amdkfd_gpuvm_release_process_vm( - pdd->dev->kgd, pdd->drm_priv); + pdd->dev->adev, pdd->drm_priv); fput(pdd->drm_file); } @@ -1317,14 +1317,13 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported) * support the SVM APIs and don't need to be considered * for the XNACK mode selection. */ - if (dev->device_info->asic_family < CHIP_VEGA10) + if (!KFD_IS_SOC15(dev)) continue; /* Aldebaran can always support XNACK because it can support * per-process XNACK mode selection. But let the dev->noretry * setting still influence the default XNACK mode. */ - if (supported && - dev->device_info->asic_family == CHIP_ALDEBARAN) + if (supported && KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) continue; /* GFXv10 and later GPUs do not support shader preemption @@ -1332,7 +1331,7 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported) * management and memory-manager-related preemptions or * even deadlocks. 
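 *
 * (Aside, not part of the patch: the per-ASIC checks in this function now key
 * off the GC IP version via the helpers added to kfd_priv.h, rather than the
 * amd_asic_type enum. Roughly:
 *
 *     if (!KFD_IS_SOC15(dev))                          // pre-GFX9: no SVM APIs
 *         continue;
 *     if (supported && KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2))
 *         continue;                                    // Aldebaran
 *     if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1)) // GFX10 and later
 *         return false;
 *
 * so a new ASIC that reuses an existing GC block needs no new CHIP_* case.)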
*/ - if (dev->device_info->asic_family >= CHIP_NAVI10) + if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1)) return false; if (dev->noretry) @@ -1431,7 +1430,7 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd, int range_start = dev->shared_resources.non_cp_doorbells_start; int range_end = dev->shared_resources.non_cp_doorbells_end; - if (!KFD_IS_SOC15(dev->device_info->asic_family)) + if (!KFD_IS_SOC15(dev)) return 0; qpd->doorbell_bitmap = @@ -1547,7 +1546,7 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd, dev = pdd->dev; ret = amdgpu_amdkfd_gpuvm_acquire_process_vm( - dev->kgd, drm_file, p->pasid, + dev->adev, drm_file, p->pasid, &p->kgd_process_info, &p->ef); if (ret) { pr_err("Failed to create process VM object\n"); @@ -1779,14 +1778,13 @@ int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id) } int -kfd_process_gpuid_from_kgd(struct kfd_process *p, struct amdgpu_device *adev, +kfd_process_gpuid_from_adev(struct kfd_process *p, struct amdgpu_device *adev, uint32_t *gpuid, uint32_t *gpuidx) { - struct kgd_dev *kgd = (struct kgd_dev *)adev; int i; for (i = 0; i < p->n_pdds; i++) - if (p->pdds[i] && p->pdds[i]->dev->kgd == kgd) { + if (p->pdds[i] && p->pdds[i]->dev->adev == adev) { *gpuid = p->pdds[i]->dev->id; *gpuidx = i; return 0; @@ -1951,10 +1949,10 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type) * only happens when the first queue is created. */ if (pdd->qpd.vmid) - amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->kgd, + amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->adev, pdd->qpd.vmid); } else { - amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->kgd, + amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->adev, pdd->process->pasid, type); } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 3627e7ac161b..4f8464658daf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -118,7 +118,7 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid, return ret; pqn->q->gws = mem; - pdd->qpd.num_gws = gws ? amdgpu_amdkfd_get_num_gws(dev->kgd) : 0; + pdd->qpd.num_gws = gws ? 
dev->adev->gds.gws_size : 0; return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm, pqn->q, NULL); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c index ed4bc5f844ce..deae12dc777d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c @@ -207,7 +207,6 @@ void kfd_smi_event_update_gpu_reset(struct kfd_dev *dev, bool post_reset) void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev, uint64_t throttle_bitmask) { - struct amdgpu_device *adev = (struct amdgpu_device *)dev->kgd; /* * ThermalThrottle msg = throttle_bitmask(8): * thermal_interrupt_count(16): @@ -223,14 +222,13 @@ void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev, len = snprintf(fifo_in, sizeof(fifo_in), "%x %llx:%llx\n", KFD_SMI_EVENT_THERMAL_THROTTLE, throttle_bitmask, - atomic64_read(&adev->smu.throttle_int_counter)); + atomic64_read(&dev->adev->smu.throttle_int_counter)); add_event_to_kfifo(dev, KFD_SMI_EVENT_THERMAL_THROTTLE, fifo_in, len); } void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid) { - struct amdgpu_device *adev = (struct amdgpu_device *)dev->kgd; struct amdgpu_task_info task_info; /* VmFault msg = (hex)uint32_pid(8) + :(1) + task name(16) = 25 */ /* 1 byte event + 1 byte space + 25 bytes msg + 1 byte \n + @@ -243,7 +241,7 @@ void kfd_smi_event_update_vmfault(struct kfd_dev *dev, uint16_t pasid) return; memset(&task_info, 0, sizeof(struct amdgpu_task_info)); - amdgpu_vm_get_task_info(adev, pasid, &task_info); + amdgpu_vm_get_task_info(dev->adev, pasid, &task_info); /* Report VM faults from user applications, not retry from kernel */ if (!task_info.pid) return; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 16137c4247bb..755265f6c53b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -193,7 +193,6 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap, for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) { struct kfd_process_device *pdd; - struct amdgpu_device *adev; pr_debug("mapping to gpu idx 0x%x\n", gpuidx); pdd = kfd_process_device_from_gpuidx(p, gpuidx); @@ -201,9 +200,8 @@ svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap, pr_debug("failed to find device idx %d\n", gpuidx); return -EINVAL; } - adev = (struct amdgpu_device *)pdd->dev->kgd; - r = svm_range_dma_map_dev(adev, prange, offset, npages, + r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages, hmm_pfns, gpuidx); if (r) break; @@ -581,7 +579,7 @@ svm_range_get_adev_by_id(struct svm_range *prange, uint32_t gpu_id) return NULL; } - return (struct amdgpu_device *)pdd->dev->kgd; + return pdd->dev->adev; } struct kfd_process_device * @@ -593,7 +591,7 @@ svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev) p = container_of(prange->svms, struct kfd_process, svms); - r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpu_idx); + r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpu_idx); if (r) { pr_debug("failed to get device id by adev %p\n", adev); return NULL; @@ -1053,8 +1051,8 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange, if (domain == SVM_RANGE_VRAM_DOMAIN) bo_adev = amdgpu_ttm_adev(prange->svm_bo->bo->tbo.bdev); - switch (adev->asic_type) { - case CHIP_ARCTURUS: + switch (KFD_GC_VERSION(adev->kfd.dev)) { + case IP_VERSION(9, 4, 1): if (domain == SVM_RANGE_VRAM_DOMAIN) { if (bo_adev == adev) { 
mapping_flags |= coherent ? @@ -1070,7 +1068,7 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange, AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; } break; - case CHIP_ALDEBARAN: + case IP_VERSION(9, 4, 2): if (domain == SVM_RANGE_VRAM_DOMAIN) { if (bo_adev == adev) { mapping_flags |= coherent ? @@ -1129,7 +1127,6 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start, DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE); struct kfd_process_device *pdd; struct dma_fence *fence = NULL; - struct amdgpu_device *adev; struct kfd_process *p; uint32_t gpuidx; int r = 0; @@ -1145,9 +1142,9 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start, pr_debug("failed to find device idx %d\n", gpuidx); return -EINVAL; } - adev = (struct amdgpu_device *)pdd->dev->kgd; - r = svm_range_unmap_from_gpu(adev, drm_priv_to_vm(pdd->drm_priv), + r = svm_range_unmap_from_gpu(pdd->dev->adev, + drm_priv_to_vm(pdd->drm_priv), start, last, &fence); if (r) break; @@ -1159,7 +1156,7 @@ svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start, if (r) break; } - amdgpu_amdkfd_flush_gpu_tlb_pasid((struct kgd_dev *)adev, + amdgpu_amdkfd_flush_gpu_tlb_pasid(pdd->dev->adev, p->pasid, TLB_FLUSH_HEAVYWEIGHT); } @@ -1243,8 +1240,7 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct kfd_process *p; p = container_of(prange->svms, struct kfd_process, svms); - amdgpu_amdkfd_flush_gpu_tlb_pasid((struct kgd_dev *)adev, - p->pasid, TLB_FLUSH_LEGACY); + amdgpu_amdkfd_flush_gpu_tlb_pasid(adev, p->pasid, TLB_FLUSH_LEGACY); } out: return r; @@ -1257,7 +1253,6 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset, { struct kfd_process_device *pdd; struct amdgpu_device *bo_adev; - struct amdgpu_device *adev; struct kfd_process *p; struct dma_fence *fence = NULL; uint32_t gpuidx; @@ -1276,19 +1271,18 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset, pr_debug("failed to find device idx %d\n", gpuidx); return -EINVAL; } - adev = (struct amdgpu_device *)pdd->dev->kgd; pdd = kfd_bind_process_to_device(pdd->dev, p); if (IS_ERR(pdd)) return -EINVAL; - if (bo_adev && adev != bo_adev && - !amdgpu_xgmi_same_hive(adev, bo_adev)) { + if (bo_adev && pdd->dev->adev != bo_adev && + !amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) { pr_debug("cannot map to device idx %d\n", gpuidx); continue; } - r = svm_range_map_to_gpu(adev, drm_priv_to_vm(pdd->drm_priv), + r = svm_range_map_to_gpu(pdd->dev->adev, drm_priv_to_vm(pdd->drm_priv), prange, offset, npages, readonly, prange->dma_addr[gpuidx], bo_adev, wait ? 
&fence : NULL); @@ -1322,7 +1316,6 @@ struct svm_validate_context { static int svm_range_reserve_bos(struct svm_validate_context *ctx) { struct kfd_process_device *pdd; - struct amdgpu_device *adev; struct amdgpu_vm *vm; uint32_t gpuidx; int r; @@ -1334,7 +1327,6 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx) pr_debug("failed to find device idx %d\n", gpuidx); return -EINVAL; } - adev = (struct amdgpu_device *)pdd->dev->kgd; vm = drm_priv_to_vm(pdd->drm_priv); ctx->tv[gpuidx].bo = &vm->root.bo->tbo; @@ -1356,9 +1348,9 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx) r = -EINVAL; goto unreserve_out; } - adev = (struct amdgpu_device *)pdd->dev->kgd; - r = amdgpu_vm_validate_pt_bos(adev, drm_priv_to_vm(pdd->drm_priv), + r = amdgpu_vm_validate_pt_bos(pdd->dev->adev, + drm_priv_to_vm(pdd->drm_priv), svm_range_bo_validate, NULL); if (r) { pr_debug("failed %d validate pt bos\n", r); @@ -1381,12 +1373,10 @@ static void svm_range_unreserve_bos(struct svm_validate_context *ctx) static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx) { struct kfd_process_device *pdd; - struct amdgpu_device *adev; pdd = kfd_process_device_from_gpuidx(p, gpuidx); - adev = (struct amdgpu_device *)pdd->dev->kgd; - return SVM_ADEV_PGMAP_OWNER(adev); + return SVM_ADEV_PGMAP_OWNER(pdd->dev->adev); } /* @@ -1574,7 +1564,6 @@ retry_flush_work: static void svm_range_restore_work(struct work_struct *work) { struct delayed_work *dwork = to_delayed_work(work); - struct amdkfd_process_info *process_info; struct svm_range_list *svms; struct svm_range *prange; struct kfd_process *p; @@ -1594,12 +1583,10 @@ static void svm_range_restore_work(struct work_struct *work) * the lifetime of this thread, kfd_process and mm will be valid. */ p = container_of(svms, struct kfd_process, svms); - process_info = p->kgd_process_info; mm = p->mm; if (!mm) return; - mutex_lock(&process_info->lock); svm_range_list_lock_and_flush_work(svms, mm); mutex_lock(&svms->lock); @@ -1652,7 +1639,6 @@ static void svm_range_restore_work(struct work_struct *work) out_reschedule: mutex_unlock(&svms->lock); mmap_write_unlock(mm); - mutex_unlock(&process_info->lock); /* If validation failed, reschedule another attempt */ if (evicted_ranges) { @@ -1966,23 +1952,30 @@ svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange) static void svm_range_drain_retry_fault(struct svm_range_list *svms) { struct kfd_process_device *pdd; - struct amdgpu_device *adev; struct kfd_process *p; + int drain; uint32_t i; p = container_of(svms, struct kfd_process, svms); +restart: + drain = atomic_read(&svms->drain_pagefaults); + if (!drain) + return; + for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) { pdd = p->pdds[i]; if (!pdd) continue; pr_debug("drain retry fault gpu %d svms %p\n", i, svms); - adev = (struct amdgpu_device *)pdd->dev->kgd; - amdgpu_ih_wait_on_checkpoint_process(adev, &adev->irq.ih1); + amdgpu_ih_wait_on_checkpoint_process(pdd->dev->adev, + &pdd->dev->adev->irq.ih1); pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms); } + if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain) + goto restart; } static void svm_range_deferred_list_work(struct work_struct *work) @@ -1990,43 +1983,41 @@ static void svm_range_deferred_list_work(struct work_struct *work) struct svm_range_list *svms; struct svm_range *prange; struct mm_struct *mm; + struct kfd_process *p; svms = container_of(work, struct svm_range_list, deferred_list_work); pr_debug("enter svms 0x%p\n", svms); + p = 
container_of(svms, struct kfd_process, svms); + /* Avoid mm is gone when inserting mmu notifier */ + mm = get_task_mm(p->lead_thread); + if (!mm) { + pr_debug("svms 0x%p process mm gone\n", svms); + return; + } +retry: + mmap_write_lock(mm); + + /* Checking for the need to drain retry faults must be inside + * mmap write lock to serialize with munmap notifiers. + */ + if (unlikely(atomic_read(&svms->drain_pagefaults))) { + mmap_write_unlock(mm); + svm_range_drain_retry_fault(svms); + goto retry; + } + spin_lock(&svms->deferred_list_lock); while (!list_empty(&svms->deferred_range_list)) { prange = list_first_entry(&svms->deferred_range_list, struct svm_range, deferred_list); + list_del_init(&prange->deferred_list); spin_unlock(&svms->deferred_list_lock); + pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange, prange->start, prange->last, prange->work_item.op); - mm = prange->work_item.mm; -retry: - mmap_write_lock(mm); mutex_lock(&svms->lock); - - /* Checking for the need to drain retry faults must be in - * mmap write lock to serialize with munmap notifiers. - * - * Remove from deferred_list must be inside mmap write lock, - * otherwise, svm_range_list_lock_and_flush_work may hold mmap - * write lock, and continue because deferred_list is empty, then - * deferred_list handle is blocked by mmap write lock. - */ - spin_lock(&svms->deferred_list_lock); - if (unlikely(svms->drain_pagefaults)) { - svms->drain_pagefaults = false; - spin_unlock(&svms->deferred_list_lock); - mutex_unlock(&svms->lock); - mmap_write_unlock(mm); - svm_range_drain_retry_fault(svms); - goto retry; - } - list_del_init(&prange->deferred_list); - spin_unlock(&svms->deferred_list_lock); - mutex_lock(&prange->migrate_mutex); while (!list_empty(&prange->child_list)) { struct svm_range *pchild; @@ -2042,12 +2033,13 @@ retry: svm_range_handle_list_op(svms, prange); mutex_unlock(&svms->lock); - mmap_write_unlock(mm); spin_lock(&svms->deferred_list_lock); } spin_unlock(&svms->deferred_list_lock); + mmap_write_unlock(mm); + mmput(mm); pr_debug("exit svms 0x%p\n", svms); } @@ -2056,12 +2048,6 @@ svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange, struct mm_struct *mm, enum svm_work_list_ops op) { spin_lock(&svms->deferred_list_lock); - /* Make sure pending page faults are drained in the deferred worker - * before the range is freed to avoid straggler interrupts on - * unmapped memory causing "phantom faults". - */ - if (op == SVM_OP_UNMAP_RANGE) - svms->drain_pagefaults = true; /* if prange is on the deferred list */ if (!list_empty(&prange->deferred_list)) { pr_debug("update exist prange 0x%p work op %d\n", prange, op); @@ -2140,6 +2126,12 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange, pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms, prange, prange->start, prange->last, start, last); + /* Make sure pending page faults are drained in the deferred worker + * before the range is freed to avoid straggler interrupts on + * unmapped memory causing "phantom faults". 
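 *
 * (Aside, not part of the patch: drain_pagefaults is converted from a bool to
 * an atomic_t so concurrent unmaps are not lost. A rough sketch of the
 * protocol this series uses:
 *
 *     atomic_inc(&svms->drain_pagefaults);            // unmap / list_fini side
 *
 *     drain = atomic_read(&svms->drain_pagefaults);   // svm_range_drain_retry_fault
 *     ... wait on the IH ring checkpoint for every supported GPU ...
 *     if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain)
 *         goto restart;                               // more unmaps raced in
 *
 * and the fault handler drops retry faults while the count is non-zero.)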
+ */ + atomic_inc(&svms->drain_pagefaults); + unmap_parent = start <= prange->start && last >= prange->last; list_for_each_entry(pchild, &prange->child_list, child_list) { @@ -2301,7 +2293,7 @@ svm_range_best_restore_location(struct svm_range *prange, p = container_of(prange->svms, struct kfd_process, svms); - r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, gpuidx); + r = kfd_process_gpuid_from_adev(p, adev, &gpuid, gpuidx); if (r < 0) { pr_debug("failed to get gpuid from kgd\n"); return -1; @@ -2478,7 +2470,7 @@ svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev, pr_debug("Failed to create prange in address [0x%llx]\n", addr); return NULL; } - if (kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx)) { + if (kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx)) { pr_debug("failed to get gpuid from kgd\n"); svm_range_free(prange); return NULL; @@ -2545,7 +2537,7 @@ svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p, uint32_t gpuid; int r; - r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx); + r = kfd_process_gpuid_from_adev(p, adev, &gpuid, &gpuidx); if (r < 0) return; } @@ -2559,20 +2551,13 @@ svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p, } static bool -svm_fault_allowed(struct mm_struct *mm, uint64_t addr, bool write_fault) +svm_fault_allowed(struct vm_area_struct *vma, bool write_fault) { unsigned long requested = VM_READ; - struct vm_area_struct *vma; if (write_fault) requested |= VM_WRITE; - vma = find_vma(mm, addr << PAGE_SHIFT); - if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) { - pr_debug("address 0x%llx VMA is removed\n", addr); - return true; - } - pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested, vma->vm_flags); return (vma->vm_flags & requested) == requested; @@ -2590,6 +2575,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, int32_t best_loc; int32_t gpuidx = MAX_GPU_INSTANCE; bool write_locked = false; + struct vm_area_struct *vma; int r = 0; if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) { @@ -2600,7 +2586,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, p = kfd_lookup_process_by_pasid(pasid); if (!p) { pr_debug("kfd process not founded pasid 0x%x\n", pasid); - return -ESRCH; + return 0; } if (!p->xnack_enabled) { pr_debug("XNACK not enabled for pasid 0x%x\n", pasid); @@ -2611,10 +2597,19 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr); + if (atomic_read(&svms->drain_pagefaults)) { + pr_debug("draining retry fault, drop fault 0x%llx\n", addr); + r = 0; + goto out; + } + + /* p->lead_thread is available as kfd_process_wq_release flush the work + * before releasing task ref. + */ mm = get_task_mm(p->lead_thread); if (!mm) { pr_debug("svms 0x%p failed to get mm\n", svms); - r = -ESRCH; + r = 0; goto out; } @@ -2652,6 +2647,7 @@ retry_write_locked: if (svm_range_skip_recover(prange)) { amdgpu_gmc_filter_faults_remove(adev, addr, pasid); + r = 0; goto out_unlock_range; } @@ -2660,10 +2656,21 @@ retry_write_locked: if (timestamp < AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING) { pr_debug("svms 0x%p [0x%lx %lx] already restored\n", svms, prange->start, prange->last); + r = 0; goto out_unlock_range; } - if (!svm_fault_allowed(mm, addr, write_fault)) { + /* __do_munmap removed VMA, return success as we are handling stale + * retry fault. 
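 *
 * (Aside, not part of the patch: svm_fault_allowed() now takes the VMA rather
 * than (mm, addr); the caller looks the VMA up once and treats a missing VMA
 * as a stale fault. Equivalent sketch of the simplified helper:
 *
 *     static bool svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
 *     {
 *         unsigned long requested = VM_READ | (write_fault ? VM_WRITE : 0);
 *
 *         return (vma->vm_flags & requested) == requested;
 *     }
 * )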
+ */ + vma = find_vma(mm, addr << PAGE_SHIFT); + if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) { + pr_debug("address 0x%llx VMA is removed\n", addr); + r = 0; + goto out_unlock_range; + } + + if (!svm_fault_allowed(vma, write_fault)) { pr_debug("fault addr 0x%llx no %s permission\n", addr, write_fault ? "write" : "read"); r = -EPERM; @@ -2741,6 +2748,14 @@ void svm_range_list_fini(struct kfd_process *p) /* Ensure list work is finished before process is destroyed */ flush_work(&p->svms.deferred_list_work); + /* + * Ensure no retry fault comes in afterwards, as page fault handler will + * not find kfd process and take mm lock to recover fault. + */ + atomic_inc(&p->svms.drain_pagefaults); + svm_range_drain_retry_fault(&p->svms); + + list_for_each_entry_safe(prange, next, &p->svms.list, list) { svm_range_unlink(prange); svm_range_remove_notifier(prange); @@ -2761,6 +2776,7 @@ int svm_range_list_init(struct kfd_process *p) mutex_init(&svms->lock); INIT_LIST_HEAD(&svms->list); atomic_set(&svms->evicted_ranges, 0); + atomic_set(&svms->drain_pagefaults, 0); INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work); INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work); INIT_LIST_HEAD(&svms->deferred_range_list); @@ -2953,7 +2969,6 @@ svm_range_best_prefetch_location(struct svm_range *prange) uint32_t best_loc = prange->prefetch_loc; struct kfd_process_device *pdd; struct amdgpu_device *bo_adev; - struct amdgpu_device *adev; struct kfd_process *p; uint32_t gpuidx; @@ -2981,12 +2996,11 @@ svm_range_best_prefetch_location(struct svm_range *prange) pr_debug("failed to get device by idx 0x%x\n", gpuidx); continue; } - adev = (struct amdgpu_device *)pdd->dev->kgd; - if (adev == bo_adev) + if (pdd->dev->adev == bo_adev) continue; - if (!amdgpu_xgmi_same_hive(adev, bo_adev)) { + if (!amdgpu_xgmi_same_hive(pdd->dev->adev, bo_adev)) { best_loc = 0; break; } @@ -3150,7 +3164,6 @@ static int svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size, uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs) { - struct amdkfd_process_info *process_info = p->kgd_process_info; struct mm_struct *mm = current->mm; struct list_head update_list; struct list_head insert_list; @@ -3169,8 +3182,6 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size, svms = &p->svms; - mutex_lock(&process_info->lock); - svm_range_list_lock_and_flush_work(svms, mm); r = svm_range_is_valid(p, start, size); @@ -3246,8 +3257,6 @@ out_unlock_range: mutex_unlock(&svms->lock); mmap_read_unlock(mm); out: - mutex_unlock(&process_info->lock); - pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid, &p->svms, start, start + size - 1, r); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index dd593ad0614a..2d44b26b6657 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -113,7 +113,7 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev) return device; } -struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd) +struct kfd_dev *kfd_device_by_adev(const struct amdgpu_device *adev) { struct kfd_topology_device *top_dev; struct kfd_dev *device = NULL; @@ -121,7 +121,7 @@ struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd) down_read(&topology_lock); list_for_each_entry(top_dev, &topology_device_list, list) - if (top_dev->gpu && top_dev->gpu->kgd == kgd) { + if (top_dev->gpu && top_dev->gpu->adev == adev) { device = top_dev->gpu; break; } @@ -515,7 
+515,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, HSA_CAP_WATCH_POINTS_TOTALBITS_MASK); } - if (dev->gpu->device_info->asic_family == CHIP_TONGA) + if (dev->gpu->adev->asic_type == CHIP_TONGA) dev->node_props.capability |= HSA_CAP_AQL_QUEUE_DOUBLE_MAP; @@ -531,7 +531,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, sysfs_show_32bit_prop(buffer, offs, "sdma_fw_version", dev->gpu->sdma_fw_version); sysfs_show_64bit_prop(buffer, offs, "unique_id", - amdgpu_amdkfd_get_unique_id(dev->gpu->kgd)); + dev->gpu->adev->unique_id); } @@ -1106,7 +1106,7 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu) if (!gpu) return 0; - amdgpu_amdkfd_get_local_mem_info(gpu->kgd, &local_mem_info); + amdgpu_amdkfd_get_local_mem_info(gpu->adev, &local_mem_info); local_mem_size = local_mem_info.local_mem_size_private + local_mem_info.local_mem_size_public; @@ -1189,7 +1189,7 @@ static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev) * for APUs - If CRAT from ACPI reports more than one bank, then * all the banks will report the same mem_clk_max information */ - amdgpu_amdkfd_get_local_mem_info(dev->gpu->kgd, &local_mem_info); + amdgpu_amdkfd_get_local_mem_info(dev->gpu->adev, &local_mem_info); list_for_each_entry(mem, &dev->mem_props, list) mem->mem_clk_max = local_mem_info.mem_clk_max; @@ -1217,8 +1217,7 @@ static void kfd_set_iolink_no_atomics(struct kfd_topology_device *dev, /* set gpu (dev) flags. */ } else { if (!dev->gpu->pci_atomic_requested || - dev->gpu->device_info->asic_family == - CHIP_HAWAII) + dev->gpu->adev->asic_type == CHIP_HAWAII) link->flags |= CRAT_IOLINK_FLAGS_NO_ATOMICS_32_BIT | CRAT_IOLINK_FLAGS_NO_ATOMICS_64_BIT; } @@ -1239,7 +1238,7 @@ static void kfd_set_iolink_non_coherent(struct kfd_topology_device *to_dev, */ if (inbound_link->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS || (inbound_link->iolink_type == CRAT_IOLINK_TYPE_XGMI && - to_dev->gpu->device_info->asic_family == CHIP_VEGA20)) { + KFD_GC_VERSION(to_dev->gpu) == IP_VERSION(9, 4, 0))) { outbound_link->flags |= CRAT_IOLINK_FLAGS_NON_COHERENT; inbound_link->flags |= CRAT_IOLINK_FLAGS_NON_COHERENT; } @@ -1286,7 +1285,6 @@ int kfd_topology_add_device(struct kfd_dev *gpu) void *crat_image = NULL; size_t image_size = 0; int proximity_domain; - struct amdgpu_device *adev; INIT_LIST_HEAD(&temp_topology_device_list); @@ -1296,10 +1294,8 @@ int kfd_topology_add_device(struct kfd_dev *gpu) proximity_domain = atomic_inc_return(&topology_crat_proximity_domain); - adev = (struct amdgpu_device *)(gpu->kgd); - /* Include the CPU in xGMI hive if xGMI connected by assigning it the hive ID. 
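 *
 * (Aside, not part of the patch: this hunk shows the pattern used throughout
 * the series — struct kfd_dev now stores the amdgpu_device pointer directly,
 * so the old opaque-handle cast disappears. Roughly:
 *
 *     struct amdgpu_device *adev = (struct amdgpu_device *)gpu->kgd;  // before
 *     if (gpu->hive_id && adev->gmc.xgmi.connected_to_cpu) ...
 *
 *     if (gpu->hive_id && gpu->adev->gmc.xgmi.connected_to_cpu) ...   // after
 * )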
*/ - if (gpu->hive_id && adev->gmc.xgmi.connected_to_cpu) { + if (gpu->hive_id && gpu->adev->gmc.xgmi.connected_to_cpu) { struct kfd_topology_device *top_dev; down_read(&topology_lock); @@ -1372,7 +1368,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu) * needed for the topology */ - amdgpu_amdkfd_get_cu_info(dev->gpu->kgd, &cu_info); + amdgpu_amdkfd_get_cu_info(dev->gpu->adev, &cu_info); strncpy(dev->node_props.name, gpu->device_info->asic_name, KFD_TOPOLOGY_PUBLIC_NAME_SIZE); @@ -1384,33 +1380,32 @@ int kfd_topology_add_device(struct kfd_dev *gpu) dev->node_props.vendor_id = gpu->pdev->vendor; dev->node_props.device_id = gpu->pdev->device; dev->node_props.capability |= - ((amdgpu_amdkfd_get_asic_rev_id(dev->gpu->kgd) << - HSA_CAP_ASIC_REVISION_SHIFT) & + ((dev->gpu->adev->rev_id << HSA_CAP_ASIC_REVISION_SHIFT) & HSA_CAP_ASIC_REVISION_MASK); dev->node_props.location_id = pci_dev_id(gpu->pdev); dev->node_props.domain = pci_domain_nr(gpu->pdev->bus); dev->node_props.max_engine_clk_fcompute = - amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->kgd); + amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->adev); dev->node_props.max_engine_clk_ccompute = cpufreq_quick_get_max(0) / 1000; dev->node_props.drm_render_minor = gpu->shared_resources.drm_render_minor; dev->node_props.hive_id = gpu->hive_id; - dev->node_props.num_sdma_engines = gpu->device_info->num_sdma_engines; + dev->node_props.num_sdma_engines = kfd_get_num_sdma_engines(gpu); dev->node_props.num_sdma_xgmi_engines = - gpu->device_info->num_xgmi_sdma_engines; + kfd_get_num_xgmi_sdma_engines(gpu); dev->node_props.num_sdma_queues_per_engine = gpu->device_info->num_sdma_queues_per_engine; dev->node_props.num_gws = (dev->gpu->gws && dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ? - amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0; + dev->gpu->adev->gds.gws_size : 0; dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm); kfd_fill_mem_clk_max_info(dev); kfd_fill_iolink_non_crat_info(dev); - switch (dev->gpu->device_info->asic_family) { + switch (dev->gpu->adev->asic_type) { case CHIP_KAVERI: case CHIP_HAWAII: case CHIP_TONGA: @@ -1429,30 +1424,14 @@ int kfd_topology_add_device(struct kfd_dev *gpu) HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) & HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK); break; - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: - case CHIP_RAVEN: - case CHIP_RENOIR: - case CHIP_ARCTURUS: - case CHIP_ALDEBARAN: - case CHIP_NAVI10: - case CHIP_NAVI12: - case CHIP_NAVI14: - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_VANGOGH: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_YELLOW_CARP: - case CHIP_CYAN_SKILLFISH: - dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 << - HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) & - HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK); - break; default: - WARN(1, "Unexpected ASIC family %u", - dev->gpu->device_info->asic_family); + if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(9, 0, 1)) + dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 << + HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) & + HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK); + else + WARN(1, "Unexpected ASIC family %u", + dev->gpu->adev->asic_type); } /* @@ -1469,7 +1448,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu) * because it doesn't consider masked out CUs * max_waves_per_simd: Carrizo reports wrong max_waves_per_simd */ - if (dev->gpu->device_info->asic_family == CHIP_CARRIZO) { + if (dev->gpu->adev->asic_type == CHIP_CARRIZO) { dev->node_props.simd_count = cu_info.simd_per_cu * 
cu_info.cu_active_number; dev->node_props.max_waves_per_simd = 10; @@ -1477,16 +1456,17 @@ int kfd_topology_add_device(struct kfd_dev *gpu) /* kfd only concerns sram ecc on GFX and HBM ecc on UMC */ dev->node_props.capability |= - ((adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0) ? + ((dev->gpu->adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0) ? HSA_CAP_SRAM_EDCSUPPORTED : 0; - dev->node_props.capability |= ((adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ? + dev->node_props.capability |= + ((dev->gpu->adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ? HSA_CAP_MEM_EDCSUPPORTED : 0; - if (adev->asic_type != CHIP_VEGA10) - dev->node_props.capability |= (adev->ras_enabled != 0) ? + if (KFD_GC_VERSION(dev->gpu) != IP_VERSION(9, 0, 1)) + dev->node_props.capability |= (dev->gpu->adev->ras_enabled != 0) ? HSA_CAP_RASEVENTNOTIFY : 0; - if (KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) + if (KFD_IS_SVM_API_SUPPORTED(dev->gpu->adev->kfd.dev)) dev->node_props.capability |= HSA_CAP_SVMAPI_SUPPORTED; kfd_debug_print_topology(); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h index a8db017c9b8e..f0cc59d2fd5d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h @@ -25,38 +25,11 @@ #include <linux/types.h> #include <linux/list.h> +#include <linux/kfd_sysfs.h> #include "kfd_crat.h" #define KFD_TOPOLOGY_PUBLIC_NAME_SIZE 32 -#define HSA_CAP_HOT_PLUGGABLE 0x00000001 -#define HSA_CAP_ATS_PRESENT 0x00000002 -#define HSA_CAP_SHARED_WITH_GRAPHICS 0x00000004 -#define HSA_CAP_QUEUE_SIZE_POW2 0x00000008 -#define HSA_CAP_QUEUE_SIZE_32BIT 0x00000010 -#define HSA_CAP_QUEUE_IDLE_EVENT 0x00000020 -#define HSA_CAP_VA_LIMIT 0x00000040 -#define HSA_CAP_WATCH_POINTS_SUPPORTED 0x00000080 -#define HSA_CAP_WATCH_POINTS_TOTALBITS_MASK 0x00000f00 -#define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT 8 -#define HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK 0x00003000 -#define HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT 12 - -#define HSA_CAP_DOORBELL_TYPE_PRE_1_0 0x0 -#define HSA_CAP_DOORBELL_TYPE_1_0 0x1 -#define HSA_CAP_DOORBELL_TYPE_2_0 0x2 -#define HSA_CAP_AQL_QUEUE_DOUBLE_MAP 0x00004000 - -#define HSA_CAP_RESERVED_WAS_SRAM_EDCSUPPORTED 0x00080000 /* Old buggy user mode depends on this being 0 */ -#define HSA_CAP_MEM_EDCSUPPORTED 0x00100000 -#define HSA_CAP_RASEVENTNOTIFY 0x00200000 -#define HSA_CAP_ASIC_REVISION_MASK 0x03c00000 -#define HSA_CAP_ASIC_REVISION_SHIFT 22 -#define HSA_CAP_SRAM_EDCSUPPORTED 0x04000000 -#define HSA_CAP_SVMAPI_SUPPORTED 0x08000000 -#define HSA_CAP_FLAGS_COHERENTHOSTACCESS 0x10000000 -#define HSA_CAP_RESERVED 0xe00f8000 - struct kfd_node_properties { uint64_t hive_id; uint32_t cpu_cores_count; @@ -93,17 +66,6 @@ struct kfd_node_properties { char name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE]; }; -#define HSA_MEM_HEAP_TYPE_SYSTEM 0 -#define HSA_MEM_HEAP_TYPE_FB_PUBLIC 1 -#define HSA_MEM_HEAP_TYPE_FB_PRIVATE 2 -#define HSA_MEM_HEAP_TYPE_GPU_GDS 3 -#define HSA_MEM_HEAP_TYPE_GPU_LDS 4 -#define HSA_MEM_HEAP_TYPE_GPU_SCRATCH 5 - -#define HSA_MEM_FLAGS_HOT_PLUGGABLE 0x00000001 -#define HSA_MEM_FLAGS_NON_VOLATILE 0x00000002 -#define HSA_MEM_FLAGS_RESERVED 0xfffffffc - struct kfd_mem_properties { struct list_head list; uint32_t heap_type; @@ -116,12 +78,6 @@ struct kfd_mem_properties { struct attribute attr; }; -#define HSA_CACHE_TYPE_DATA 0x00000001 -#define HSA_CACHE_TYPE_INSTRUCTION 0x00000002 -#define HSA_CACHE_TYPE_CPU 0x00000004 -#define HSA_CACHE_TYPE_HSACU 0x00000008 -#define HSA_CACHE_TYPE_RESERVED 0xfffffff0 - struct 
kfd_cache_properties { struct list_head list; uint32_t processor_id_low; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 4130082c5873..116a280d8a20 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -51,6 +51,7 @@ #include <drm/drm_hdcp.h> #endif #include "amdgpu_pm.h" +#include "amdgpu_atombios.h" #include "amd_shared.h" #include "amdgpu_dm_irq.h" @@ -789,8 +790,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params) plink = adev->dm.dc->links[notify.link_index]; if (plink) { plink->hpd_status = - notify.hpd_status == - DP_HPD_PLUG ? true : false; + notify.hpd_status == DP_HPD_PLUG; } } queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work); @@ -1455,6 +1455,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) init_data.flags.power_down_display_on_boot = true; + if (check_seamless_boot_capability(adev)) { + init_data.flags.power_down_display_on_boot = false; + init_data.flags.allow_seamless_boot_optimization = true; + DRM_INFO("Seamless boot condition check passed\n"); + } + INIT_LIST_HEAD(&adev->dm.da_list); /* Display Core create. */ adev->dm.dc = dc_create(&init_data); @@ -1479,8 +1485,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER) adev->dm.dc->debug.disable_stutter = true; - if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) + if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) { adev->dm.dc->debug.disable_dsc = true; + adev->dm.dc->debug.disable_dsc_edp = true; + } if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING) adev->dm.dc->debug.disable_clock_gate = true; @@ -2303,14 +2311,6 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) goto fail; } - - res = dc_validate_global_state(dc, context, false); - - if (res != DC_OK) { - DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res); - goto fail; - } - res = dc_commit_state(dc, context); fail: @@ -2561,6 +2561,23 @@ static int dm_resume(void *handle) if (amdgpu_in_reset(adev)) { dc_state = dm->cached_dc_state; + /* + * The dc->current_state is backed up into dm->cached_dc_state + * before we commit 0 streams. + * + * DC will clear link encoder assignments on the real state + * but the changes won't propagate over to the copy we made + * before the 0 streams commit. + * + * DC expects that link encoder assignments are *not* valid + * when committing a state, so as a workaround it needs to be + * cleared here. + */ + link_enc_cfg_init(dm->dc, dc_state); + + if (dc_enable_dmub_notifications(adev->dm.dc)) + amdgpu_dm_outbox_init(adev); + r = dm_dmub_hw_init(adev); if (r) DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); @@ -2572,20 +2589,11 @@ static int dm_resume(void *handle) for (i = 0; i < dc_state->stream_count; i++) { dc_state->streams[i]->mode_changed = true; - for (j = 0; j < dc_state->stream_status->plane_count; j++) { - dc_state->stream_status->plane_states[j]->update_flags.raw + for (j = 0; j < dc_state->stream_status[i].plane_count; j++) { + dc_state->stream_status[i].plane_states[j]->update_flags.raw = 0xffffffff; } } -#if defined(CONFIG_DRM_AMD_DC_DCN) - /* - * Resource allocation happens for link encoders for newer ASIC in - * dc_validate_global_state, so we need to revalidate it. - * - * This shouldn't fail (it passed once before), so warn if it does. 
- */ - WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK); -#endif WARN_ON(!dc_commit_state(dm->dc, dc_state)); @@ -2608,6 +2616,10 @@ static int dm_resume(void *handle) /* TODO: Remove dc_state->dccg, use dc->dccg directly. */ dc_resource_state_construct(dm->dc, dm_state->context); + /* Re-enable outbox interrupts for DPIA. */ + if (dc_enable_dmub_notifications(adev->dm.dc)) + amdgpu_dm_outbox_init(adev); + /* Before powering on DC we need to re-initialize DMUB. */ r = dm_dmub_hw_init(adev); if (r) @@ -3909,6 +3921,9 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, caps = dm->backlight_caps[bl_idx]; dm->brightness[bl_idx] = user_brightness; + /* update scratch register */ + if (bl_idx == 0) + amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]); brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]); link = (struct dc_link *)dm->backlight_link[bl_idx]; @@ -4242,7 +4257,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) } else if (dc_link_detect(link, DETECT_REASON_BOOT)) { amdgpu_dm_update_connector_after_detect(aconnector); register_backlight_device(dm, link); - + if (dm->num_of_edps) + update_connector_ext_caps(aconnector); if (psr_feature_enabled) amdgpu_dm_set_psr_caps(link); } @@ -4250,6 +4266,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) } + /* + * Disable vblank IRQs aggressively for power-saving. + * + * TODO: Fix vblank control helpers to delay PSR entry to allow this when PSR + * is also supported. + */ + adev_to_drm(adev)->vblank_disable_immediate = !psr_feature_enabled; + /* Software is initialized. Now we can register interrupt handlers. */ switch (adev->asic_type) { #if defined(CONFIG_DRM_AMD_DC_SI) @@ -6035,7 +6059,8 @@ static void update_dsc_caps(struct amdgpu_dm_connector *aconnector, { stream->timing.flags.DSC = 0; - if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { + if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || + sink->sink_signal == SIGNAL_TYPE_EDP)) { dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, @@ -6043,6 +6068,64 @@ static void update_dsc_caps(struct amdgpu_dm_connector *aconnector, } } +static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector, + struct dc_sink *sink, struct dc_stream_state *stream, + struct dsc_dec_dpcd_caps *dsc_caps, + uint32_t max_dsc_target_bpp_limit_override) +{ + const struct dc_link_settings *verified_link_cap = NULL; + uint32_t link_bw_in_kbps; + uint32_t edp_min_bpp_x16, edp_max_bpp_x16; + struct dc *dc = sink->ctx->dc; + struct dc_dsc_bw_range bw_range = {0}; + struct dc_dsc_config dsc_cfg = {0}; + + verified_link_cap = dc_link_get_link_cap(stream->link); + link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap); + edp_min_bpp_x16 = 8 * 16; + edp_max_bpp_x16 = 8 * 16; + + if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel) + edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel; + + if (edp_max_bpp_x16 < edp_min_bpp_x16) + edp_min_bpp_x16 = edp_max_bpp_x16; + + if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0], + dc->debug.dsc_min_slice_height_override, + edp_min_bpp_x16, edp_max_bpp_x16, + dsc_caps, + &stream->timing, + &bw_range)) { + + if (bw_range.max_kbps < link_bw_in_kbps) { + if (dc_dsc_compute_config(dc->res_pool->dscs[0], + dsc_caps, + 
dc->debug.dsc_min_slice_height_override, + max_dsc_target_bpp_limit_override, + 0, + &stream->timing, + &dsc_cfg)) { + stream->timing.dsc_cfg = dsc_cfg; + stream->timing.flags.DSC = 1; + stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16; + } + return; + } + } + + if (dc_dsc_compute_config(dc->res_pool->dscs[0], + dsc_caps, + dc->debug.dsc_min_slice_height_override, + max_dsc_target_bpp_limit_override, + link_bw_in_kbps, + &stream->timing, + &dsc_cfg)) { + stream->timing.dsc_cfg = dsc_cfg; + stream->timing.flags.DSC = 1; + } +} + static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, struct dc_sink *sink, struct dc_stream_state *stream, struct dsc_dec_dpcd_caps *dsc_caps) @@ -6050,6 +6133,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, struct drm_connector *drm_connector = &aconnector->base; uint32_t link_bandwidth_kbps; uint32_t max_dsc_target_bpp_limit_override = 0; + struct dc *dc = sink->ctx->dc; link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, dc_link_get_link_cap(aconnector->dc_link)); @@ -6062,7 +6146,12 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, dc_dsc_policy_set_enable_dsc_when_not_needed( aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE); - if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { + if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp && + dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) { + + apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override); + + } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], dsc_caps, @@ -10758,8 +10847,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, trace_amdgpu_dm_atomic_check_begin(state); ret = drm_atomic_helper_check_modeset(dev, state); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n"); goto fail; + } /* Check connector changes */ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { @@ -10775,6 +10866,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc); if (IS_ERR(new_crtc_state)) { + DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n"); ret = PTR_ERR(new_crtc_state); goto fail; } @@ -10789,8 +10881,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (drm_atomic_crtc_needs_modeset(new_crtc_state)) { ret = add_affected_mst_dsc_crtcs(state, crtc); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n"); goto fail; + } } } } @@ -10805,19 +10899,25 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, continue; ret = amdgpu_dm_verify_lut_sizes(new_crtc_state); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n"); goto fail; + } if (!new_crtc_state->enable) continue; ret = drm_atomic_add_affected_connectors(state, crtc); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n"); goto fail; + } ret = drm_atomic_add_affected_planes(state, crtc); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n"); goto fail; + } if 
(dm_old_crtc_state->dsc_force_changed) new_crtc_state->mode_changed = true; @@ -10854,6 +10954,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, if (IS_ERR(new_plane_state)) { ret = PTR_ERR(new_plane_state); + DRM_DEBUG_DRIVER("new_plane_state is BAD\n"); goto fail; } } @@ -10866,8 +10967,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, new_plane_state, false, &lock_and_validation_needed); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n"); goto fail; + } } /* Disable all crtcs which require disable */ @@ -10877,8 +10980,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, new_crtc_state, false, &lock_and_validation_needed); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n"); goto fail; + } } /* Enable all crtcs which require enable */ @@ -10888,8 +10993,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, new_crtc_state, true, &lock_and_validation_needed); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n"); goto fail; + } } /* Add new/modified planes */ @@ -10899,20 +11006,26 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, new_plane_state, true, &lock_and_validation_needed); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n"); goto fail; + } } /* Run this here since we want to validate the streams we created */ ret = drm_atomic_helper_check_planes(dev, state); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n"); goto fail; + } /* Check cursor planes scaling */ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { ret = dm_check_crtc_cursor(state, crtc, new_crtc_state); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n"); goto fail; + } } if (state->legacy_cursor_update) { @@ -10999,20 +11112,28 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, */ if (lock_and_validation_needed) { ret = dm_atomic_get_state(state, &dm_state); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n"); goto fail; + } ret = do_aquire_global_lock(dev, state); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n"); goto fail; + } #if defined(CONFIG_DRM_AMD_DC_DCN) - if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) + if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars)) { + DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n"); goto fail; + } ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n"); goto fail; + } #endif /* @@ -11022,12 +11143,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, * to get stuck in an infinite loop and hang eventually. 
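 *
 * (Aside, not part of the patch: note the related change just below — the
 * atomic check now asks dc_validate_global_state() for a fast validation
 * only, and the full DML/watermark validation is re-run in dc_commit_state()
 * (see the dc.c hunk later in this diff). Roughly:
 *
 *     status = dc_validate_global_state(dc, dm_state->context, true);  // atomic_check
 *     ...
 *     result = dc_validate_global_state(dc, context, false);           // dc_commit_state
 * )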
*/ ret = drm_dp_mst_atomic_check(state); - if (ret) + if (ret) { + DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n"); goto fail; - status = dc_validate_global_state(dc, dm_state->context, false); + } + status = dc_validate_global_state(dc, dm_state->context, true); if (status != DC_OK) { - drm_dbg_atomic(dev, - "DC global validation failure: %s (%d)", + DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)", dc_status_to_str(status), status); ret = -EINVAL; goto fail; @@ -11528,3 +11650,24 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS, (uint32_t *)operation_result); } + +/* + * Check whether seamless boot is supported. + * + * So far we only support seamless boot on CHIP_VANGOGH. + * If everything goes well, we may consider expanding + * seamless boot to other ASICs. + */ +bool check_seamless_boot_capability(struct amdgpu_device *adev) +{ + switch (adev->asic_type) { + case CHIP_VANGOGH: + if (!adev->mman.keep_stolen_vga_memory) + return true; + break; + default: + break; + } + + return false; +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 37e61a88d49e..bb65f41d1a59 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -731,4 +731,7 @@ extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs; int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx, unsigned int link_index, void *payload, void *operation_result); + +bool check_seamless_boot_capability(struct amdgpu_device *adev); + #endif /* __AMDGPU_DM_H__ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index cce062adc439..8a441a22c46e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -314,6 +314,14 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) ret = -EINVAL; goto cleanup; } + + if ((aconn->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) && + (aconn->base.connector_type != DRM_MODE_CONNECTOR_eDP)) { + DRM_DEBUG_DRIVER("No DP connector available for CRC source\n"); + ret = -EINVAL; + goto cleanup; + } + } #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 8cbeeb7c986d..72a2e84645df 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -584,7 +584,7 @@ bool dm_helpers_dp_write_dsc_enable( ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1); } - if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT) { + if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) { ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); DC_LOG_DC("Send DSC %s to sst display\n", enable_dsc ? 
"enable" : "disable"); } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 32a5ce09a62a..cc34a35d0bcb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -36,6 +36,8 @@ #include "dm_helpers.h" #include "dc_link_ddc.h" +#include "ddc_service_types.h" +#include "dpcd_defs.h" #include "i2caux_interface.h" #include "dmub_cmd.h" @@ -157,6 +159,16 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { }; #if defined(CONFIG_DRM_AMD_DC_DCN) +static bool needs_dsc_aux_workaround(struct dc_link *link) +{ + if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && + (link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) && + link->dpcd_caps.sink_count.bits.SINK_COUNT >= 2) + return true; + + return false; +} + static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector) { struct dc_sink *dc_sink = aconnector->dc_sink; @@ -166,7 +178,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto u8 *dsc_branch_dec_caps = NULL; aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port); -#if defined(CONFIG_HP_HOOK_WORKAROUND) + /* * drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs * because it only check the dsc/fec caps of the "port variable" and not the dock @@ -176,10 +188,10 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto * Workaround: explicitly check the use case above and use the mst dock's aux as dsc_aux * */ - - if (!aconnector->dsc_aux && !port->parent->port_parent) + if (!aconnector->dsc_aux && !port->parent->port_parent && + needs_dsc_aux_workaround(aconnector->dc_link)) aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux; -#endif + if (!aconnector->dsc_aux) return false; diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index a4bef4364afd..1e385d55e7fb 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -2995,7 +2995,7 @@ static bool bios_parser2_construct( &bp->object_info_tbl.revision); if (bp->object_info_tbl.revision.major == 1 - && bp->object_info_tbl.revision.minor >= 4) { + && bp->object_info_tbl.revision.minor == 4) { struct display_object_info_table_v1_4 *tbl_v1_4; tbl_v1_4 = GET_IMAGE(struct display_object_info_table_v1_4, @@ -3004,8 +3004,10 @@ static bool bios_parser2_construct( return false; bp->object_info_tbl.v1_4 = tbl_v1_4; - } else + } else { + ASSERT(0); return false; + } dal_firmware_parser_init_cmd_tbl(bp); dal_bios_parser_init_cmd_tbl_helper2(&bp->cmd_helper, dce_version); diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c index 6b248cd2a461..c8b0a2f05b4d 100644 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c @@ -763,7 +763,7 @@ unsigned int get_highest_allowed_voltage_level(uint32_t chip_family, uint32_t hw return 4; } -bool dcn_validate_bandwidth( +bool dcn10_validate_bandwidth( struct dc *dc, struct dc_state *context, bool fast_validate) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 26f96ee32472..9200c8ce02ba 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -308,8 +308,7 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base) case FAMILY_NV: if (ASICREV_IS_SIENNA_CICHLID_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) { dcn3_clk_mgr_destroy(clk_mgr); - } - if (ASICREV_IS_DIMGREY_CAVEFISH_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) { + } else if (ASICREV_IS_DIMGREY_CAVEFISH_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) { dcn3_clk_mgr_destroy(clk_mgr); } if (ASICREV_IS_BEIGE_GOBY_P(clk_mgr_base->ctx->asic_id.hw_internal_rev)) { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index f4c9a458ace8..a13ff1783b9b 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -66,7 +66,7 @@ #define TO_CLK_MGR_DCN31(clk_mgr)\ container_of(clk_mgr, struct clk_mgr_dcn31, base) -int dcn31_get_active_display_cnt_wa( +static int dcn31_get_active_display_cnt_wa( struct dc *dc, struct dc_state *context) { @@ -118,7 +118,7 @@ static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable) } } -static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base, +void dcn31_update_clocks(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool safe_to_lower) { @@ -284,7 +284,7 @@ static void dcn31_enable_pme_wa(struct clk_mgr *clk_mgr_base) dcn31_smu_enable_pme_wa(clk_mgr); } -static void dcn31_init_clocks(struct clk_mgr *clk_mgr) +void dcn31_init_clocks(struct clk_mgr *clk_mgr) { memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); // Assumption is that boot state always supports pstate @@ -294,7 +294,7 @@ static void dcn31_init_clocks(struct clk_mgr *clk_mgr) clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN; } -static bool dcn31_are_clock_states_equal(struct dc_clocks *a, +bool dcn31_are_clock_states_equal(struct dc_clocks *a, struct dc_clocks *b) { if (a->dispclk_khz != b->dispclk_khz) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h index f8f100535526..961b10a49486 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h @@ -39,6 +39,13 @@ struct clk_mgr_dcn31 { struct dcn31_smu_watermark_set smu_wm_set; }; +bool dcn31_are_clock_states_equal(struct dc_clocks *a, + struct dc_clocks *b); +void dcn31_init_clocks(struct clk_mgr *clk_mgr); +void dcn31_update_clocks(struct clk_mgr *clk_mgr_base, + struct dc_state *context, + bool safe_to_lower); + void dcn31_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_dcn31 *clk_mgr, struct pp_smu_funcs *pp_smu, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 0ded4decee05..17b7408d84b7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -221,9 +221,9 @@ static bool create_links( link = link_create(&link_init_params); if (link) { - dc->links[dc->link_count] = link; - link->dc = dc; - ++dc->link_count; + dc->links[dc->link_count] = link; + link->dc = dc; + ++dc->link_count; } } @@ -808,6 +808,10 @@ void dc_stream_set_static_screen_params(struct dc *dc, static void dc_destruct(struct dc *dc) { + // reset link encoder assignment table on destruct + if (dc->res_pool->funcs->link_encs_assign) + link_enc_cfg_init(dc, dc->current_state); + if (dc->current_state) { dc_release_state(dc->current_state); 
dc->current_state = NULL; @@ -1016,8 +1020,6 @@ static bool dc_construct(struct dc *dc, goto fail; } - dc_resource_state_construct(dc, dc->current_state); - if (!create_links(dc, init_params->num_virtual_links)) goto fail; @@ -1027,8 +1029,7 @@ static bool dc_construct(struct dc *dc, if (!create_link_encoders(dc)) goto fail; - /* Initialise DIG link encoder resource tracking variables. */ - link_enc_cfg_init(dc, dc->current_state); + dc_resource_state_construct(dc, dc->current_state); return true; @@ -1830,6 +1831,19 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context) dc_stream_log(dc, stream); } + /* + * Previous validation was performed with fast_validation = true and + * the full DML state required for hardware programming was skipped. + * + * Re-validate here to calculate these parameters / watermarks. + */ + result = dc_validate_global_state(dc, context, false); + if (result != DC_OK) { + DC_LOG_ERROR("DC commit global validation failure: %s (%d)", + dc_status_to_str(result), result); + return result; + } + result = dc_commit_state_no_check(dc, context); return (result == DC_OK); @@ -2870,7 +2884,8 @@ static void commit_planes_for_stream(struct dc *dc, #endif if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) - if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { + if (top_pipe_to_program && + top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) { if (should_use_dmub_lock(stream->link)) { union dmub_hw_lock_flags hw_locks = { 0 }; struct dmub_hw_lock_inst_flags inst_flags = { 0 }; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 60544788e911..faa0bc308fc8 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -270,10 +270,10 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type) /* Link may not have physical HPD pin. */ if (link->ep_type != DISPLAY_ENDPOINT_PHY) { - if (link->hpd_status) - *type = dc_connection_single; - else + if (link->is_hpd_pending || !link->hpd_status) *type = dc_connection_none; + else + *type = dc_connection_single; return true; } @@ -758,6 +758,18 @@ static bool detect_dp(struct dc_link *link, dal_ddc_service_set_transaction_type(link->ddc, sink_caps->transaction_type); +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* Apply work around for tunneled MST on certain USB4 docks. Always use DSC if dock + * reports DSC support. + */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && + link->type == dc_connection_mst_branch && + link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && + link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT && + !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around) + link->wa_flags.dpia_mst_dsc_always_on = true; +#endif + #if defined(CONFIG_DRM_AMD_DC_HDCP) /* In case of fallback to SST when topology discovery below fails * HDCP caps will be queried again later by the upper layer (caller @@ -1203,6 +1215,10 @@ static bool dc_link_detect_helper(struct dc_link *link, LINK_INFO("link=%d, mst branch is now Disconnected\n", link->link_index); + /* Disable work around which keeps DSC on for tunneled MST on certain USB4 docks.
*/ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + link->wa_flags.dpia_mst_dsc_always_on = false; + dm_helpers_dp_mst_stop_top_mgr(link->ctx, link); link->mst_stream_alloc_table.stream_count = 0; @@ -1999,6 +2015,57 @@ static enum dc_status enable_link_dp_mst( return enable_link_dp(state, pipe_ctx); } +void dc_link_blank_all_dp_displays(struct dc *dc) +{ + unsigned int i; + uint8_t dpcd_power_state = '\0'; + enum dc_status status = DC_ERROR_UNEXPECTED; + + for (i = 0; i < dc->link_count; i++) { + if ((dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) || + (dc->links[i]->priv == NULL) || (dc->links[i]->local_sink == NULL)) + continue; + + /* DP 2.0 spec requires that we read LTTPR caps first */ + dp_retrieve_lttpr_cap(dc->links[i]); + /* if any of the displays are lit up turn them off */ + status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, + &dpcd_power_state, sizeof(dpcd_power_state)); + + if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) + dc_link_blank_dp_stream(dc->links[i], true); + } + +} + +void dc_link_blank_dp_stream(struct dc_link *link, bool hw_init) +{ + unsigned int j; + struct dc *dc = link->ctx->dc; + enum signal_type signal = link->connector_signal; + + if ((signal == SIGNAL_TYPE_EDP) || + (signal == SIGNAL_TYPE_DISPLAY_PORT)) { + if (link->ep_type == DISPLAY_ENDPOINT_PHY && + link->link_enc->funcs->get_dig_frontend && + link->link_enc->funcs->is_dig_enabled(link->link_enc)) { + unsigned int fe = link->link_enc->funcs->get_dig_frontend(link->link_enc); + + if (fe != ENGINE_ID_UNKNOWN) + for (j = 0; j < dc->res_pool->stream_enc_count; j++) { + if (fe == dc->res_pool->stream_enc[j]->id) { + dc->res_pool->stream_enc[j]->funcs->dp_blank(link, + dc->res_pool->stream_enc[j]); + break; + } + } + } + + if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init) + dp_receiver_power_ctrl(link, false); + } +} + static bool get_ext_hdmi_settings(struct pipe_ctx *pipe_ctx, enum engine_id eng_id, struct ext_hdmi_settings *settings) @@ -2946,7 +3013,7 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active link->psr_settings.psr_power_opt = *power_opts; if (psr != NULL && link->psr_settings.psr_feature_enabled && psr->funcs->psr_set_power_opt) - psr->funcs->psr_set_power_opt(psr, link->psr_settings.psr_power_opt); + psr->funcs->psr_set_power_opt(psr, link->psr_settings.psr_power_opt, panel_inst); } /* Enable or Disable PSR */ @@ -3913,9 +3980,6 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp; #if defined(CONFIG_DRM_AMD_DC_DCN) struct link_encoder *link_enc = NULL; - struct dc_state *state = pipe_ctx->stream->ctx->dc->current_state; - struct link_enc_assignment link_enc_assign; - int i; #endif if (cp_psp && cp_psp->funcs.update_stream_config) { @@ -3943,18 +4007,15 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) pipe_ctx->stream->ctx->dc, pipe_ctx->stream); } + ASSERT(link_enc); + // Initialize PHY ID with ABCDE - 01234 mapping except when it is B0 config.phy_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; - //look up the link_enc_assignment for the current pipe_ctx - for (i = 0; i < state->stream_count; i++) { - if (pipe_ctx->stream == state->streams[i]) { - link_enc_assign = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; - } - } // Add flag to guard new A0 DIG mapping - if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true) { - config.dig_be = link_enc_assign.eng_id; + if 
(pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true && + pipe_ctx->stream->link->dc->ctx->dce_version == DCN_VERSION_3_1) { + config.dig_be = link_enc->preferred_engine; config.dio_output_type = pipe_ctx->stream->link->ep_type; config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; } else { @@ -3966,10 +4027,8 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true && link_enc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) { if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { - link_enc = link_enc_assign.stream->link_enc; - // enum ID 1-4 maps to DPIA PHY ID 0-3 - config.phy_idx = link_enc_assign.ep_id.link_id.enum_id - ENUM_ID_1; + config.phy_idx = pipe_ctx->stream->link->link_id.enum_id - ENUM_ID_1; } else { // for non DPIA mode over B0, ABCDE maps to 01564 switch (link_enc->transmitter) { @@ -4242,7 +4301,8 @@ void core_link_enable_stream( /* eDP lit up by bios already, no need to enable again. */ if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && apply_edp_fast_boot_optimization && - !pipe_ctx->stream->timing.flags.DSC) { + !pipe_ctx->stream->timing.flags.DSC && + !pipe_ctx->next_odm_pipe) { pipe_ctx->stream->dpms_off = false; #if defined(CONFIG_DRM_AMD_DC_HDCP) update_psp_stream_config(pipe_ctx, false); @@ -4749,6 +4809,8 @@ bool dc_link_should_enable_fec(const struct dc_link *link) link->local_sink && link->local_sink->edid_caps.panel_patch.disable_fec) || (link->connector_signal == SIGNAL_TYPE_EDP + // enable FEC for EDP if DSC is supported + && link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT == false )) is_fec_disable = true; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index 60539b1f2a80..24dc662ec3e4 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -626,7 +626,7 @@ bool dal_ddc_submit_aux_command(struct ddc_service *ddc, do { struct aux_payload current_payload; bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >= - payload->length ? true : false; + payload->length; uint32_t payload_length = is_end_of_payload ? 
payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index cb7bf9148904..297553074bfd 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -430,7 +430,7 @@ enum dc_status dpcd_set_link_settings( status = core_link_write_dpcd(link, DP_LANE_COUNT_SET, &lane_count_set.raw, 1); - if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 && + if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 && lt_settings->link_settings.use_link_rate_set == true) { rate = 0; /* WA for some MUX chips that will power down with eDP and lose supported @@ -2138,7 +2138,7 @@ static enum link_training_result dp_perform_8b_10b_link_training( } for (lane = 0; lane < (uint8_t)lt_settings->link_settings.lane_count; lane++) - lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET = VOLTAGE_SWING_LEVEL0; + lt_settings->dpcd_lane_settings[lane].raw = 0; } if (status == LINK_TRAINING_SUCCESS) { @@ -3346,6 +3346,148 @@ bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *lin return false; } +static bool decide_edp_link_settings_with_dsc(struct dc_link *link, + struct dc_link_settings *link_setting, + uint32_t req_bw, + enum dc_link_rate max_link_rate) +{ + struct dc_link_settings initial_link_setting; + struct dc_link_settings current_link_setting; + uint32_t link_bw; + + unsigned int policy = 0; + + policy = link->ctx->dc->debug.force_dsc_edp_policy; + if (max_link_rate == LINK_RATE_UNKNOWN) + max_link_rate = link->verified_link_cap.link_rate; + /* + * edp_supported_link_rates_count is only valid for eDP v1.4 or higher. + * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h" + */ + if ((link->dpcd_caps.dpcd_rev.raw < DPCD_REV_13 || + link->dpcd_caps.edp_supported_link_rates_count == 0)) { + /* for DSC enabled case, we search for minimum lane count */ + memset(&initial_link_setting, 0, sizeof(initial_link_setting)); + initial_link_setting.lane_count = LANE_COUNT_ONE; + initial_link_setting.link_rate = LINK_RATE_LOW; + initial_link_setting.link_spread = LINK_SPREAD_DISABLED; + initial_link_setting.use_link_rate_set = false; + initial_link_setting.link_rate_set = 0; + current_link_setting = initial_link_setting; + if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap)) + return false; + + /* search for the minimum link setting that: + * 1. is supported according to the link training result + * 2. 
could support the b/w requested by the timing + */ + while (current_link_setting.link_rate <= + max_link_rate) { + link_bw = dc_link_bandwidth_kbps( + link, + &current_link_setting); + if (req_bw <= link_bw) { + *link_setting = current_link_setting; + return true; + } + if (policy) { + /* minimize lane */ + if (current_link_setting.link_rate < max_link_rate) { + current_link_setting.link_rate = + increase_link_rate( + current_link_setting.link_rate); + } else { + if (current_link_setting.lane_count < + link->verified_link_cap.lane_count) { + current_link_setting.lane_count = + increase_lane_count( + current_link_setting.lane_count); + current_link_setting.link_rate = initial_link_setting.link_rate; + } else + break; + } + } else { + /* minimize link rate */ + if (current_link_setting.lane_count < + link->verified_link_cap.lane_count) { + current_link_setting.lane_count = + increase_lane_count( + current_link_setting.lane_count); + } else { + current_link_setting.link_rate = + increase_link_rate( + current_link_setting.link_rate); + current_link_setting.lane_count = + initial_link_setting.lane_count; + } + } + } + return false; + } + + /* if optimize edp link is supported */ + memset(&initial_link_setting, 0, sizeof(initial_link_setting)); + initial_link_setting.lane_count = LANE_COUNT_ONE; + initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0]; + initial_link_setting.link_spread = LINK_SPREAD_DISABLED; + initial_link_setting.use_link_rate_set = true; + initial_link_setting.link_rate_set = 0; + current_link_setting = initial_link_setting; + + /* search for the minimum link setting that: + * 1. is supported according to the link training result + * 2. could support the b/w requested by the timing + */ + while (current_link_setting.link_rate <= + max_link_rate) { + link_bw = dc_link_bandwidth_kbps( + link, + &current_link_setting); + if (req_bw <= link_bw) { + *link_setting = current_link_setting; + return true; + } + if (policy) { + /* minimize lane */ + if (current_link_setting.link_rate_set < + link->dpcd_caps.edp_supported_link_rates_count + && current_link_setting.link_rate < max_link_rate) { + current_link_setting.link_rate_set++; + current_link_setting.link_rate = + link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; + } else { + if (current_link_setting.lane_count < link->verified_link_cap.lane_count) { + current_link_setting.lane_count = + increase_lane_count( + current_link_setting.lane_count); + current_link_setting.link_rate_set = initial_link_setting.link_rate_set; + current_link_setting.link_rate = + link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; + } else + break; + } + } else { + /* minimize link rate */ + if (current_link_setting.lane_count < + link->verified_link_cap.lane_count) { + current_link_setting.lane_count = + increase_lane_count( + current_link_setting.lane_count); + } else { + if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { + current_link_setting.link_rate_set++; + current_link_setting.link_rate = + link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; + current_link_setting.lane_count = + initial_link_setting.lane_count; + } else + break; + } + } + } + return false; +} + static bool decide_mst_link_settings(const struct dc_link *link, struct dc_link_settings *link_setting) { *link_setting = link->verified_link_cap; @@ -3380,7 +3522,25 @@ void decide_link_settings(struct dc_stream_state *stream, if (decide_mst_link_settings(link,
link_setting)) return; } else if (link->connector_signal == SIGNAL_TYPE_EDP) { - if (decide_edp_link_settings(link, link_setting, req_bw)) + /* enable edp link optimization for DSC eDP case */ + if (stream->timing.flags.DSC) { + enum dc_link_rate max_link_rate = LINK_RATE_UNKNOWN; + + if (link->ctx->dc->debug.force_dsc_edp_policy) { + /* calculate link max link rate cap*/ + struct dc_link_settings tmp_link_setting; + struct dc_crtc_timing tmp_timing = stream->timing; + uint32_t orig_req_bw; + + tmp_link_setting.link_rate = LINK_RATE_UNKNOWN; + tmp_timing.flags.DSC = 0; + orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing); + decide_edp_link_settings(link, &tmp_link_setting, orig_req_bw); + max_link_rate = tmp_link_setting.link_rate; + } + if (decide_edp_link_settings_with_dsc(link, link_setting, req_bw, max_link_rate)) + return; + } else if (decide_edp_link_settings(link, link_setting, req_bw)) return; } else if (decide_dp_link_settings(link, link_setting, req_bw)) return; @@ -4454,7 +4614,7 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link) lttpr_dpcd_data, sizeof(lttpr_dpcd_data)); if (status != DC_OK) { - dm_error("%s: Read LTTPR caps data failed.\n", __func__); + DC_LOG_DP2("%s: Read LTTPR caps data failed.\n", __func__); return false; } @@ -5885,7 +6045,10 @@ bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timin req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing); - decide_edp_link_settings(link, &link_setting, req_bw); + if (!crtc_timing->flags.DSC) + decide_edp_link_settings(link, &link_setting, req_bw); + else + decide_edp_link_settings_with_dsc(link, &link_setting, req_bw, LINK_RATE_UNKNOWN); if (link->dpcd_caps.edp_supported_link_rates[link_rate_set] != link_setting.link_rate || lane_count_set.bits.LANE_COUNT_SET != link_setting.lane_count) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c index b1c9f77d6bf4..d72122593959 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c @@ -94,17 +94,17 @@ static enum link_training_result dpia_configure_link(struct dc_link *link, lt_settings); status = dpcd_configure_channel_coding(link, lt_settings); - if (status != DC_OK && !link->hpd_status) + if (status != DC_OK && link->is_hpd_pending) return LINK_TRAINING_ABORT; /* Configure lttpr mode */ status = dpcd_configure_lttpr_mode(link, lt_settings); - if (status != DC_OK && !link->hpd_status) + if (status != DC_OK && link->is_hpd_pending) return LINK_TRAINING_ABORT; /* Set link rate, lane count and spread. */ status = dpcd_set_link_settings(link, lt_settings); - if (status != DC_OK && !link->hpd_status) + if (status != DC_OK && link->is_hpd_pending) return LINK_TRAINING_ABORT; if (link->preferred_training_settings.fec_enable) @@ -112,7 +112,7 @@ static enum link_training_result dpia_configure_link(struct dc_link *link, else fec_enable = true; status = dp_set_fec_ready(link, fec_enable); - if (status != DC_OK && !link->hpd_status) + if (status != DC_OK && link->is_hpd_pending) return LINK_TRAINING_ABORT; return LINK_TRAINING_SUCCESS; @@ -388,7 +388,7 @@ static enum link_training_result dpia_training_cr_non_transparent(struct dc_link } /* Abort link training if clock recovery failed due to HPD unplug. 
*/ - if (!link->hpd_status) + if (link->is_hpd_pending) result = LINK_TRAINING_ABORT; DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n" @@ -490,7 +490,7 @@ static enum link_training_result dpia_training_cr_transparent(struct dc_link *li } /* Abort link training if clock recovery failed due to HPD unplug. */ - if (!link->hpd_status) + if (link->is_hpd_pending) result = LINK_TRAINING_ABORT; DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n" @@ -675,7 +675,7 @@ static enum link_training_result dpia_training_eq_non_transparent(struct dc_link } /* Abort link training if equalization failed due to HPD unplug. */ - if (!link->hpd_status) + if (link->is_hpd_pending) result = LINK_TRAINING_ABORT; DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n" @@ -758,7 +758,7 @@ static enum link_training_result dpia_training_eq_transparent(struct dc_link *li } /* Abort link training if equalization failed due to HPD unplug. */ - if (!link->hpd_status) + if (link->is_hpd_pending) result = LINK_TRAINING_ABORT; DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n" @@ -892,10 +892,10 @@ static void dpia_training_abort(struct dc_link *link, uint32_t hop) __func__, link->link_id.enum_id - ENUM_ID_1, link->lttpr_mode, - link->hpd_status); + link->is_hpd_pending); /* Abandon clean-up if sink unplugged. */ - if (!link->hpd_status) + if (link->is_hpd_pending) return; if (hop != DPRX) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c index 25e48a8cbb78..a55944da8d53 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c @@ -118,7 +118,10 @@ static void remove_link_enc_assignment( */ if (get_stream_using_link_enc(state, eng_id) == NULL) state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_idx] = eng_id; + stream->link_enc = NULL; + state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id = ENGINE_ID_UNKNOWN; + state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream = NULL; break; } } @@ -148,6 +151,7 @@ static void add_link_enc_assignment( .ep_type = stream->link->ep_type}, .eng_id = eng_id, .stream = stream}; + dc_stream_retain(stream); state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_idx] = ENGINE_ID_UNKNOWN; stream->link_enc = stream->ctx->dc->res_pool->link_encoders[eng_idx]; break; @@ -227,7 +231,7 @@ static struct link_encoder *get_link_enc_used_by_link( .link_id = link->link_id, .ep_type = link->ep_type}; - for (i = 0; i < state->stream_count; i++) { + for (i = 0; i < MAX_PIPES; i++) { struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i]; if (assignment.valid == true && are_ep_ids_equal(&assignment.ep_id, &ep_id)) @@ -237,28 +241,18 @@ static struct link_encoder *get_link_enc_used_by_link( return link_enc; } /* Clear all link encoder assignments. 
*/ -static void clear_enc_assignments(struct dc_state *state) +static void clear_enc_assignments(const struct dc *dc, struct dc_state *state) { int i; - enum engine_id eng_id; - struct dc_stream_state *stream; for (i = 0; i < MAX_PIPES; i++) { state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid = false; - eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id; - stream = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream; - if (eng_id != ENGINE_ID_UNKNOWN) - state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_id - ENGINE_ID_DIGA] = eng_id; - if (stream) - stream->link_enc = NULL; + state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id = ENGINE_ID_UNKNOWN; + if (state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream != NULL) { + dc_stream_release(state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream); + state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream = NULL; + } } -} - -void link_enc_cfg_init( - struct dc *dc, - struct dc_state *state) -{ - int i; for (i = 0; i < dc->res_pool->res_cap->num_dig_link_enc; i++) { if (dc->res_pool->link_encoders[i]) @@ -266,8 +260,13 @@ void link_enc_cfg_init( else state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i] = ENGINE_ID_UNKNOWN; } +} - clear_enc_assignments(state); +void link_enc_cfg_init( + const struct dc *dc, + struct dc_state *state) +{ + clear_enc_assignments(dc, state); state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY; } @@ -284,12 +283,9 @@ void link_enc_cfg_link_encs_assign( ASSERT(state->stream_count == stream_count); - if (stream_count == 0) - clear_enc_assignments(state); - /* Release DIG link encoder resources before running assignment algorithm. */ - for (i = 0; i < stream_count; i++) - dc->res_pool->funcs->link_enc_unassign(state, streams[i]); + for (i = 0; i < dc->current_state->stream_count; i++) + dc->res_pool->funcs->link_enc_unassign(state, dc->current_state->streams[i]); for (i = 0; i < MAX_PIPES; i++) ASSERT(state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid == false); @@ -544,6 +540,7 @@ bool link_enc_cfg_validate(struct dc *dc, struct dc_state *state) uint8_t dig_stream_count = 0; int matching_stream_ptrs = 0; int eng_ids_per_ep_id[MAX_PIPES] = {0}; + int valid_bitmap = 0; /* (1) No. valid entries same as stream count. */ for (i = 0; i < MAX_PIPES; i++) { @@ -625,5 +622,15 @@ bool link_enc_cfg_validate(struct dc *dc, struct dc_state *state) is_valid = valid_entries && valid_stream_ptrs && valid_uniqueness && valid_avail && valid_streams; ASSERT(is_valid); + if (is_valid == false) { + valid_bitmap = + (valid_entries & 0x1) | + ((valid_stream_ptrs & 0x1) << 1) | + ((valid_uniqueness & 0x1) << 2) | + ((valid_avail & 0x1) << 3) | + ((valid_streams & 0x1) << 4); + dm_error("Invalid link encoder assignments: 0x%x\n", valid_bitmap); + } + return is_valid; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index c32fdccd4d92..ce8f7f4fa2b7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1664,6 +1664,10 @@ bool dc_is_stream_unchanged( if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param) return false; + // Only Have Audio left to check whether it is same or not. 
This is a corner case for Tiled sinks + if (old_stream->audio_info.mode_count != stream->audio_info.mode_count) + return false; + return true; } @@ -2078,7 +2082,6 @@ static void mark_seamless_boot_stream( { struct dc_bios *dcb = dc->ctx->dc_bios; - /* TODO: Check Linux */ if (dc->config.allow_seamless_boot_optimization && !dcb->funcs->is_accelerated_mode(dcb)) { if (dc_validate_seamless_boot_timing(dc, stream->sink, &stream->timing)) @@ -2224,6 +2227,9 @@ void dc_resource_state_construct( struct dc_state *dst_ctx) { dst_ctx->clk_mgr = dc->clk_mgr; + + /* Initialise DIG link encoder resource tracking variables. */ + link_enc_cfg_init(dc, dst_ctx); } @@ -2252,16 +2258,6 @@ enum dc_status dc_validate_global_state( if (!new_ctx) return DC_ERROR_UNEXPECTED; -#if defined(CONFIG_DRM_AMD_DC_DCN) - - /* - * Update link encoder to stream assignment. - * TODO: Split out reason allocation from validation. - */ - if (dc->res_pool->funcs->link_encs_assign && fast_validate == false) - dc->res_pool->funcs->link_encs_assign( - dc, new_ctx, new_ctx->streams, new_ctx->stream_count); -#endif if (dc->res_pool->funcs->validate_global) { result = dc->res_pool->funcs->validate_global(dc, new_ctx); @@ -2313,6 +2309,16 @@ enum dc_status dc_validate_global_state( if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate)) result = DC_FAIL_BANDWIDTH_VALIDATE; +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* + * Only update link encoder to stream assignment after bandwidth validation passed. + * TODO: Split out assignment and validation. + */ + if (result == DC_OK && dc->res_pool->funcs->link_encs_assign && fast_validate == false) + dc->res_pool->funcs->link_encs_assign( + dc, new_ctx, new_ctx->streams, new_ctx->stream_count); +#endif + return result; } @@ -2506,17 +2512,7 @@ static void set_avi_info_frame( /* TODO : We should handle YCC quantization */ /* but we do not have matrix calculation */ - if (stream->qy_bit == 1) { - if (color_space == COLOR_SPACE_SRGB || - color_space == COLOR_SPACE_2020_RGB_FULLRANGE) - hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; - else if (color_space == COLOR_SPACE_SRGB_LIMITED || - color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) - hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; - else - hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; - } else - hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; + hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; ///VIC format = stream->timing.timing_3d_format; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c index a249a0e5edd0..4b5e4d8e7735 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c @@ -33,14 +33,6 @@ * Private functions ******************************************************************************/ -static void dc_sink_destruct(struct dc_sink *sink) -{ - if (sink->dc_container_id) { - kfree(sink->dc_container_id); - sink->dc_container_id = NULL; - } -} - static bool dc_sink_construct(struct dc_sink *sink, const struct dc_sink_init_data *init_params) { @@ -75,7 +67,7 @@ void dc_sink_retain(struct dc_sink *sink) static void dc_sink_free(struct kref *kref) { struct dc_sink *sink = container_of(kref, struct dc_sink, refcount); - dc_sink_destruct(sink); + kfree(sink->dc_container_id); kfree(sink); } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 3aac3f4a2852..bf2878235dba 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ 
b/drivers/gpu/drm/amd/display/dc/dc.h @@ -47,7 +47,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.160" +#define DC_VER "3.2.163" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -188,6 +188,7 @@ struct dc_caps { #if defined(CONFIG_DRM_AMD_DC_DCN) bool dp_hpo; #endif + bool edp_dsc_support; bool vbios_lttpr_aware; bool vbios_lttpr_enable; }; @@ -508,7 +509,8 @@ union dpia_debug_options { uint32_t disable_dpia:1; uint32_t force_non_lttpr:1; uint32_t extend_aux_rd_interval:1; - uint32_t reserved:29; + uint32_t disable_mst_dsc_work_around:1; + uint32_t reserved:28; } bits; uint32_t raw; }; @@ -573,6 +575,8 @@ struct dc_debug_options { bool native422_support; bool disable_dsc; enum visual_confirm visual_confirm; + int visual_confirm_rect_height; + bool sanity_checks; bool max_disp_clk; bool surface_trace; @@ -667,6 +671,8 @@ struct dc_debug_options { bool validate_dml_output; bool enable_dmcub_surface_flip; bool usbc_combo_phy_reset_wa; + bool disable_dsc_edp; + unsigned int force_dsc_edp_policy; bool enable_dram_clock_change_one_display_vactive; #if defined(CONFIG_DRM_AMD_DC_DCN) /* TODO - remove once tested */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index 360f3199ea6f..541376fabbef 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -115,13 +115,44 @@ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv) } } +void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dmub_srv) +{ + struct dmub_srv *dmub = dmub_srv->dmub; + struct dc_context *dc_ctx = dmub_srv->ctx; + enum dmub_status status = DMUB_STATUS_OK; + + status = dmub_srv_clear_inbox0_ack(dmub); + if (status != DMUB_STATUS_OK) { + DC_ERROR("Error clearing INBOX0 ack: status=%d\n", status); + dc_dmub_srv_log_diagnostic_data(dmub_srv); + } +} + +void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dmub_srv) +{ + struct dmub_srv *dmub = dmub_srv->dmub; + struct dc_context *dc_ctx = dmub_srv->ctx; + enum dmub_status status = DMUB_STATUS_OK; + + status = dmub_srv_wait_for_inbox0_ack(dmub, 100000); + if (status != DMUB_STATUS_OK) { + DC_ERROR("Error waiting for INBOX0 HW Lock Ack\n"); + dc_dmub_srv_log_diagnostic_data(dmub_srv); + } +} + void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_data_register data) { struct dmub_srv *dmub = dmub_srv->dmub; - if (dmub->hw_funcs.send_inbox0_cmd) - dmub->hw_funcs.send_inbox0_cmd(dmub, data); - // TODO: Add wait command -- poll register for ACK + struct dc_context *dc_ctx = dmub_srv->ctx; + enum dmub_status status = DMUB_STATUS_OK; + + status = dmub_srv_send_inbox0_cmd(dmub, data); + if (status != DMUB_STATUS_OK) { + DC_ERROR("Error sending INBOX0 cmd\n"); + dc_dmub_srv_log_diagnostic_data(dmub_srv); + } } bool dc_dmub_srv_cmd_with_reply_data(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd) diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index 3e35eee7188c..7e4e2ec5915d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -68,6 +68,8 @@ bool dc_dmub_srv_get_dmub_outbox0_msg(const struct dc *dc, struct dmcub_trace_bu void dc_dmub_trace_event_control(struct dc *dc, bool enable); +void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dmub_srv); +void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dmub_srv); void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv, 
union dmub_inbox0_data_register data); bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *dmub_oca); diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 180ecd860296..37af564c4b33 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -113,6 +113,7 @@ struct dc_link { * DIG encoder. */ bool is_dig_mapping_flexible; bool hpd_status; /* HPD status of link without physical HPD pin. */ + bool is_hpd_pending; /* Indicates a new received hpd */ bool edp_sink_present; @@ -191,6 +192,8 @@ struct dc_link { bool dp_skip_DID2; bool dp_skip_reset_segment; bool dp_mot_reset_segment; + /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */ + bool dpia_mst_dsc_always_on; } wa_flags; struct link_mst_stream_allocation_table mst_stream_alloc_table; @@ -224,6 +227,8 @@ static inline void get_edp_links(const struct dc *dc, *edp_num = 0; for (i = 0; i < dc->link_count; i++) { // report any eDP links, even unconnected DDI's + if (!dc->links[i]) + continue; if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP) { edp_links[*edp_num] = dc->links[i]; if (++(*edp_num) == MAX_NUM_EDP) @@ -287,6 +292,10 @@ bool dc_link_setup_psr(struct dc_link *dc_link, void dc_link_get_psr_residency(const struct dc_link *link, uint32_t *residency); +void dc_link_blank_all_dp_displays(struct dc *dc); + +void dc_link_blank_dp_stream(struct dc_link *link, bool hw_init); + /* Request DC to detect if there is a Panel connected. * boot - If this call is during initial boot. * Return false for any type of detection failure or MST detection @@ -298,7 +307,7 @@ enum dc_detect_reason { DETECT_REASON_HPD, DETECT_REASON_HPDRX, DETECT_REASON_FALLBACK, - DETECT_REASON_RETRAIN + DETECT_REASON_RETRAIN, }; bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c index 27218ede150a..70eaac017624 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c @@ -67,9 +67,6 @@ static void write_indirect_azalia_reg(struct audio *audio, /* AZALIA_F0_CODEC_ENDPOINT_DATA endpoint data */ REG_SET(AZALIA_F0_CODEC_ENDPOINT_DATA, 0, AZALIA_ENDPOINT_REG_DATA, reg_data); - - DC_LOG_HW_AUDIO("AUDIO:write_indirect_azalia_reg: index: %u data: %u\n", - reg_index, reg_data); } static uint32_t read_indirect_azalia_reg(struct audio *audio, uint32_t reg_index) @@ -85,9 +82,6 @@ static uint32_t read_indirect_azalia_reg(struct audio *audio, uint32_t reg_index /* AZALIA_F0_CODEC_ENDPOINT_DATA endpoint data */ value = REG_READ(AZALIA_F0_CODEC_ENDPOINT_DATA); - DC_LOG_HW_AUDIO("AUDIO:read_indirect_azalia_reg: index: %u data: %u\n", - reg_index, value); - return value; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h index 5622d5e32d81..dbd2cfed0603 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.h @@ -113,6 +113,7 @@ struct dce_audio_shift { uint8_t DCCG_AUDIO_DTO2_USE_512FBR_DTO; uint32_t DCCG_AUDIO_DTO0_USE_512FBR_DTO; uint32_t DCCG_AUDIO_DTO1_USE_512FBR_DTO; + uint32_t CLOCK_GATING_DISABLE; }; struct dce_audio_mask { @@ -132,6 +133,7 @@ struct dce_audio_mask { uint32_t DCCG_AUDIO_DTO2_USE_512FBR_DTO; uint32_t DCCG_AUDIO_DTO0_USE_512FBR_DTO; uint32_t DCCG_AUDIO_DTO1_USE_512FBR_DTO; + uint32_t 
CLOCK_GATING_DISABLE; }; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c index 9baf8ca0a920..b1b2e3c6f379 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c @@ -56,8 +56,11 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_cmd_lock_hw hw_lock_cmd) { union dmub_inbox0_data_register data = { 0 }; + data.inbox0_cmd_lock_hw = hw_lock_cmd; + dc_dmub_srv_clear_inbox0_ack(dmub_srv); dc_dmub_srv_send_inbox0_cmd(dmub_srv, data); + dc_dmub_srv_wait_for_inbox0_ack(dmub_srv); } bool should_use_dmub_lock(struct dc_link *link) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c index 90eb8eedacf2..87ed48d5530d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c @@ -230,7 +230,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level, uint8_ /** * Set PSR power optimization flags. */ -static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt) +static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt, uint8_t panel_inst) { union dmub_rb_cmd cmd; struct dc_context *dc = dmub->ctx; @@ -239,7 +239,9 @@ static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt cmd.psr_set_power_opt.header.type = DMUB_CMD__PSR; cmd.psr_set_power_opt.header.sub_type = DMUB_CMD__SET_PSR_POWER_OPT; cmd.psr_set_power_opt.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_power_opt_data); + cmd.psr_set_power_opt.psr_set_power_opt_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; cmd.psr_set_power_opt.psr_set_power_opt_data.power_opt = power_opt; + cmd.psr_set_power_opt.psr_set_power_opt_data.panel_inst = panel_inst; dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); dc_dmub_srv_cmd_execute(dc->dmub_srv); @@ -327,6 +329,16 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub, copy_settings_data->fec_enable_delay_in100us = link->dc->debug.fec_enable_delay_in100us; copy_settings_data->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; copy_settings_data->panel_inst = panel_inst; + copy_settings_data->dsc_enable_status = (pipe_ctx->stream->timing.flags.DSC == 1); + + if (link->fec_state == dc_link_fec_enabled && + (!memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_1, + sizeof(link->dpcd_caps.sink_dev_id_str)) || + !memcmp(link->dpcd_caps.sink_dev_id_str, DP_SINK_DEVICE_STR_ID_2, + sizeof(link->dpcd_caps.sink_dev_id_str)))) + copy_settings_data->debug.bitfields.force_wakeup_by_tps3 = 1; + else + copy_settings_data->debug.bitfields.force_wakeup_by_tps3 = 0; dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); dc_dmub_srv_cmd_execute(dc->dmub_srv); diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h index 5dbd479660f1..01acc01cc191 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h @@ -46,7 +46,7 @@ struct dmub_psr_funcs { void (*psr_force_static)(struct dmub_psr *dmub, uint8_t panel_inst); void (*psr_get_residency)(struct dmub_psr *dmub, uint32_t *residency, uint8_t panel_inst); - void (*psr_set_power_opt)(struct dmub_psr *dmub, unsigned int power_opt); + void (*psr_set_power_opt)(struct dmub_psr *dmub, unsigned int power_opt, uint8_t panel_inst); }; struct dmub_psr *dmub_psr_create(struct dc_context *ctx); diff --git 
a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 24e47df526f6..3d421583e9ca 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1602,6 +1602,11 @@ static enum dc_status apply_single_controller_ctx_to_hw( pipe_ctx->stream_res.stream_enc, pipe_ctx->stream_res.tg->inst); + if (dc_is_dp_signal(pipe_ctx->stream->signal) && + pipe_ctx->stream_res.stream_enc->funcs->reset_fifo) + pipe_ctx->stream_res.stream_enc->funcs->reset_fifo( + pipe_ctx->stream_res.stream_enc); + if (dc_is_dp_signal(pipe_ctx->stream->signal)) dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG); @@ -1655,30 +1660,12 @@ static enum dc_status apply_single_controller_ctx_to_hw( static void power_down_encoders(struct dc *dc) { - int i, j; + int i; for (i = 0; i < dc->link_count; i++) { enum signal_type signal = dc->links[i]->connector_signal; - if ((signal == SIGNAL_TYPE_EDP) || - (signal == SIGNAL_TYPE_DISPLAY_PORT)) { - if (dc->links[i]->link_enc->funcs->get_dig_frontend && - dc->links[i]->link_enc->funcs->is_dig_enabled(dc->links[i]->link_enc)) { - unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend( - dc->links[i]->link_enc); - - for (j = 0; j < dc->res_pool->stream_enc_count; j++) { - if (fe == dc->res_pool->stream_enc[j]->id) { - dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i], - dc->res_pool->stream_enc[j]); - break; - } - } - } - - if (!dc->links[i]->wa_flags.dp_keep_receiver_powered) - dp_receiver_power_ctrl(dc->links[i], false); - } + dc_link_blank_dp_stream(dc->links[i], false); if (signal != SIGNAL_TYPE_EDP) signal = SIGNAL_TYPE_NONE; @@ -1846,7 +1833,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) } } // We are trying to enable eDP, don't power down VDD - if (edp_stream_num) + if (edp_stream_num && can_apply_edp_fast_boot) keep_edp_vdd_on = true; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c index 44293d66b46b..e31a6f1516bb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c @@ -39,6 +39,10 @@ #define BLACK_OFFSET_RGB_Y 0x0 #define BLACK_OFFSET_CBCR 0x8000 +#define VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT 3 +#define VISUAL_CONFIRM_RECT_HEIGHT_MIN 1 +#define VISUAL_CONFIRM_RECT_HEIGHT_MAX 10 + #define REG(reg)\ dpp->tf_regs->reg @@ -685,9 +689,17 @@ static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp, const struct rect *recout) { int visual_confirm_on = 0; + unsigned short visual_confirm_rect_height = VISUAL_CONFIRM_RECT_HEIGHT_DEFAULT; + if (dpp->base.ctx->dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) visual_confirm_on = 1; + /* Check bounds to ensure the VC bar height was set to a sane value */ + if ((dpp->base.ctx->dc->debug.visual_confirm_rect_height >= VISUAL_CONFIRM_RECT_HEIGHT_MIN) && + (dpp->base.ctx->dc->debug.visual_confirm_rect_height <= VISUAL_CONFIRM_RECT_HEIGHT_MAX)) { + visual_confirm_rect_height = dpp->base.ctx->dc->debug.visual_confirm_rect_height; + } + REG_SET_2(RECOUT_START, 0, /* First pixel of RECOUT in the active OTG area */ RECOUT_START_X, recout->x, @@ -699,7 +711,7 @@ static void dpp1_dscl_set_recout(struct dcn10_dpp *dpp, RECOUT_WIDTH, recout->width, /* Number of RECOUT vertical lines */ RECOUT_HEIGHT, recout->height - - visual_confirm_on * 2 * (dpp->base.inst + 1)); + - visual_confirm_on * 
2 * (dpp->base.inst + visual_confirm_rect_height)); } /** diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 0b788d794fb3..a2b925cc4132 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1362,11 +1362,48 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context) tg->funcs->tg_init(tg); } + + /* Power gate DSCs */ + if (hws->funcs.dsc_pg_control != NULL) { + uint32_t num_opps = 0; + uint32_t opp_id_src0 = OPP_ID_INVALID; + uint32_t opp_id_src1 = OPP_ID_INVALID; + + // Step 1: To find out which OPTC is running & OPTC DSC is ON + for (i = 0; i < dc->res_pool->res_cap->num_timing_generator; i++) { + uint32_t optc_dsc_state = 0; + struct timing_generator *tg = dc->res_pool->timing_generators[i]; + + if (tg->funcs->is_tg_enabled(tg)) { + if (tg->funcs->get_dsc_status) + tg->funcs->get_dsc_status(tg, &optc_dsc_state); + // Only one OPTC with DSC is ON, so if we got one result, we would exit this block. + // non-zero value is DSC enabled + if (optc_dsc_state != 0) { + tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1); + break; + } + } + } + + // Step 2: To power down DSC but skip DSC of running OPTC + for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) { + struct dcn_dsc_state s = {0}; + + dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s); + + if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) && + s.dsc_clock_en && s.dsc_fw_en) + continue; + + hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false); + } + } } void dcn10_init_hw(struct dc *dc) { - int i, j; + int i; struct abm *abm = dc->res_pool->abm; struct dmcu *dmcu = dc->res_pool->dmcu; struct dce_hwseq *hws = dc->hwseq; @@ -1468,43 +1505,8 @@ void dcn10_init_hw(struct dc *dc) dmub_enable_outbox_notification(dc); /* we want to turn off all dp displays before doing detection */ - if (dc->config.power_down_display_on_boot) { - uint8_t dpcd_power_state = '\0'; - enum dc_status status = DC_ERROR_UNEXPECTED; - - for (i = 0; i < dc->link_count; i++) { - if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) - continue; - - /* DP 2.0 requires that LTTPR Caps be read first */ - dp_retrieve_lttpr_cap(dc->links[i]); - - /* - * If any of the displays are lit up turn them off. - * The reason is that some MST hubs cannot be turned off - * completely until we tell them to do so. - * If not turned off, then displays connected to MST hub - * won't light up. 
- */ - status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, - &dpcd_power_state, sizeof(dpcd_power_state)); - if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) { - /* blank dp stream before power off receiver*/ - if (dc->links[i]->link_enc->funcs->get_dig_frontend) { - unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(dc->links[i]->link_enc); - - for (j = 0; j < dc->res_pool->stream_enc_count; j++) { - if (fe == dc->res_pool->stream_enc[j]->id) { - dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i], - dc->res_pool->stream_enc[j]); - break; - } - } - } - dp_receiver_power_ctrl(dc->links[i], false); - } - } - } + if (dc->config.power_down_display_on_boot) + dc_link_blank_all_dp_displays(dc); /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which @@ -1637,7 +1639,7 @@ void dcn10_reset_hw_ctx_wrap( dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); if (hws->funcs.enable_stream_gating) - hws->funcs.enable_stream_gating(dc, pipe_ctx); + hws->funcs.enable_stream_gating(dc, pipe_ctx_old); if (old_clk) old_clk->funcs->cs_power_down(old_clk); } @@ -2624,7 +2626,7 @@ static void dcn10_update_dchubp_dpp( /* new calculated dispclk, dppclk are stored in * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz. - * dcn_validate_bandwidth compute new dispclk, dppclk. + * dcn10_validate_bandwidth compute new dispclk, dppclk. * dispclk will put in use after optimize_bandwidth when * ramp_up_dispclk_with_dpp is called. * there are two places for dppclk be put in use. One location @@ -2638,7 +2640,7 @@ static void dcn10_update_dchubp_dpp( * for example, eDP + external dp, change resolution of DP from * 1920x1080x144hz to 1280x960x60hz. 
* before change: dispclk = 337889 dppclk = 337889 - * change mode, dcn_validate_bandwidth calculate + * change mode, dcn10_validate_bandwidth calculate * dispclk = 143122 dppclk = 143122 * update_dchubp_dpp be executed before dispclk be updated, * dispclk = 337889, but dppclk use new value dispclk /2 = diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index f37551e00023..19a2dd619ec7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -978,10 +978,8 @@ static void dcn10_resource_destruct(struct dcn10_resource_pool *pool) pool->base.mpc = NULL; } - if (pool->base.hubbub != NULL) { - kfree(pool->base.hubbub); - pool->base.hubbub = NULL; - } + kfree(pool->base.hubbub); + pool->base.hubbub = NULL; for (i = 0; i < pool->base.pipe_count; i++) { if (pool->base.opps[i] != NULL) @@ -1011,14 +1009,10 @@ static void dcn10_resource_destruct(struct dcn10_resource_pool *pool) for (i = 0; i < pool->base.res_cap->num_ddc; i++) { if (pool->base.engines[i] != NULL) dce110_engine_destroy(&pool->base.engines[i]); - if (pool->base.hw_i2cs[i] != NULL) { - kfree(pool->base.hw_i2cs[i]); - pool->base.hw_i2cs[i] = NULL; - } - if (pool->base.sw_i2cs[i] != NULL) { - kfree(pool->base.sw_i2cs[i]); - pool->base.sw_i2cs[i] = NULL; - } + kfree(pool->base.hw_i2cs[i]); + pool->base.hw_i2cs[i] = NULL; + kfree(pool->base.sw_i2cs[i]); + pool->base.sw_i2cs[i] = NULL; } for (i = 0; i < pool->base.audio_count; i++) { @@ -1320,7 +1314,7 @@ static const struct resource_funcs dcn10_res_pool_funcs = { .destroy = dcn10_destroy_resource_pool, .link_enc_create = dcn10_link_encoder_create, .panel_cntl_create = dcn10_panel_cntl_create, - .validate_bandwidth = dcn_validate_bandwidth, + .validate_bandwidth = dcn10_validate_bandwidth, .acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer, .validate_plane = dcn10_validate_plane, .validate_global = dcn10_validate_global, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index b0c08ee6bc2c..bf4436d7aaab 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -902,6 +902,19 @@ void enc1_stream_encoder_stop_dp_info_packets( } +void enc1_stream_encoder_reset_fifo( + struct stream_encoder *enc) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + + /* set DIG_START to 0x1 to reset FIFO */ + REG_UPDATE(DIG_FE_CNTL, DIG_START, 1); + udelay(100); + + /* write 0 to take the FIFO out of reset */ + REG_UPDATE(DIG_FE_CNTL, DIG_START, 0); +} + void enc1_stream_encoder_dp_blank( struct dc_link *link, struct stream_encoder *enc) @@ -1587,6 +1600,8 @@ static const struct stream_encoder_funcs dcn10_str_enc_funcs = { enc1_stream_encoder_send_immediate_sdp_message, .stop_dp_info_packets = enc1_stream_encoder_stop_dp_info_packets, + .reset_fifo = + enc1_stream_encoder_reset_fifo, .dp_blank = enc1_stream_encoder_dp_blank, .dp_unblank = diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h index 687d7e4bf7ca..a146a41f68e9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h @@ -626,6 +626,9 @@ void enc1_stream_encoder_send_immediate_sdp_message( void enc1_stream_encoder_stop_dp_info_packets( struct 
stream_encoder *enc); +void enc1_stream_encoder_reset_fifo( + struct stream_encoder *enc); + void enc1_stream_encoder_dp_blank( struct dc_link *link, struct stream_encoder *enc); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c index 79b640e202eb..ef5c4c0f4d6c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c @@ -162,6 +162,8 @@ static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_ds REG_GET(DSCC_PPS_CONFIG2, PIC_WIDTH, &s->dsc_pic_width); REG_GET(DSCC_PPS_CONFIG2, PIC_HEIGHT, &s->dsc_pic_height); REG_GET(DSCC_PPS_CONFIG7, SLICE_BPG_OFFSET, &s->dsc_slice_bpg_offset); + REG_GET_2(DSCRM_DSC_FORWARD_CONFIG, DSCRM_DSC_FORWARD_EN, &s->dsc_fw_en, + DSCRM_DSC_OPP_PIPE_SOURCE, &s->dsc_opp_source); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 4f88376a118f..e6af99ae3d9f 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -2270,7 +2270,7 @@ void dcn20_reset_hw_ctx_wrap( dcn20_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); if (hws->funcs.enable_stream_gating) - hws->funcs.enable_stream_gating(dc, pipe_ctx); + hws->funcs.enable_stream_gating(dc, pipe_ctx_old); if (old_clk) old_clk->funcs->cs_power_down(old_clk); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c index c90b8516dcc1..8c34751b0e58 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c @@ -190,6 +190,19 @@ void optc2_set_dsc_config(struct timing_generator *optc, OPTC_DSC_SLICE_WIDTH, dsc_slice_width); } +/* Get DSC-related configuration. 
+ * dsc_mode: 0 disables DSC, other values enable DSC in specified format + */ +void optc2_get_dsc_status(struct timing_generator *optc, + uint32_t *dsc_mode) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + + REG_GET(OPTC_DATA_FORMAT_CONTROL, + OPTC_DSC_MODE, dsc_mode); +} + + /*TEMP: Need to figure out inheritance model here.*/ bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing) { @@ -579,6 +592,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = { .get_crc = optc1_get_crc, .configure_crc = optc2_configure_crc, .set_dsc_config = optc2_set_dsc_config, + .get_dsc_status = optc2_get_dsc_status, .set_dwb_source = optc2_set_dwb_source, .set_odm_bypass = optc2_set_odm_bypass, .set_odm_combine = optc2_set_odm_combine, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h index be19a6885fbf..f7968b9ca16e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h @@ -98,6 +98,9 @@ void optc2_set_dsc_config(struct timing_generator *optc, uint32_t dsc_bytes_per_pixel, uint32_t dsc_slice_width); +void optc2_get_dsc_status(struct timing_generator *optc, + uint32_t *dsc_mode); + void optc2_set_odm_bypass(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c index aab25ca8343a..8a70f92795c2 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c @@ -593,6 +593,8 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = { enc1_stream_encoder_send_immediate_sdp_message, .stop_dp_info_packets = enc1_stream_encoder_stop_dp_info_packets, + .reset_fifo = + enc1_stream_encoder_reset_fifo, .dp_blank = enc1_stream_encoder_dp_blank, .dp_unblank = diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c index ebd9c35c914f..7aa9aaf5db4c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c @@ -805,6 +805,8 @@ static const struct stream_encoder_funcs dcn30_str_enc_funcs = { enc3_stream_encoder_update_dp_info_packets, .stop_dp_info_packets = enc1_stream_encoder_stop_dp_info_packets, + .reset_fifo = + enc1_stream_encoder_reset_fifo, .dp_blank = enc1_stream_encoder_dp_blank, .dp_unblank = diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index df2717116604..3e99bb9c70ab 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -437,7 +437,7 @@ void dcn30_init_hw(struct dc *dc) struct dce_hwseq *hws = dc->hwseq; struct dc_bios *dcb = dc->ctx->dc_bios; struct resource_pool *res_pool = dc->res_pool; - int i, j; + int i; int edp_num; uint32_t backlight = MAX_BACKLIGHT_LEVEL; @@ -534,41 +534,8 @@ void dcn30_init_hw(struct dc *dc) hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false); /* we want to turn off all dp displays before doing detection */ - if (dc->config.power_down_display_on_boot) { - uint8_t dpcd_power_state = '\0'; - enum dc_status status = DC_ERROR_UNEXPECTED; - - for (i = 0; i < dc->link_count; i++) { - if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) - continue; - /* DP 2.0 states 
that LTTPR regs must be read first */ - dp_retrieve_lttpr_cap(dc->links[i]); - - /* if any of the displays are lit up turn them off */ - status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, - &dpcd_power_state, sizeof(dpcd_power_state)); - if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) { - /* blank dp stream before power off receiver*/ - if (dc->links[i]->link_enc->funcs->get_dig_frontend) { - unsigned int fe; - - fe = dc->links[i]->link_enc->funcs->get_dig_frontend( - dc->links[i]->link_enc); - if (fe == ENGINE_ID_UNKNOWN) - continue; - - for (j = 0; j < dc->res_pool->stream_enc_count; j++) { - if (fe == dc->res_pool->stream_enc[j]->id) { - dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i], - dc->res_pool->stream_enc[j]); - break; - } - } - } - dp_receiver_power_ctrl(dc->links[i], false); - } - } - } + if (dc->config.power_down_display_on_boot) + dc_link_blank_all_dp_displays(dc); /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c index 5d9e6413d67a..f5e8916601d3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c @@ -332,6 +332,7 @@ static struct timing_generator_funcs dcn30_tg_funcs = { .get_crc = optc1_get_crc, .configure_crc = optc2_configure_crc, .set_dsc_config = optc3_set_dsc_config, + .get_dsc_status = optc2_get_dsc_status, .set_dwb_source = NULL, .set_odm_bypass = optc3_set_odm_bypass, .set_odm_combine = optc3_set_odm_combine, diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index fbaa03f26d8b..7abc36a4ff76 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -686,7 +686,7 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_clock_gate = true, .disable_pplib_clock_request = true, .disable_pplib_wm_range = true, - .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, + .pipe_split_policy = MPC_SPLIT_AVOID, .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, @@ -1449,9 +1449,7 @@ static bool dcn301_resource_construct( dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; dc->caps.extended_aux_timeout_support = true; -#ifdef CONFIG_DRM_AMD_DC_DMUB dc->caps.dmcub_support = true; -#endif /* Color pipeline capabilities */ dc->caps.color.dpp.dcn_arch = 1; @@ -1487,6 +1485,23 @@ static bool dcn301_resource_construct( dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + /* read VBIOS LTTPR caps */ + if (ctx->dc_bios->funcs->get_lttpr_caps) { + enum bp_result bp_query_result; + uint8_t is_vbios_lttpr_enable = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); + dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; + } + + if (ctx->dc_bios->funcs->get_lttpr_interop) { + enum bp_result bp_query_result; + uint8_t is_vbios_interop_enabled = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled); + dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; + } + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; else if 
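The LTTPR capability probe added to the dcn301 constructor above (and repeated for dcn302 and dcn303 below) follows one pattern: call an optional VBIOS query, and record the capability only when the query exists, returns BP_RESULT_OK, and reports a non-zero flag. Below is a compact standalone model of that pattern, with simplified types and a hypothetical fake_get_lttpr_caps callback standing in for the BIOS table.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum bp_result { BP_RESULT_OK = 0, BP_RESULT_FAILURE };

/* Simplified VBIOS interface: either query may be absent on older tables. */
struct dc_bios {
        enum bp_result (*get_lttpr_caps)(struct dc_bios *bios, uint8_t *enabled);
        enum bp_result (*get_lttpr_interop)(struct dc_bios *bios, uint8_t *enabled);
};

struct dc_caps {
        bool vbios_lttpr_enable;
        bool vbios_lttpr_aware;
};

static void read_vbios_lttpr_caps(struct dc_bios *bios, struct dc_caps *caps)
{
        uint8_t flag = 0;

        if (bios->get_lttpr_caps)
                caps->vbios_lttpr_enable =
                        (bios->get_lttpr_caps(bios, &flag) == BP_RESULT_OK) && !!flag;

        flag = 0;
        if (bios->get_lttpr_interop)
                caps->vbios_lttpr_aware =
                        (bios->get_lttpr_interop(bios, &flag) == BP_RESULT_OK) && !!flag;
}

static enum bp_result fake_get_lttpr_caps(struct dc_bios *bios, uint8_t *enabled)
{
        (void)bios;
        *enabled = 1;
        return BP_RESULT_OK;
}

int main(void)
{
        struct dc_bios bios = { .get_lttpr_caps = fake_get_lttpr_caps }; /* no interop query on this table */
        struct dc_caps caps = { false, false };

        read_vbios_lttpr_caps(&bios, &caps);
        printf("vbios_lttpr_enable=%d vbios_lttpr_aware=%d\n",
               caps.vbios_lttpr_enable, caps.vbios_lttpr_aware);
        return 0;
}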
(dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) { diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c index fcf96cf08c76..058f5d71e037 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c @@ -1557,6 +1557,24 @@ static bool dcn302_resource_construct( dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + /* read VBIOS LTTPR caps */ + if (ctx->dc_bios->funcs->get_lttpr_caps) { + enum bp_result bp_query_result; + uint8_t is_vbios_lttpr_enable = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); + dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; + } + + if (ctx->dc_bios->funcs->get_lttpr_interop) { + enum bp_result bp_query_result; + uint8_t is_vbios_interop_enabled = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, + &is_vbios_interop_enabled); + dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; + } + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; else diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c index 4a9b64023675..7024aeb0884c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c @@ -1500,6 +1500,23 @@ static bool dcn303_resource_construct( dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + /* read VBIOS LTTPR caps */ + if (ctx->dc_bios->funcs->get_lttpr_caps) { + enum bp_result bp_query_result; + uint8_t is_vbios_lttpr_enable = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable); + dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable; + } + + if (ctx->dc_bios->funcs->get_lttpr_interop) { + enum bp_result bp_query_result; + uint8_t is_vbios_interop_enabled = 0; + + bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios, &is_vbios_interop_enabled); + dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled; + } + if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; else diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c index 565f12dd179a..5065904c7833 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c @@ -358,8 +358,8 @@ static void dcn31_hpo_dp_stream_enc_set_stream_attribute( h_width = hw_crtc_timing.h_border_left + hw_crtc_timing.h_addressable + hw_crtc_timing.h_border_right; v_height = hw_crtc_timing.v_border_top + hw_crtc_timing.v_addressable + hw_crtc_timing.v_border_bottom; - hsp = hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ? 0x80 : 0; - vsp = hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ? 0x80 : 0; + hsp = hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ? 0 : 0x80; + vsp = hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ? 
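The hsp/vsp swap in the dcn31 HPO stream encoder hunk here changes which polarity maps to 0x80: after the fix, a positive sync polarity yields 0 and a negative one yields 0x80 in the MSA field. A tiny worked sketch of that mapping follows; the flag names mirror the timing flags in the hunk, everything else is illustrative.

#include <stdint.h>
#include <stdio.h>

/* Only the two flags the hunk touches. */
struct toy_timing_flags {
        unsigned HSYNC_POSITIVE_POLARITY : 1;
        unsigned VSYNC_POSITIVE_POLARITY : 1;
};

/* Mirrors the corrected ternary: 0 for positive polarity, 0x80 otherwise. */
static uint8_t msa_sync_polarity(unsigned positive)
{
        return positive ? 0x00 : 0x80;
}

int main(void)
{
        struct toy_timing_flags flags = { .HSYNC_POSITIVE_POLARITY = 1, .VSYNC_POSITIVE_POLARITY = 0 };

        printf("hsp=0x%02x vsp=0x%02x\n",
               (unsigned)msa_sync_polarity(flags.HSYNC_POSITIVE_POLARITY),   /* 0x00 */
               (unsigned)msa_sync_polarity(flags.VSYNC_POSITIVE_POLARITY));  /* 0x80 */
        return 0;
}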
0 : 0x80; v_freq = hw_crtc_timing.pix_clk_100hz * 100; /* MSA Packet Mapping to 32-bit Link Symbols - DP2 spec, section 2.7.4.1 diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index 5dd1ce9ddb53..4206ce5bf9a9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -112,7 +112,7 @@ void dcn31_init_hw(struct dc *dc) struct dc_bios *dcb = dc->ctx->dc_bios; struct resource_pool *res_pool = dc->res_pool; uint32_t backlight = MAX_BACKLIGHT_LEVEL; - int i, j; + int i; if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); @@ -192,50 +192,13 @@ void dcn31_init_hw(struct dc *dc) link->link_status.link_active = true; } - /* Power gate DSCs */ - for (i = 0; i < res_pool->res_cap->num_dsc; i++) - if (hws->funcs.dsc_pg_control != NULL) - hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false); - /* Enables outbox notifications for usb4 dpia */ if (dc->res_pool->usb4_dpia_count) dmub_enable_outbox_notification(dc); /* we want to turn off all dp displays before doing detection */ - if (dc->config.power_down_display_on_boot) { - uint8_t dpcd_power_state = '\0'; - enum dc_status status = DC_ERROR_UNEXPECTED; - - for (i = 0; i < dc->link_count; i++) { - if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) - continue; - - /* if any of the displays are lit up turn them off */ - status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, - &dpcd_power_state, sizeof(dpcd_power_state)); - if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) { - /* blank dp stream before power off receiver*/ - if (dc->links[i]->ep_type == DISPLAY_ENDPOINT_PHY && - dc->links[i]->link_enc->funcs->get_dig_frontend) { - unsigned int fe; - - fe = dc->links[i]->link_enc->funcs->get_dig_frontend( - dc->links[i]->link_enc); - if (fe == ENGINE_ID_UNKNOWN) - continue; - - for (j = 0; j < dc->res_pool->stream_enc_count; j++) { - if (fe == dc->res_pool->stream_enc[j]->id) { - dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i], - dc->res_pool->stream_enc[j]); - break; - } - } - } - dp_receiver_power_ctrl(dc->links[i], false); - } - } - } + if (dc->config.power_down_display_on_boot) + dc_link_blank_all_dp_displays(dc); /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which @@ -602,7 +565,7 @@ void dcn31_reset_hw_ctx_wrap( dcn31_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state); if (hws->funcs.enable_stream_gating) - hws->funcs.enable_stream_gating(dc, pipe_ctx); + hws->funcs.enable_stream_gating(dc, pipe_ctx_old); if (old_clk) old_clk->funcs->cs_power_down(old_clk); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c index 05335a8c3c2d..e175b6cc0125 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c @@ -149,4 +149,9 @@ void dcn31_hw_sequencer_construct(struct dc *dc) dc->hwss.init_hw = dcn20_fpga_init_hw; dc->hwseq->funcs.init_pipes = NULL; } + if (dc->debug.disable_z10) { + /*hw not support z10 or sw disable it*/ + dc->hwss.z10_restore = NULL; + dc->hwss.z10_save_init = NULL; + } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c index a4b1d98f0007..e8562fa11366 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c +++ 
b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c @@ -256,6 +256,7 @@ static struct timing_generator_funcs dcn31_tg_funcs = { .get_crc = optc1_get_crc, .configure_crc = optc2_configure_crc, .set_dsc_config = optc3_set_dsc_config, + .get_dsc_status = optc2_get_dsc_status, .set_dwb_source = NULL, .set_odm_bypass = optc3_set_odm_bypass, .set_odm_combine = optc31_set_odm_combine, diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index 18896294ae12..88e040687940 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -2199,6 +2199,7 @@ static bool dcn31_resource_construct( dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; dc->caps.dp_hpo = true; + dc->caps.edp_dsc_support = true; dc->caps.extended_aux_timeout_support = true; dc->caps.dmcub_support = true; dc->caps.is_apu = true; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h index 6905ef1e75a6..d76251fd1566 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h @@ -73,6 +73,7 @@ struct display_mode_lib { struct vba_vars_st vba; struct dal_logger *logger; struct dml_funcs funcs; + struct _vcs_dpi_display_e2e_pipe_params_st dml_pipe_state[6]; }; void dml_init_instance(struct display_mode_lib *lib, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c new file mode 100644 index 000000000000..ece34b0b8a46 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c @@ -0,0 +1,1889 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dml_wrapper.h" +#include "resource.h" +#include "core_types.h" +#include "dsc.h" +#include "clk_mgr.h" + +#ifndef DC_LOGGER_INIT +#define DC_LOGGER_INIT +#undef DC_LOG_WARNING +#define DC_LOG_WARNING +#endif + +#define DML_WRAPPER_TRANSLATION_ +#include "dml_wrapper_translation.c" +#undef DML_WRAPPER_TRANSLATION_ + +static bool is_dual_plane(enum surface_pixel_format format) +{ + return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA; +} + +static void build_clamping_params(struct dc_stream_state *stream) +{ + stream->clamping.clamping_level = CLAMPING_FULL_RANGE; + stream->clamping.c_depth = stream->timing.display_color_depth; + stream->clamping.pixel_encoding = stream->timing.pixel_encoding; +} + +static void get_pixel_clock_parameters( + const struct pipe_ctx *pipe_ctx, + struct pixel_clk_params *pixel_clk_params) +{ + const struct dc_stream_state *stream = pipe_ctx->stream; + + /*TODO: is this halved for YCbCr 420? in that case we might want to move + * the pixel clock normalization for hdmi up to here instead of doing it + * in pll_adjust_pix_clk + */ + pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz; + pixel_clk_params->encoder_object_id = stream->link->link_enc->id; + pixel_clk_params->signal_type = pipe_ctx->stream->signal; + pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1; + /* TODO: un-hardcode*/ + pixel_clk_params->requested_sym_clk = LINK_RATE_LOW * + LINK_RATE_REF_FREQ_IN_KHZ; + pixel_clk_params->flags.ENABLE_SS = 0; + pixel_clk_params->color_depth = + stream->timing.display_color_depth; + pixel_clk_params->flags.DISPLAY_BLANKED = 1; + pixel_clk_params->flags.SUPPORT_YCBCR420 = (stream->timing.pixel_encoding == + PIXEL_ENCODING_YCBCR420); + pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding; + if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) { + pixel_clk_params->color_depth = COLOR_DEPTH_888; + } + if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) { + pixel_clk_params->requested_pix_clk_100hz = pixel_clk_params->requested_pix_clk_100hz / 2; + } + if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) + pixel_clk_params->requested_pix_clk_100hz *= 2; + +} + +static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx) +{ + get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params); + + if (pipe_ctx->clock_source) + pipe_ctx->clock_source->funcs->get_pix_clk_dividers( + pipe_ctx->clock_source, + &pipe_ctx->stream_res.pix_clk_params, + &pipe_ctx->pll_settings); + + pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding; + + resource_build_bit_depth_reduction_params(pipe_ctx->stream, + &pipe_ctx->stream->bit_depth_params); + build_clamping_params(pipe_ctx->stream); + + return DC_OK; +} + +static void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream, + struct bit_depth_reduction_params *fmt_bit_depth) +{ + enum dc_dither_option option = stream->dither_option; + enum dc_pixel_encoding pixel_encoding = + stream->timing.pixel_encoding; + + memset(fmt_bit_depth, 0, sizeof(*fmt_bit_depth)); + + if (option == DITHER_OPTION_DEFAULT) { + switch (stream->timing.display_color_depth) { + case COLOR_DEPTH_666: + option = DITHER_OPTION_SPATIAL6; + break; + case COLOR_DEPTH_888: + option = DITHER_OPTION_SPATIAL8; + break; + case COLOR_DEPTH_101010: + option = DITHER_OPTION_SPATIAL10; + break; + default: + option = 
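get_pixel_clock_parameters() above adjusts the requested pixel clock for the stream's format: YCbCr 4:2:0 halves it and hardware 3D frame packing doubles it, while YCbCr 4:2:2 only clamps the color depth. Below is a standalone arithmetic sketch of those two adjustments in the same 100 Hz units the wrapper uses; the toy_* types and the numbers are illustrative only.

#include <stdint.h>
#include <stdio.h>

enum toy_pixel_encoding { TOY_ENC_RGB, TOY_ENC_YCBCR422, TOY_ENC_YCBCR420 };

struct toy_timing {
        uint32_t pix_clk_100hz;         /* pixel clock in units of 100 Hz */
        enum toy_pixel_encoding encoding;
        int hw_frame_packing;           /* non-zero for TIMING_3D_FORMAT_HW_FRAME_PACKING */
};

static uint32_t requested_pix_clk_100hz(const struct toy_timing *t)
{
        uint32_t clk = t->pix_clk_100hz;

        if (t->encoding == TOY_ENC_YCBCR420)
                clk /= 2;               /* mirrors the 4:2:0 halving in the hunk */
        if (t->hw_frame_packing)
                clk *= 2;               /* mirrors the frame-packing doubling */
        return clk;
}

int main(void)
{
        /* A 594.0 MHz 4:2:0 timing requests a 297.0 MHz clock (2970000 in 100 Hz units). */
        struct toy_timing t = { .pix_clk_100hz = 5940000, .encoding = TOY_ENC_YCBCR420 };

        printf("%u\n", (unsigned)requested_pix_clk_100hz(&t));
        return 0;
}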
DITHER_OPTION_DISABLE; + } + } + + if (option == DITHER_OPTION_DISABLE) + return; + + if (option == DITHER_OPTION_TRUN6) { + fmt_bit_depth->flags.TRUNCATE_ENABLED = 1; + fmt_bit_depth->flags.TRUNCATE_DEPTH = 0; + } else if (option == DITHER_OPTION_TRUN8 || + option == DITHER_OPTION_TRUN8_SPATIAL6 || + option == DITHER_OPTION_TRUN8_FM6) { + fmt_bit_depth->flags.TRUNCATE_ENABLED = 1; + fmt_bit_depth->flags.TRUNCATE_DEPTH = 1; + } else if (option == DITHER_OPTION_TRUN10 || + option == DITHER_OPTION_TRUN10_SPATIAL6 || + option == DITHER_OPTION_TRUN10_SPATIAL8 || + option == DITHER_OPTION_TRUN10_FM8 || + option == DITHER_OPTION_TRUN10_FM6 || + option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) { + fmt_bit_depth->flags.TRUNCATE_ENABLED = 1; + fmt_bit_depth->flags.TRUNCATE_DEPTH = 2; + } + + /* special case - Formatter can only reduce by 4 bits at most. + * When reducing from 12 to 6 bits, + * HW recommends we use trunc with round mode + * (if we did nothing, trunc to 10 bits would be used) + * note that any 12->10 bit reduction is ignored prior to DCE8, + * as the input was 10 bits. + */ + if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM || + option == DITHER_OPTION_SPATIAL6 || + option == DITHER_OPTION_FM6) { + fmt_bit_depth->flags.TRUNCATE_ENABLED = 1; + fmt_bit_depth->flags.TRUNCATE_DEPTH = 2; + fmt_bit_depth->flags.TRUNCATE_MODE = 1; + } + + /* spatial dither + * note that spatial modes 1-3 are never used + */ + if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM || + option == DITHER_OPTION_SPATIAL6 || + option == DITHER_OPTION_TRUN10_SPATIAL6 || + option == DITHER_OPTION_TRUN8_SPATIAL6) { + fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1; + fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 0; + fmt_bit_depth->flags.HIGHPASS_RANDOM = 1; + fmt_bit_depth->flags.RGB_RANDOM = + (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0; + } else if (option == DITHER_OPTION_SPATIAL8_FRAME_RANDOM || + option == DITHER_OPTION_SPATIAL8 || + option == DITHER_OPTION_SPATIAL8_FM6 || + option == DITHER_OPTION_TRUN10_SPATIAL8 || + option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) { + fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1; + fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 1; + fmt_bit_depth->flags.HIGHPASS_RANDOM = 1; + fmt_bit_depth->flags.RGB_RANDOM = + (pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0; + } else if (option == DITHER_OPTION_SPATIAL10_FRAME_RANDOM || + option == DITHER_OPTION_SPATIAL10 || + option == DITHER_OPTION_SPATIAL10_FM8 || + option == DITHER_OPTION_SPATIAL10_FM6) { + fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1; + fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 2; + fmt_bit_depth->flags.HIGHPASS_RANDOM = 1; + fmt_bit_depth->flags.RGB_RANDOM = + (pixel_encoding == PIXEL_ENCODING_RGB) ? 
1 : 0; + } + + if (option == DITHER_OPTION_SPATIAL6 || + option == DITHER_OPTION_SPATIAL8 || + option == DITHER_OPTION_SPATIAL10) { + fmt_bit_depth->flags.FRAME_RANDOM = 0; + } else { + fmt_bit_depth->flags.FRAME_RANDOM = 1; + } + + ////////////////////// + //// temporal dither + ////////////////////// + if (option == DITHER_OPTION_FM6 || + option == DITHER_OPTION_SPATIAL8_FM6 || + option == DITHER_OPTION_SPATIAL10_FM6 || + option == DITHER_OPTION_TRUN10_FM6 || + option == DITHER_OPTION_TRUN8_FM6 || + option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) { + fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1; + fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 0; + } else if (option == DITHER_OPTION_FM8 || + option == DITHER_OPTION_SPATIAL10_FM8 || + option == DITHER_OPTION_TRUN10_FM8) { + fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1; + fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 1; + } else if (option == DITHER_OPTION_FM10) { + fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1; + fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 2; + } + + fmt_bit_depth->pixel_encoding = pixel_encoding; +} + +bool dml_validate_dsc(struct dc *dc, struct dc_state *new_ctx) +{ + int i; + + /* Validate DSC config, dsc count validation is already done */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i]; + struct dc_stream_state *stream = pipe_ctx->stream; + struct dsc_config dsc_cfg; + struct pipe_ctx *odm_pipe; + int opp_cnt = 1; + + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) + opp_cnt++; + + /* Only need to validate top pipe */ + if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe || !stream || !stream->timing.flags.DSC) + continue; + + dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + + stream->timing.h_border_right) / opp_cnt; + dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + + stream->timing.v_border_bottom; + dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; + dsc_cfg.color_depth = stream->timing.display_color_depth; + dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? 
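dml_validate_dsc() checks each top pipe by rebuilding the DSC configuration per ODM segment: the picture width, and just below the horizontal slice count, are divided by the number of OPPs feeding the stream, while the height stays whole. A small worked sketch of that split follows, with a simplified struct and illustrative 8K numbers.

#include <stdio.h>

struct toy_dsc_cfg {
        int pic_width;
        int pic_height;
        int num_slices_h;
};

/* Share a full-stream DSC configuration across opp_cnt ODM segments. */
struct toy_dsc_cfg dsc_cfg_per_odm_segment(int h_active, int v_active, int num_slices_h, int opp_cnt)
{
        struct toy_dsc_cfg cfg = {
                .pic_width = h_active / opp_cnt,
                .pic_height = v_active,
                .num_slices_h = num_slices_h / opp_cnt,
        };
        return cfg;
}

int main(void)
{
        /* 7680x4320 with 8 slices across a 2:1 ODM combine -> 3840x4320 with 4 slices per segment. */
        struct toy_dsc_cfg cfg = dsc_cfg_per_odm_segment(7680, 4320, 8, 2);

        printf("%dx%d, %d slices\n", cfg.pic_width, cfg.pic_height, cfg.num_slices_h);
        return 0;
}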
true : false; + dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; + dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; + + if (pipe_ctx->stream_res.dsc && !pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg)) + return false; + } + return true; +} + +enum dc_status dml_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream) +{ + enum dc_status status = DC_OK; + struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream); + + if (!pipe_ctx) + return DC_ERROR_UNEXPECTED; + + + status = build_pipe_hw_param(pipe_ctx); + + return status; +} + +void dml_acquire_dsc(const struct dc *dc, + struct resource_context *res_ctx, + struct display_stream_compressor **dsc, + int pipe_idx) +{ + int i; + const struct resource_pool *pool = dc->res_pool; + struct display_stream_compressor *dsc_old = dc->current_state->res_ctx.pipe_ctx[pipe_idx].stream_res.dsc; + + ASSERT(*dsc == NULL); /* If this ASSERT fails, dsc was not released properly */ + *dsc = NULL; + + /* Always do 1-to-1 mapping when number of DSCs is same as number of pipes */ + if (pool->res_cap->num_dsc == pool->res_cap->num_opp) { + *dsc = pool->dscs[pipe_idx]; + res_ctx->is_dsc_acquired[pipe_idx] = true; + return; + } + + /* Return old DSC to avoid the need for redo it */ + if (dsc_old && !res_ctx->is_dsc_acquired[dsc_old->inst]) { + *dsc = dsc_old; + res_ctx->is_dsc_acquired[dsc_old->inst] = true; + return ; + } + + /* Find first free DSC */ + for (i = 0; i < pool->res_cap->num_dsc; i++) + if (!res_ctx->is_dsc_acquired[i]) { + *dsc = pool->dscs[i]; + res_ctx->is_dsc_acquired[i] = true; + break; + } +} + +static bool dml_split_stream_for_mpc_or_odm( + const struct dc *dc, + struct resource_context *res_ctx, + struct pipe_ctx *pri_pipe, + struct pipe_ctx *sec_pipe, + bool odm) +{ + int pipe_idx = sec_pipe->pipe_idx; + const struct resource_pool *pool = dc->res_pool; + + *sec_pipe = *pri_pipe; + + sec_pipe->pipe_idx = pipe_idx; + sec_pipe->plane_res.mi = pool->mis[pipe_idx]; + sec_pipe->plane_res.hubp = pool->hubps[pipe_idx]; + sec_pipe->plane_res.ipp = pool->ipps[pipe_idx]; + sec_pipe->plane_res.xfm = pool->transforms[pipe_idx]; + sec_pipe->plane_res.dpp = pool->dpps[pipe_idx]; + sec_pipe->plane_res.mpcc_inst = pool->dpps[pipe_idx]->inst; + sec_pipe->stream_res.dsc = NULL; + if (odm) { + if (pri_pipe->next_odm_pipe) { + ASSERT(pri_pipe->next_odm_pipe != sec_pipe); + sec_pipe->next_odm_pipe = pri_pipe->next_odm_pipe; + sec_pipe->next_odm_pipe->prev_odm_pipe = sec_pipe; + } + if (pri_pipe->top_pipe && pri_pipe->top_pipe->next_odm_pipe) { + pri_pipe->top_pipe->next_odm_pipe->bottom_pipe = sec_pipe; + sec_pipe->top_pipe = pri_pipe->top_pipe->next_odm_pipe; + } + if (pri_pipe->bottom_pipe && pri_pipe->bottom_pipe->next_odm_pipe) { + pri_pipe->bottom_pipe->next_odm_pipe->top_pipe = sec_pipe; + sec_pipe->bottom_pipe = pri_pipe->bottom_pipe->next_odm_pipe; + } + pri_pipe->next_odm_pipe = sec_pipe; + sec_pipe->prev_odm_pipe = pri_pipe; + ASSERT(sec_pipe->top_pipe == NULL); + + if (!sec_pipe->top_pipe) + sec_pipe->stream_res.opp = pool->opps[pipe_idx]; + else + sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp; + if (sec_pipe->stream->timing.flags.DSC == 1) { + dml_acquire_dsc(dc, res_ctx, &sec_pipe->stream_res.dsc, pipe_idx); + ASSERT(sec_pipe->stream_res.dsc); + if (sec_pipe->stream_res.dsc == NULL) + return false; + } + } else { + if (pri_pipe->bottom_pipe) { + ASSERT(pri_pipe->bottom_pipe != sec_pipe); + sec_pipe->bottom_pipe = 
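dml_acquire_dsc() above assigns a compressor in a fixed order of preference: a 1:1 pipe-to-DSC mapping when the pool has as many DSCs as OPPs, then the instance the pipe already held in the current state ("Return old DSC to avoid the need for redo it"), then the first free instance. Below is a reduced standalone model of that policy; the array bound and the toy_ helper name are illustrative.

#include <stdbool.h>

#define TOY_NUM_DSC 4

/* Returns the acquired DSC index, or -1 when none is free. */
int toy_acquire_dsc(bool acquired[TOY_NUM_DSC], bool one_to_one, int pipe_idx, int old_inst)
{
        int i;

        if (one_to_one) {                               /* as many DSCs as OPPs: fixed mapping */
                acquired[pipe_idx] = true;
                return pipe_idx;
        }
        if (old_inst >= 0 && !acquired[old_inst]) {     /* reuse what the pipe held before */
                acquired[old_inst] = true;
                return old_inst;
        }
        for (i = 0; i < TOY_NUM_DSC; i++) {             /* otherwise first free instance */
                if (!acquired[i]) {
                        acquired[i] = true;
                        return i;
                }
        }
        return -1;
}

Reusing the previously held instance keeps an already-configured DSC attached to its stream instead of programming a fresh one.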
pri_pipe->bottom_pipe; + sec_pipe->bottom_pipe->top_pipe = sec_pipe; + } + pri_pipe->bottom_pipe = sec_pipe; + sec_pipe->top_pipe = pri_pipe; + + ASSERT(pri_pipe->plane_state); + } + + return true; +} + +static struct pipe_ctx *dml_find_split_pipe( + struct dc *dc, + struct dc_state *context, + int old_index) +{ + struct pipe_ctx *pipe = NULL; + int i; + + if (old_index >= 0 && context->res_ctx.pipe_ctx[old_index].stream == NULL) { + pipe = &context->res_ctx.pipe_ctx[old_index]; + pipe->pipe_idx = old_index; + } + + if (!pipe) + for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) { + if (dc->current_state->res_ctx.pipe_ctx[i].top_pipe == NULL + && dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) { + if (context->res_ctx.pipe_ctx[i].stream == NULL) { + pipe = &context->res_ctx.pipe_ctx[i]; + pipe->pipe_idx = i; + break; + } + } + } + + /* + * May need to fix pipes getting tossed from 1 opp to another on flip + * Add for debugging transient underflow during topology updates: + * ASSERT(pipe); + */ + if (!pipe) + for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) { + if (context->res_ctx.pipe_ctx[i].stream == NULL) { + pipe = &context->res_ctx.pipe_ctx[i]; + pipe->pipe_idx = i; + break; + } + } + + return pipe; +} + +static void dml_release_dsc(struct resource_context *res_ctx, + const struct resource_pool *pool, + struct display_stream_compressor **dsc) +{ + int i; + + for (i = 0; i < pool->res_cap->num_dsc; i++) + if (pool->dscs[i] == *dsc) { + res_ctx->is_dsc_acquired[i] = false; + *dsc = NULL; + break; + } +} + +static int dml_get_num_mpc_splits(struct pipe_ctx *pipe) +{ + int mpc_split_count = 0; + struct pipe_ctx *other_pipe = pipe->bottom_pipe; + + while (other_pipe && other_pipe->plane_state == pipe->plane_state) { + mpc_split_count++; + other_pipe = other_pipe->bottom_pipe; + } + other_pipe = pipe->top_pipe; + while (other_pipe && other_pipe->plane_state == pipe->plane_state) { + mpc_split_count++; + other_pipe = other_pipe->top_pipe; + } + + return mpc_split_count; +} + +static bool dml_enough_pipes_for_subvp(struct dc *dc, + struct dc_state *context) +{ + int i = 0; + int num_pipes = 0; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->stream && pipe->plane_state) + num_pipes++; + } + + // Sub-VP only possible if the number of "real" pipes is + // less than or equal to half the number of available pipes + if (num_pipes * 2 > dc->res_pool->pipe_count) + return false; + + return true; +} + +static int dml_validate_apply_pipe_split_flags( + struct dc *dc, + struct dc_state *context, + int vlevel, + int *split, + bool *merge) +{ + int i, pipe_idx, vlevel_split; + int plane_count = 0; + bool force_split = false; + bool avoid_split = dc->debug.pipe_split_policy == MPC_SPLIT_AVOID; + struct vba_vars_st *v = &context->bw_ctx.dml.vba; + int max_mpc_comb = v->maxMpcComb; + + if (context->stream_count > 1) { + if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP) + avoid_split = true; + } else if (dc->debug.force_single_disp_pipe_split) + force_split = true; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + /** + * Workaround for avoiding pipe-split in cases where we'd split + * planes that are too small, resulting in splits that aren't + * valid for the scaler. 
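dml_enough_pipes_for_subvp() above admits Sub-VP only when the pipes that actually carry a plane occupy at most half of the pool, which leaves room for the phantom pipes that dml_add_phantom_pipes() creates further down. The check reduces to a single comparison; the sketch below is the same test over plain integers.

#include <stdbool.h>

/* Sub-VP requires the plane-bearing pipes to use at most half the pool. */
bool toy_enough_pipes_for_subvp(int pipes_with_plane, int pipe_count)
{
        return pipes_with_plane * 2 <= pipe_count;
}

With a 6-pipe pool, for example, up to three plane-bearing pipes still qualify; a fourth rules Sub-VP out.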
+ */ + if (pipe->plane_state && + (pipe->plane_state->dst_rect.width <= 16 || + pipe->plane_state->dst_rect.height <= 16 || + pipe->plane_state->src_rect.width <= 16 || + pipe->plane_state->src_rect.height <= 16)) + avoid_split = true; + + /* TODO: fix dc bugs and remove this split threshold thing */ + if (pipe->stream && !pipe->prev_odm_pipe && + (!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state)) + ++plane_count; + } + if (plane_count > dc->res_pool->pipe_count / 2) + avoid_split = true; + + /* W/A: Mode timing with borders may not work well with pipe split, avoid for this corner case */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + struct dc_crtc_timing timing; + + if (!pipe->stream) + continue; + else { + timing = pipe->stream->timing; + if (timing.h_border_left + timing.h_border_right + + timing.v_border_top + timing.v_border_bottom > 0) { + avoid_split = true; + break; + } + } + } + + /* Avoid split loop looks for lowest voltage level that allows most unsplit pipes possible */ + if (avoid_split) { + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++) + if (v->NoOfDPP[vlevel][0][pipe_idx] == 1 && + v->ModeSupport[vlevel][0]) + break; + /* Impossible to not split this pipe */ + if (vlevel > context->bw_ctx.dml.soc.num_states) + vlevel = vlevel_split; + else + max_mpc_comb = 0; + pipe_idx++; + } + v->maxMpcComb = max_mpc_comb; + } + + /* Split loop sets which pipe should be split based on dml outputs and dc flags */ + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + int pipe_plane = v->pipe_plane[pipe_idx]; + bool split4mpc = context->stream_count == 1 && plane_count == 1 + && dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4; + + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + if (split4mpc || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 4) + split[i] = 4; + else if (force_split || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 2) + split[i] = 2; + + if ((pipe->stream->view_format == + VIEW_3D_FORMAT_SIDE_BY_SIDE || + pipe->stream->view_format == + VIEW_3D_FORMAT_TOP_AND_BOTTOM) && + (pipe->stream->timing.timing_3d_format == + TIMING_3D_FORMAT_TOP_AND_BOTTOM || + pipe->stream->timing.timing_3d_format == + TIMING_3D_FORMAT_SIDE_BY_SIDE)) + split[i] = 2; + if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) { + split[i] = 2; + v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1; + } + if (dc->debug.force_odm_combine_4to1 & (1 << pipe->stream_res.tg->inst)) { + split[i] = 4; + v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_4to1; + } + /*420 format workaround*/ + if (pipe->stream->timing.h_addressable > 7680 && + pipe->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) { + split[i] = 4; + } + + v->ODMCombineEnabled[pipe_plane] = + v->ODMCombineEnablePerState[vlevel][pipe_plane]; + + if (v->ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) { + if (dml_get_num_mpc_splits(pipe) == 1) { + /*If need split for mpc but 2 way split already*/ + if (split[i] == 4) + split[i] = 2; /* 2 -> 4 MPC */ + else if (split[i] == 2) + split[i] = 0; /* 2 -> 2 MPC */ + else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) + merge[i] = true; /* 2 -> 1 MPC */ + } else if 
(dml_get_num_mpc_splits(pipe) == 3) { + /*If need split for mpc but 4 way split already*/ + if (split[i] == 2 && ((pipe->top_pipe && !pipe->top_pipe->top_pipe) + || !pipe->bottom_pipe)) { + merge[i] = true; /* 4 -> 2 MPC */ + } else if (split[i] == 0 && pipe->top_pipe && + pipe->top_pipe->plane_state == pipe->plane_state) + merge[i] = true; /* 4 -> 1 MPC */ + split[i] = 0; + } else if (dml_get_num_mpc_splits(pipe)) { + /* ODM -> MPC transition */ + if (pipe->prev_odm_pipe) { + split[i] = 0; + merge[i] = true; + } + } + } else { + if (dml_get_num_mpc_splits(pipe) == 1) { + /*If need split for odm but 2 way split already*/ + if (split[i] == 4) + split[i] = 2; /* 2 -> 4 ODM */ + else if (split[i] == 2) + split[i] = 0; /* 2 -> 2 ODM */ + else if (pipe->prev_odm_pipe) { + ASSERT(0); /* NOT expected yet */ + merge[i] = true; /* exit ODM */ + } + } else if (dml_get_num_mpc_splits(pipe) == 3) { + /*If need split for odm but 4 way split already*/ + if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe) + || !pipe->next_odm_pipe)) { + ASSERT(0); /* NOT expected yet */ + merge[i] = true; /* 4 -> 2 ODM */ + } else if (split[i] == 0 && pipe->prev_odm_pipe) { + ASSERT(0); /* NOT expected yet */ + merge[i] = true; /* exit ODM */ + } + split[i] = 0; + } else if (dml_get_num_mpc_splits(pipe)) { + /* MPC -> ODM transition */ + ASSERT(0); /* NOT expected yet */ + if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) { + split[i] = 0; + merge[i] = true; + } + } + } + + /* Adjust dppclk when split is forced, do not bother with dispclk */ + if (split[i] != 0 && v->NoOfDPP[vlevel][max_mpc_comb][pipe_idx] == 1) + v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] /= 2; + pipe_idx++; + } + + return vlevel; +} + +static void dml_set_phantom_stream_timing(struct dc *dc, + struct dc_state *context, + struct pipe_ctx *ref_pipe, + struct dc_stream_state *phantom_stream) +{ + // phantom_vactive = blackout (latency + margin) + fw_processing_delays + pstate allow width + uint32_t phantom_vactive_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us + 60 + + dc->caps.subvp_fw_processing_delay_us + + dc->caps.subvp_pstate_allow_width_us; + uint32_t phantom_vactive = ((double)phantom_vactive_us/1000000) * + (ref_pipe->stream->timing.pix_clk_100hz * 100) / + (double)ref_pipe->stream->timing.h_total; + uint32_t phantom_bp = ref_pipe->pipe_dlg_param.vstartup_start; + + phantom_stream->dst.y = 0; + phantom_stream->dst.height = phantom_vactive; + phantom_stream->src.y = 0; + phantom_stream->src.height = phantom_vactive; + + phantom_stream->timing.v_addressable = phantom_vactive; + phantom_stream->timing.v_front_porch = 1; + phantom_stream->timing.v_total = phantom_stream->timing.v_addressable + + phantom_stream->timing.v_front_porch + + phantom_stream->timing.v_sync_width + + phantom_bp; +} + +static struct dc_stream_state *dml_enable_phantom_stream(struct dc *dc, + struct dc_state *context, + struct pipe_ctx *ref_pipe) +{ + struct dc_stream_state *phantom_stream = NULL; + + phantom_stream = dc_create_stream_for_sink(ref_pipe->stream->sink); + phantom_stream->signal = SIGNAL_TYPE_VIRTUAL; + phantom_stream->dpms_off = true; + phantom_stream->mall_stream_config.type = SUBVP_PHANTOM; + phantom_stream->mall_stream_config.paired_stream = ref_pipe->stream; + ref_pipe->stream->mall_stream_config.type = SUBVP_MAIN; + ref_pipe->stream->mall_stream_config.paired_stream = phantom_stream; + + /* stream has limited viewport and small timing */ + memcpy(&phantom_stream->timing, 
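dml_set_phantom_stream_timing() above converts a time budget into phantom lines: the budget (DRAM clock-change latency plus a 60 us margin, the firmware processing delay, and the p-state allow width) is multiplied by the pixel clock and divided by h_total, since one line lasts h_total pixel periods. Below is a standalone version of that conversion with illustrative numbers.

#include <stdint.h>
#include <stdio.h>

/* Lines needed to cover budget_us at a pixel clock given in 100 Hz units and a horizontal total. */
static uint32_t us_to_lines(double budget_us, uint32_t pix_clk_100hz, uint32_t h_total)
{
        double line_rate_hz = (double)pix_clk_100hz * 100.0 / (double)h_total;

        return (uint32_t)(budget_us / 1000000.0 * line_rate_hz);
}

int main(void)
{
        /* ~594 MHz, h_total 4400 -> ~135 kHz line rate, so a 400 us budget needs ~54 lines. */
        printf("%u\n", (unsigned)us_to_lines(400.0, 5940000, 4400));
        return 0;
}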
&ref_pipe->stream->timing, sizeof(phantom_stream->timing)); + memcpy(&phantom_stream->src, &ref_pipe->stream->src, sizeof(phantom_stream->src)); + memcpy(&phantom_stream->dst, &ref_pipe->stream->dst, sizeof(phantom_stream->dst)); + dml_set_phantom_stream_timing(dc, context, ref_pipe, phantom_stream); + + dc_add_stream_to_ctx(dc, context, phantom_stream); + dc->hwss.apply_ctx_to_hw(dc, context); + return phantom_stream; +} + +static void dml_enable_phantom_plane(struct dc *dc, + struct dc_state *context, + struct dc_stream_state *phantom_stream, + struct pipe_ctx *main_pipe) +{ + struct dc_plane_state *phantom_plane = NULL; + struct dc_plane_state *prev_phantom_plane = NULL; + struct pipe_ctx *curr_pipe = main_pipe; + + while (curr_pipe) { + if (curr_pipe->top_pipe && curr_pipe->top_pipe->plane_state == curr_pipe->plane_state) + phantom_plane = prev_phantom_plane; + else + phantom_plane = dc_create_plane_state(dc); + + memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address)); + memcpy(&phantom_plane->scaling_quality, &curr_pipe->plane_state->scaling_quality, + sizeof(phantom_plane->scaling_quality)); + memcpy(&phantom_plane->src_rect, &curr_pipe->plane_state->src_rect, sizeof(phantom_plane->src_rect)); + memcpy(&phantom_plane->dst_rect, &curr_pipe->plane_state->dst_rect, sizeof(phantom_plane->dst_rect)); + memcpy(&phantom_plane->clip_rect, &curr_pipe->plane_state->clip_rect, sizeof(phantom_plane->clip_rect)); + memcpy(&phantom_plane->plane_size, &curr_pipe->plane_state->plane_size, + sizeof(phantom_plane->plane_size)); + memcpy(&phantom_plane->tiling_info, &curr_pipe->plane_state->tiling_info, + sizeof(phantom_plane->tiling_info)); + memcpy(&phantom_plane->dcc, &curr_pipe->plane_state->dcc, sizeof(phantom_plane->dcc)); + /* Currently compat_level is undefined in dc_state + * phantom_plane->compat_level = curr_pipe->plane_state->compat_level; + */ + phantom_plane->format = curr_pipe->plane_state->format; + phantom_plane->rotation = curr_pipe->plane_state->rotation; + phantom_plane->visible = curr_pipe->plane_state->visible; + + /* Shadow pipe has small viewport. 
*/ + phantom_plane->clip_rect.y = 0; + phantom_plane->clip_rect.height = phantom_stream->timing.v_addressable; + + dc_add_plane_to_context(dc, phantom_stream, phantom_plane, context); + + curr_pipe = curr_pipe->bottom_pipe; + prev_phantom_plane = phantom_plane; + } +} + +static void dml_add_phantom_pipes(struct dc *dc, struct dc_state *context) +{ + int i = 0; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + struct dc_stream_state *ref_stream = pipe->stream; + // Only construct phantom stream for top pipes that have plane enabled + if (!pipe->top_pipe && pipe->plane_state && pipe->stream && + pipe->stream->mall_stream_config.type == SUBVP_NONE) { + struct dc_stream_state *phantom_stream = NULL; + + phantom_stream = dml_enable_phantom_stream(dc, context, pipe); + dml_enable_phantom_plane(dc, context, phantom_stream, pipe); + } + } + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->plane_state && pipe->stream && + pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { + pipe->stream->use_dynamic_meta = false; + pipe->plane_state->flip_immediate = false; + if (!resource_build_scaling_params(pipe)) { + // Log / remove phantom pipes since failed to build scaling params + } + } + } +} + +static void dml_remove_phantom_pipes(struct dc *dc, struct dc_state *context) +{ + int i; + bool removed_pipe = false; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + // build scaling params for phantom pipes + if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) { + dc_rem_all_planes_for_stream(dc, pipe->stream, context); + dc_remove_stream_from_ctx(dc, context, pipe->stream); + removed_pipe = true; + } + + // Clear all phantom stream info + if (pipe->stream) { + pipe->stream->mall_stream_config.type = SUBVP_NONE; + pipe->stream->mall_stream_config.paired_stream = NULL; + } + } + if (removed_pipe) + dc->hwss.apply_ctx_to_hw(dc, context); +} + +/* + * If the input state contains no upstream planes for a particular pipe (i.e. 
only timing) + * we need to populate some "conservative" plane information as DML cannot handle "no planes" + */ +static void populate_default_plane_from_timing(const struct dc_crtc_timing *timing, struct _vcs_dpi_display_pipe_params_st *pipe) +{ + pipe->src.is_hsplit = pipe->dest.odm_combine != dm_odm_combine_mode_disabled; + pipe->src.source_scan = dm_horz; + pipe->src.sw_mode = dm_sw_4kb_s; + pipe->src.macro_tile_size = dm_64k_tile; + pipe->src.viewport_width = timing->h_addressable; + if (pipe->src.viewport_width > 1920) + pipe->src.viewport_width = 1920; + pipe->src.viewport_height = timing->v_addressable; + if (pipe->src.viewport_height > 1080) + pipe->src.viewport_height = 1080; + pipe->src.surface_height_y = pipe->src.viewport_height; + pipe->src.surface_width_y = pipe->src.viewport_width; + pipe->src.surface_height_c = pipe->src.viewport_height; + pipe->src.surface_width_c = pipe->src.viewport_width; + pipe->src.data_pitch = ((pipe->src.viewport_width + 255) / 256) * 256; + pipe->src.source_format = dm_444_32; + pipe->dest.recout_width = pipe->src.viewport_width; + pipe->dest.recout_height = pipe->src.viewport_height; + pipe->dest.full_recout_width = pipe->dest.recout_width; + pipe->dest.full_recout_height = pipe->dest.recout_height; + pipe->scale_ratio_depth.lb_depth = dm_lb_16; + pipe->scale_ratio_depth.hscl_ratio = 1.0; + pipe->scale_ratio_depth.vscl_ratio = 1.0; + pipe->scale_ratio_depth.scl_enable = 0; + pipe->scale_taps.htaps = 1; + pipe->scale_taps.vtaps = 1; + pipe->dest.vtotal_min = timing->v_total; + pipe->dest.vtotal_max = timing->v_total; + + if (pipe->dest.odm_combine == dm_odm_combine_mode_2to1) { + pipe->src.viewport_width /= 2; + pipe->dest.recout_width /= 2; + } else if (pipe->dest.odm_combine == dm_odm_combine_mode_4to1) { + pipe->src.viewport_width /= 4; + pipe->dest.recout_width /= 4; + } + + pipe->src.dcc = false; + pipe->src.dcc_rate = 1; +} + +/* + * If the pipe is not blending (i.e. 
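When a pipe has a stream but no plane, populate_default_plane_from_timing() above fabricates a conservative surface so DML still has something to model: the viewport is capped at 1920x1080 and the pitch is the viewport width rounded up to the next multiple of 256. The rounding and clamp are the usual idioms, sketched standalone below with illustrative values.

#include <stdio.h>

int toy_round_up_256(int width)
{
        return ((width + 255) / 256) * 256;
}

int toy_clamp(int value, int max)
{
        return value > max ? max : value;
}

int main(void)
{
        int viewport_w = toy_clamp(3840, 1920);         /* default plane never exceeds 1920 wide */
        int pitch = toy_round_up_256(viewport_w);       /* 1920 rounds up to 2048 */

        printf("viewport=%d pitch=%d\n", viewport_w, pitch);
        return 0;
}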
pipe_ctx->top pipe == null) then its + * hsplit group is equal to its own pipe ID + * Otherwise, all pipes part of the same blending tree have the same hsplit group + * ID as the top most pipe + * + * If the pipe ctx is ODM combined, then similar logic follows + */ +static void populate_hsplit_group_from_dc_pipe_ctx (const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe) +{ + e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx; + + if (dc_pipe_ctx->top_pipe && dc_pipe_ctx->top_pipe->plane_state + == dc_pipe_ctx->plane_state) { + struct pipe_ctx *first_pipe = dc_pipe_ctx->top_pipe; + int split_idx = 0; + + while (first_pipe->top_pipe && first_pipe->top_pipe->plane_state + == dc_pipe_ctx->plane_state) { + first_pipe = first_pipe->top_pipe; + split_idx++; + } + + /* Treat 4to1 mpc combine as an mpo of 2 2-to-1 combines */ + if (split_idx == 0) + e2e_pipe->pipe.src.hsplit_grp = first_pipe->pipe_idx; + else if (split_idx == 1) + e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx; + else if (split_idx == 2) + e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->top_pipe->pipe_idx; + + } else if (dc_pipe_ctx->prev_odm_pipe) { + struct pipe_ctx *first_pipe = dc_pipe_ctx->prev_odm_pipe; + + while (first_pipe->prev_odm_pipe) + first_pipe = first_pipe->prev_odm_pipe; + e2e_pipe->pipe.src.hsplit_grp = first_pipe->pipe_idx; + } +} + +static void populate_dml_from_dc_pipe_ctx (const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe, int always_scale) +{ + const struct dc_plane_state *pln = dc_pipe_ctx->plane_state; + const struct scaler_data *scl = &dc_pipe_ctx->plane_res.scl_data; + + e2e_pipe->pipe.src.immediate_flip = pln->flip_immediate; + e2e_pipe->pipe.src.is_hsplit = (dc_pipe_ctx->bottom_pipe && dc_pipe_ctx->bottom_pipe->plane_state == pln) + || (dc_pipe_ctx->top_pipe && dc_pipe_ctx->top_pipe->plane_state == pln) + || e2e_pipe->pipe.dest.odm_combine != dm_odm_combine_mode_disabled; + + /* stereo is not split */ + if (pln->stereo_format == PLANE_STEREO_FORMAT_SIDE_BY_SIDE || + pln->stereo_format == PLANE_STEREO_FORMAT_TOP_AND_BOTTOM) { + e2e_pipe->pipe.src.is_hsplit = false; + e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx; + } + + e2e_pipe->pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90 + || pln->rotation == ROTATION_ANGLE_270 ? 
dm_vert : dm_horz; + e2e_pipe->pipe.src.viewport_y_y = scl->viewport.y; + e2e_pipe->pipe.src.viewport_y_c = scl->viewport_c.y; + e2e_pipe->pipe.src.viewport_width = scl->viewport.width; + e2e_pipe->pipe.src.viewport_width_c = scl->viewport_c.width; + e2e_pipe->pipe.src.viewport_height = scl->viewport.height; + e2e_pipe->pipe.src.viewport_height_c = scl->viewport_c.height; + e2e_pipe->pipe.src.viewport_width_max = pln->src_rect.width; + e2e_pipe->pipe.src.viewport_height_max = pln->src_rect.height; + e2e_pipe->pipe.src.surface_width_y = pln->plane_size.surface_size.width; + e2e_pipe->pipe.src.surface_height_y = pln->plane_size.surface_size.height; + e2e_pipe->pipe.src.surface_width_c = pln->plane_size.chroma_size.width; + e2e_pipe->pipe.src.surface_height_c = pln->plane_size.chroma_size.height; + + if (pln->format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA + || pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { + e2e_pipe->pipe.src.data_pitch = pln->plane_size.surface_pitch; + e2e_pipe->pipe.src.data_pitch_c = pln->plane_size.chroma_pitch; + e2e_pipe->pipe.src.meta_pitch = pln->dcc.meta_pitch; + e2e_pipe->pipe.src.meta_pitch_c = pln->dcc.meta_pitch_c; + } else { + e2e_pipe->pipe.src.data_pitch = pln->plane_size.surface_pitch; + e2e_pipe->pipe.src.meta_pitch = pln->dcc.meta_pitch; + } + e2e_pipe->pipe.src.dcc = pln->dcc.enable; + e2e_pipe->pipe.src.dcc_rate = 1; + e2e_pipe->pipe.dest.recout_width = scl->recout.width; + e2e_pipe->pipe.dest.recout_height = scl->recout.height; + e2e_pipe->pipe.dest.full_recout_height = scl->recout.height; + e2e_pipe->pipe.dest.full_recout_width = scl->recout.width; + if (e2e_pipe->pipe.dest.odm_combine == dm_odm_combine_mode_2to1) + e2e_pipe->pipe.dest.full_recout_width *= 2; + else if (e2e_pipe->pipe.dest.odm_combine == dm_odm_combine_mode_4to1) + e2e_pipe->pipe.dest.full_recout_width *= 4; + else { + struct pipe_ctx *split_pipe = dc_pipe_ctx->bottom_pipe; + + while (split_pipe && split_pipe->plane_state == pln) { + e2e_pipe->pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width; + split_pipe = split_pipe->bottom_pipe; + } + split_pipe = dc_pipe_ctx->top_pipe; + while (split_pipe && split_pipe->plane_state == pln) { + e2e_pipe->pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width; + split_pipe = split_pipe->top_pipe; + } + } + + e2e_pipe->pipe.scale_ratio_depth.lb_depth = dm_lb_16; + e2e_pipe->pipe.scale_ratio_depth.hscl_ratio = (double) scl->ratios.horz.value / (1ULL<<32); + e2e_pipe->pipe.scale_ratio_depth.hscl_ratio_c = (double) scl->ratios.horz_c.value / (1ULL<<32); + e2e_pipe->pipe.scale_ratio_depth.vscl_ratio = (double) scl->ratios.vert.value / (1ULL<<32); + e2e_pipe->pipe.scale_ratio_depth.vscl_ratio_c = (double) scl->ratios.vert_c.value / (1ULL<<32); + e2e_pipe->pipe.scale_ratio_depth.scl_enable = + scl->ratios.vert.value != dc_fixpt_one.value + || scl->ratios.horz.value != dc_fixpt_one.value + || scl->ratios.vert_c.value != dc_fixpt_one.value + || scl->ratios.horz_c.value != dc_fixpt_one.value /*Lb only or Full scl*/ + || always_scale; /*support always scale*/ + e2e_pipe->pipe.scale_taps.htaps = scl->taps.h_taps; + e2e_pipe->pipe.scale_taps.htaps_c = scl->taps.h_taps_c; + e2e_pipe->pipe.scale_taps.vtaps = scl->taps.v_taps; + e2e_pipe->pipe.scale_taps.vtaps_c = scl->taps.v_taps_c; + + /* Currently compat_level is not defined. 
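The scaler ratios handed to DML above are 32.32 fixed-point values, so dividing the raw integer by 2^32 recovers the real ratio, and scaling is flagged whenever any of the four ratios differs from fixed-point 1.0 (or always_scale is set). A compact sketch of both steps follows; the TOY_ names are illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_FIXPT_ONE (1ULL << 32)      /* 1.0 in 32.32 fixed point */

static double fixpt_to_double(uint64_t value)
{
        return (double)value / (double)TOY_FIXPT_ONE;
}

static bool scaling_enabled(uint64_t h, uint64_t v, uint64_t h_c, uint64_t v_c, bool always_scale)
{
        return h != TOY_FIXPT_ONE || v != TOY_FIXPT_ONE ||
               h_c != TOY_FIXPT_ONE || v_c != TOY_FIXPT_ONE || always_scale;
}

int main(void)
{
        uint64_t ratio = 3ULL << 31;    /* 1.5 in 32.32 fixed point */

        printf("hscl_ratio=%.2f scl_enable=%d\n", fixpt_to_double(ratio),
               scaling_enabled(ratio, TOY_FIXPT_ONE, TOY_FIXPT_ONE, TOY_FIXPT_ONE, false));
        return 0;
}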
Commenting it until further resolution + * if (pln->compat_level == DC_LEGACY_TILING_ADDR_GEN_TWO) { + swizzle_to_dml_params(pln->tiling_info.gfx9.swizzle, + &e2e_pipe->pipe.src.sw_mode); + e2e_pipe->pipe.src.macro_tile_size = + swizzle_mode_to_macro_tile_size(pln->tiling_info.gfx9.swizzle); + } else { + gfx10array_mode_to_dml_params(pln->tiling_info.gfx10compatible.array_mode, + pln->compat_level, + &e2e_pipe->pipe.src.sw_mode); + e2e_pipe->pipe.src.macro_tile_size = dm_4k_tile; + }*/ + + e2e_pipe->pipe.src.source_format = dc_source_format_to_dml_source_format(pln->format); +} + +static void populate_dml_cursor_parameters_from_dc_pipe_ctx (const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe) +{ + /* + * For graphic plane, cursor number is 1, nv12 is 0 + * bw calculations due to cursor on/off + */ + if (dc_pipe_ctx->plane_state && + (dc_pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE || + dc_pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM)) + e2e_pipe->pipe.src.num_cursors = 0; + else + e2e_pipe->pipe.src.num_cursors = 1; + + e2e_pipe->pipe.src.cur0_src_width = 256; + e2e_pipe->pipe.src.cur0_bpp = dm_cur_32bit; +} + +static int populate_dml_pipes_from_context_base( + struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + bool fast_validate) +{ + int pipe_cnt, i; + bool synchronized_vblank = true; + struct resource_context *res_ctx = &context->res_ctx; + + for (i = 0, pipe_cnt = -1; i < dc->res_pool->pipe_count; i++) { + if (!res_ctx->pipe_ctx[i].stream) + continue; + + if (pipe_cnt < 0) { + pipe_cnt = i; + continue; + } + + if (res_ctx->pipe_ctx[pipe_cnt].stream == res_ctx->pipe_ctx[i].stream) + continue; + + if (dc->debug.disable_timing_sync || + (!resource_are_streams_timing_synchronizable( + res_ctx->pipe_ctx[pipe_cnt].stream, + res_ctx->pipe_ctx[i].stream) && + !resource_are_vblanks_synchronizable( + res_ctx->pipe_ctx[pipe_cnt].stream, + res_ctx->pipe_ctx[i].stream))) { + synchronized_vblank = false; + break; + } + } + + for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { + struct dc_crtc_timing *timing = &res_ctx->pipe_ctx[i].stream->timing; + + struct audio_check aud_check = {0}; + if (!res_ctx->pipe_ctx[i].stream) + continue; + + /* todo: + pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0; + pipes[pipe_cnt].pipe.src.dcc = 0; + pipes[pipe_cnt].pipe.src.vm = 0;*/ + + pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0; + + pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC; + /* todo: rotation?*/ + pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h; + if (res_ctx->pipe_ctx[i].stream->use_dynamic_meta) { + pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true; + /* 1/2 vblank */ + pipes[pipe_cnt].pipe.src.dynamic_metadata_lines_before_active = + (timing->v_total - timing->v_addressable + - timing->v_border_top - timing->v_border_bottom) / 2; + /* 36 bytes dp, 32 hdmi */ + pipes[pipe_cnt].pipe.src.dynamic_metadata_xmit_bytes = + dc_is_dp_signal(res_ctx->pipe_ctx[i].stream->signal) ? 
36 : 32; + } + pipes[pipe_cnt].pipe.dest.synchronized_vblank_all_planes = synchronized_vblank; + + dc_timing_to_dml_timing(timing, &pipes[pipe_cnt].pipe.dest); + pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min; + pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max; + + pipes[pipe_cnt].pipe.dest.otg_inst = res_ctx->pipe_ctx[i].stream_res.tg->inst; + + pipes[pipe_cnt].pipe.dest.odm_combine = get_dml_odm_combine(&res_ctx->pipe_ctx[i]); + + populate_hsplit_group_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt]); + + pipes[pipe_cnt].dout.dp_lanes = 4; + pipes[pipe_cnt].dout.is_virtual = 0; + pipes[pipe_cnt].dout.output_type = get_dml_output_type(res_ctx->pipe_ctx[i].stream->signal); + if (pipes[pipe_cnt].dout.output_type < 0) { + pipes[pipe_cnt].dout.output_type = dm_dp; + pipes[pipe_cnt].dout.is_virtual = 1; + } + + populate_color_depth_and_encoding_from_timing(&res_ctx->pipe_ctx[i].stream->timing, &pipes[pipe_cnt].dout); + + if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC) + pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0; + + /* todo: default max for now, until there is logic reflecting this in dc*/ + pipes[pipe_cnt].dout.dsc_input_bpc = 12; + /*fill up the audio sample rate (unit in kHz)*/ + get_audio_check(&res_ctx->pipe_ctx[i].stream->audio_info, &aud_check); + pipes[pipe_cnt].dout.max_audio_sample_rate = aud_check.max_audiosample_rate / 1000; + + populate_dml_cursor_parameters_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt]); + + if (!res_ctx->pipe_ctx[i].plane_state) { + populate_default_plane_from_timing(timing, &pipes[pipe_cnt].pipe); + } else { + populate_dml_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt], dc->debug.always_scale); + } + + pipe_cnt++; + } + + /* populate writeback information */ + if (dc->res_pool) + dc->res_pool->funcs->populate_dml_writeback_from_context(dc, res_ctx, pipes); + + return pipe_cnt; +} + +static int dml_populate_dml_pipes_from_context( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + bool fast_validate) +{ + int i, pipe_cnt; + struct resource_context *res_ctx = &context->res_ctx; + struct pipe_ctx *pipe; + + populate_dml_pipes_from_context_base(dc, context, pipes, fast_validate); + + for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { + struct dc_crtc_timing *timing; + + if (!res_ctx->pipe_ctx[i].stream) + continue; + pipe = &res_ctx->pipe_ctx[i]; + timing = &pipe->stream->timing; + + pipes[pipe_cnt].pipe.src.gpuvm = true; + pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0; + pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0; + pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch; + + pipes[pipe_cnt].dout.dsc_input_bpc = 0; + if (pipes[pipe_cnt].dout.dsc_enable) { + switch (timing->display_color_depth) { + case COLOR_DEPTH_888: + pipes[pipe_cnt].dout.dsc_input_bpc = 8; + break; + case COLOR_DEPTH_101010: + pipes[pipe_cnt].dout.dsc_input_bpc = 10; + break; + case COLOR_DEPTH_121212: + pipes[pipe_cnt].dout.dsc_input_bpc = 12; + break; + default: + ASSERT(0); + break; + } + } + pipe_cnt++; + } + dc->config.enable_4to1MPC = false; + if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) { + if (is_dual_plane(pipe->plane_state->format) + && pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) { + dc->config.enable_4to1MPC = true; + } else if (!is_dual_plane(pipe->plane_state->format)) { 
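The dsc_input_bpc switch above maps the stream's color depth straight to the DSC input bit depth, 8, 10, or 12 bits per component, with anything else asserting. A minimal helper in the same spirit, keyed on plain bits-per-component rather than the driver's color-depth enum:

/* Map bits per component to the DSC input bpc; 0 marks an unsupported depth. */
int toy_dsc_input_bpc(int bits_per_component)
{
        switch (bits_per_component) {
        case 8:
        case 10:
        case 12:
                return bits_per_component;
        default:
                return 0;       /* the wrapper asserts in this case */
        }
}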
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192; + pipes[0].pipe.src.unbounded_req_mode = true; + } + } + + return pipe_cnt; +} + +static void dml_full_validate_bw_helper(struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int *vlevel, + int *split, + bool *merge, + int *pipe_cnt) +{ + struct vba_vars_st *vba = &context->bw_ctx.dml.vba; + + /* + * DML favors voltage over p-state, but we're more interested in + * supporting p-state over voltage. We can't support p-state in + * prefetch mode > 0 so try capping the prefetch mode to start. + */ + context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank = + dm_allow_self_refresh_and_mclk_switch; + *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt); + /* This may adjust vlevel and maxMpcComb */ + if (*vlevel < context->bw_ctx.dml.soc.num_states) + *vlevel = dml_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge); + + /* Conditions for setting up phantom pipes for SubVP: + * 1. Not force disable SubVP + * 2. Full update (i.e. !fast_validate) + * 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?) + * 4. Display configuration passes validation + * 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch) + */ + if (!dc->debug.force_disable_subvp && + dml_enough_pipes_for_subvp(dc, context) && + *vlevel < context->bw_ctx.dml.soc.num_states && + (vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported || + dc->debug.force_subvp_mclk_switch)) { + + dml_add_phantom_pipes(dc, context); + + /* Create input to DML based on new context which includes phantom pipes + * TODO: Input to DML should mark which pipes are phantom + */ + *pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, false); + *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt); + if (*vlevel < context->bw_ctx.dml.soc.num_states) { + memset(split, 0, sizeof(split)); + memset(merge, 0, sizeof(merge)); + *vlevel = dml_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge); + } + + // If SubVP pipe config is unsupported (or cannot be used for UCLK switching) + // remove phantom pipes and repopulate dml pipes + if (*vlevel == context->bw_ctx.dml.soc.num_states || + vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) { + dml_remove_phantom_pipes(dc, context); + *pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, false); + } + } +} + +static void dcn20_adjust_adaptive_sync_v_startup( + const struct dc_crtc_timing *dc_crtc_timing, int *vstartup_start) +{ + struct dc_crtc_timing patched_crtc_timing; + uint32_t asic_blank_end = 0; + uint32_t asic_blank_start = 0; + uint32_t newVstartup = 0; + + patched_crtc_timing = *dc_crtc_timing; + + if (patched_crtc_timing.flags.INTERLACE == 1) { + if (patched_crtc_timing.v_front_porch < 2) + patched_crtc_timing.v_front_porch = 2; + } else { + if (patched_crtc_timing.v_front_porch < 1) + patched_crtc_timing.v_front_porch = 1; + } + + /* blank_start = frame end - front porch */ + asic_blank_start = patched_crtc_timing.v_total - + patched_crtc_timing.v_front_porch; + + /* blank_end = blank_start - active */ + asic_blank_end = asic_blank_start - + patched_crtc_timing.v_border_bottom - + patched_crtc_timing.v_addressable - + patched_crtc_timing.v_border_top; + + newVstartup = asic_blank_end + (patched_crtc_timing.v_total - asic_blank_start); + + *vstartup_start 
= ((newVstartup > *vstartup_start) ? newVstartup : *vstartup_start); +} + +static bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx) +{ + return (pipe_ctx->stream_res.hpo_dp_stream_enc && + pipe_ctx->stream->link->hpo_dp_link_enc && + dc_is_dp_signal(pipe_ctx->stream->signal)); +} + +static bool is_dtbclk_required(struct dc *dc, struct dc_state *context) +{ + int i; + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; +#if defined (CONFIG_DRM_AMD_DC_DP2_0) + if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i])) + return true; +#endif + } + return false; +} + +static void dml_update_soc_for_wm_a(struct dc *dc, struct dc_state *context) +{ + if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) { + context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; + context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us; + context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us; + } +} + +static bool dml_internal_validate( + struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int *pipe_cnt_out, + int *vlevel_out, + bool fast_validate) +{ + bool out = false; + bool repopulate_pipes = false; + int split[MAX_PIPES] = { 0 }; + bool merge[MAX_PIPES] = { false }; + bool newly_split[MAX_PIPES] = { false }; + int pipe_cnt, i, pipe_idx, vlevel; + struct vba_vars_st *vba = &context->bw_ctx.dml.vba; + + ASSERT(pipes); + if (!pipes) + return false; + + // For each full update, remove all existing phantom pipes first + dml_remove_phantom_pipes(dc, context); + + dml_update_soc_for_wm_a(dc, context); + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->plane_state) { + // On initial pass through DML, we intend to use MALL for SS on all + // (non-PSR) surfaces with none using MALL for P-State + // 'mall_plane_config': is not a member of 'dc_plane_state' - commenting it out till mall_plane_config gets supported in dc_plant_state + //if (pipe->stream && pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED) + // pipe->plane_state->mall_plane_config.use_mall_for_ss = true; + } + } + pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + + if (!pipe_cnt) { + out = true; + goto validate_out; + } + + dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt); + + if (!fast_validate) { + dml_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, &pipe_cnt); + } + + if (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states || + vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) { + /* + * If mode is unsupported or there's still no p-state support then + * fall back to favoring voltage. + * + * We don't actually support prefetch mode 2, so require that we + * at least support prefetch mode 1. 
+ */ + context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank = + dm_allow_self_refresh; + + vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); + if (vlevel < context->bw_ctx.dml.soc.num_states) { + memset(split, 0, sizeof(split)); + memset(merge, 0, sizeof(merge)); + vlevel = dml_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge); + } + } + + dml_log_mode_support_params(&context->bw_ctx.dml); + + if (vlevel == context->bw_ctx.dml.soc.num_states) + goto validate_fail; + + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + struct pipe_ctx *mpo_pipe = pipe->bottom_pipe; + + if (!pipe->stream) + continue; + + /* We only support full screen mpo with ODM */ + if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled + && pipe->plane_state && mpo_pipe + && memcmp(&mpo_pipe->plane_res.scl_data.recout, + &pipe->plane_res.scl_data.recout, + sizeof(struct rect)) != 0) { + ASSERT(mpo_pipe->plane_state != pipe->plane_state); + goto validate_fail; + } + pipe_idx++; + } + + /* merge pipes if necessary */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + /*skip pipes that don't need merging*/ + if (!merge[i]) + continue; + + /* if ODM merge we ignore mpc tree, mpo pipes will have their own flags */ + if (pipe->prev_odm_pipe) { + /*split off odm pipe*/ + pipe->prev_odm_pipe->next_odm_pipe = pipe->next_odm_pipe; + if (pipe->next_odm_pipe) + pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe; + + pipe->bottom_pipe = NULL; + pipe->next_odm_pipe = NULL; + pipe->plane_state = NULL; + pipe->stream = NULL; + pipe->top_pipe = NULL; + pipe->prev_odm_pipe = NULL; + if (pipe->stream_res.dsc) + dml_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc); + memset(&pipe->plane_res, 0, sizeof(pipe->plane_res)); + memset(&pipe->stream_res, 0, sizeof(pipe->stream_res)); + repopulate_pipes = true; + } else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) { + struct pipe_ctx *top_pipe = pipe->top_pipe; + struct pipe_ctx *bottom_pipe = pipe->bottom_pipe; + + top_pipe->bottom_pipe = bottom_pipe; + if (bottom_pipe) + bottom_pipe->top_pipe = top_pipe; + + pipe->top_pipe = NULL; + pipe->bottom_pipe = NULL; + pipe->plane_state = NULL; + pipe->stream = NULL; + memset(&pipe->plane_res, 0, sizeof(pipe->plane_res)); + memset(&pipe->stream_res, 0, sizeof(pipe->stream_res)); + repopulate_pipes = true; + } else + ASSERT(0); /* Should never try to merge master pipe */ + + } + + for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + struct pipe_ctx *hsplit_pipe = NULL; + bool odm; + int old_index = -1; + + if (!pipe->stream || newly_split[i]) + continue; + + pipe_idx++; + odm = vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled; + + if (!pipe->plane_state && !odm) + continue; + + if (split[i]) { + if (odm) { + if (split[i] == 4 && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe) + old_index = old_pipe->next_odm_pipe->next_odm_pipe->pipe_idx; + else if (old_pipe->next_odm_pipe) + old_index = old_pipe->next_odm_pipe->pipe_idx; + } else { + if (split[i] == 4 && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe && + old_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state) + 
old_index = old_pipe->bottom_pipe->bottom_pipe->pipe_idx; + else if (old_pipe->bottom_pipe && + old_pipe->bottom_pipe->plane_state == old_pipe->plane_state) + old_index = old_pipe->bottom_pipe->pipe_idx; + } + hsplit_pipe = dml_find_split_pipe(dc, context, old_index); + ASSERT(hsplit_pipe); + if (!hsplit_pipe) + goto validate_fail; + + if (!dml_split_stream_for_mpc_or_odm( + dc, &context->res_ctx, + pipe, hsplit_pipe, odm)) + goto validate_fail; + + newly_split[hsplit_pipe->pipe_idx] = true; + repopulate_pipes = true; + } + if (split[i] == 4) { + struct pipe_ctx *pipe_4to1; + + if (odm && old_pipe->next_odm_pipe) + old_index = old_pipe->next_odm_pipe->pipe_idx; + else if (!odm && old_pipe->bottom_pipe && + old_pipe->bottom_pipe->plane_state == old_pipe->plane_state) + old_index = old_pipe->bottom_pipe->pipe_idx; + else + old_index = -1; + pipe_4to1 = dml_find_split_pipe(dc, context, old_index); + ASSERT(pipe_4to1); + if (!pipe_4to1) + goto validate_fail; + if (!dml_split_stream_for_mpc_or_odm( + dc, &context->res_ctx, + pipe, pipe_4to1, odm)) + goto validate_fail; + newly_split[pipe_4to1->pipe_idx] = true; + + if (odm && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe + && old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe) + old_index = old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe->pipe_idx; + else if (!odm && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe && + old_pipe->bottom_pipe->bottom_pipe->bottom_pipe && + old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state) + old_index = old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->pipe_idx; + else + old_index = -1; + pipe_4to1 = dml_find_split_pipe(dc, context, old_index); + ASSERT(pipe_4to1); + if (!pipe_4to1) + goto validate_fail; + if (!dml_split_stream_for_mpc_or_odm( + dc, &context->res_ctx, + hsplit_pipe, pipe_4to1, odm)) + goto validate_fail; + newly_split[pipe_4to1->pipe_idx] = true; + } + if (odm) + dml_build_mapped_resource(dc, context, pipe->stream); + } + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->plane_state) { + if (!resource_build_scaling_params(pipe)) + goto validate_fail; + } + } + + /* Actual dsc count per stream dsc validation*/ + if (!dml_validate_dsc(dc, context)) { + vba->ValidationStatus[vba->soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE; + goto validate_fail; + } + + if (repopulate_pipes) + pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + *vlevel_out = vlevel; + *pipe_cnt_out = pipe_cnt; + + out = true; + goto validate_out; + +validate_fail: + out = false; + +validate_out: + return out; +} + +static void dml_calculate_dlg_params( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel) +{ + int i, pipe_idx; + int plane_count; + + /* Writeback MCIF_WB arbitration parameters */ + if (dc->res_pool) + dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt); + + context->bw_ctx.bw.dcn.clk.dispclk_khz = context->bw_ctx.dml.vba.DISPCLK * 1000; + context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000; + context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000; + context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16; + context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000; + context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 
1000; + context->bw_ctx.bw.dcn.clk.p_state_change_support = + context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] + != dm_dram_clock_change_unsupported; + + context->bw_ctx.bw.dcn.clk.dppclk_khz = 0; + /* 'z9_support': is not a member of 'dc_clocks' - Commenting out till we have this support in dc_clocks + * context->bw_ctx.bw.dcn.clk.z9_support = (context->bw_ctx.dml.vba.StutterPeriod > 5000.0) ? + DCN_Z9_SUPPORT_ALLOW : DCN_Z9_SUPPORT_DISALLOW; + */ + plane_count = 0; + for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (context->res_ctx.pipe_ctx[i].plane_state) + plane_count++; + } + + /* Commented out as per above error for now. + if (plane_count == 0) + context->bw_ctx.bw.dcn.clk.z9_support = DCN_Z9_SUPPORT_ALLOW; + */ + context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context); + /* TODO : Uncomment the below line and make changes + * as per DML nomenclature once it is available. + * context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = context->bw_ctx.dml.vba.fclk_pstate_support; + */ + + if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz) + context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz; + + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) { + // Phantom pipe requires that DET_SIZE = 0 and no unbounded requests + context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0; + context->res_ctx.pipe_ctx[i].unbounded_req = false; + } else { + context->res_ctx.pipe_ctx[i].det_buffer_size_kb = context->bw_ctx.dml.ip.det_buffer_size_kbytes; + context->res_ctx.pipe_ctx[i].unbounded_req = pipes[pipe_idx].pipe.src.unbounded_req_mode; + } + + if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) + context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000; + context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = + pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000; + context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest; + pipe_idx++; + } + /*save a original dppclock copy*/ + context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz; + context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz; + context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dppclk_mhz * 1000; + context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz * 1000; + context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes + - context->bw_ctx.dml.ip.det_buffer_size_kbytes * pipe_idx; + + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + bool cstate_en = context->bw_ctx.dml.vba.PrefetchMode[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != 2; + + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg(&context->bw_ctx.dml, + 
&context->res_ctx.pipe_ctx[i].dlg_regs, + &context->res_ctx.pipe_ctx[i].ttu_regs, + pipes, + pipe_cnt, + pipe_idx, + cstate_en, + context->bw_ctx.bw.dcn.clk.p_state_change_support, + false, false, true); + + context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml, + &context->res_ctx.pipe_ctx[i].rq_regs, + &pipes[pipe_idx].pipe); + pipe_idx++; + } +} + +static void dml_calculate_wm_and_dlg( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel) +{ + int i, pipe_idx, vlevel_temp = 0; + + double dcfclk = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz; + double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; + unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed; + bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != + dm_dram_clock_change_unsupported; + + /* Set B: + * For Set B calculations use clocks from clock_limits[2] when available i.e. when SMU is present, + * otherwise use arbitrary low value from spreadsheet for DCFCLK as lower is safer for watermark + * calculations to cover bootup clocks. + * DCFCLK: soc.clock_limits[2] when available + * UCLK: soc.clock_limits[2] when available + */ + if (context->bw_ctx.dml.soc.num_states > 2) { + vlevel_temp = 2; + dcfclk = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz; + } else + dcfclk = 615; //DCFCLK Vmin_lv + + pipes[0].clks_cfg.voltage = vlevel_temp; + pipes[0].clks_cfg.dcfclk_mhz = dcfclk; + pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz; + + if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) { + context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us; + context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us; + context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us; + } + context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + //context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.fclk_pstate_change_ns = get_wm_fclk_pstate(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + //context->bw_ctx.bw.dcn.watermarks.b.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + + /* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns 
wm values until DML is implemented */ + context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.fclk_pstate_change_ns = context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns / 4; + context->bw_ctx.bw.dcn.watermarks.b.usr_retraining_ns = context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns / 8; + + /* Set D: + * All clocks min. + * DCFCLK: Min, as reported by PM FW when available + * UCLK : Min, as reported by PM FW when available + * sr_enter_exit/sr_exit should be lower than used for DRAM (TBD after bringup or later, use as decided in Clk Mgr) + */ + + if (context->bw_ctx.dml.soc.num_states > 2) { + vlevel_temp = 0; + dcfclk = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz; + } else + dcfclk = 615; //DCFCLK Vmin_lv + + pipes[0].clks_cfg.voltage = vlevel_temp; + pipes[0].clks_cfg.dcfclk_mhz = dcfclk; + pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz; + + if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) { + context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us; + context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us; + context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us; + } + context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + //context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.fclk_pstate_change_ns = get_wm_fclk_pstate(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + //context->bw_ctx.bw.dcn.watermarks.d.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + + /* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns wm values until DML is implemented */ + context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.fclk_pstate_change_ns = context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns / 4; + context->bw_ctx.bw.dcn.watermarks.d.usr_retraining_ns = context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns / 8; + + /* Set C, for Dummy P-State: + * All clocks min. 
+ * DCFCLK: Min, as reported by PM FW, when available + * UCLK : Min, as reported by PM FW, when available + * pstate latency as per UCLK state dummy pstate latency + */ + if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) { + unsigned int min_dram_speed_mts_margin = 160; + + if ((!pstate_en)) + min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz * 16; + + /* find largest table entry that is lower than dram speed, but lower than DPM0 still uses DPM0 */ + for (i = 3; i > 0; i--) + if (min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts) + break; + + context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us; + context->bw_ctx.dml.soc.dummy_pstate_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us; + context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us; + context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us; + } + context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + //context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.fclk_pstate_change_ns = get_wm_fclk_pstate(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + //context->bw_ctx.bw.dcn.watermarks.c.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + + /* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns wm values until DML is implemented */ + context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.fclk_pstate_change_ns = context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns / 4; + context->bw_ctx.bw.dcn.watermarks.c.usr_retraining_ns = context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns / 8; + + if ((!pstate_en) && (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid)) { + /* The only difference between A and C is p-state latency, if p-state is not supported + * with full p-state latency we want to calculate DLG based on dummy p-state latency, + * Set A p-state watermark set to 0 previously, when p-state unsupported, for now keep as previous implementation. + */ + context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c; + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0; + } else { + /* Set A: + * All clocks min. 
+ * DCFCLK: Min, as reported by PM FW, when available + * UCLK: Min, as reported by PM FW, when available + */ + dml_update_soc_for_wm_a(dc, context); + context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + } + + pipes[0].clks_cfg.voltage = vlevel; + pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_validation; + pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz; + + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt); + pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + + if (dc->config.forced_clocks) { + pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz; + pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz; + } + if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000) + pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0; + if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) + pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0; + + pipe_idx++; + } + + context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod; + + dml_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); + + if (!pstate_en) + /* Restore full p-state latency */ + context->bw_ctx.dml.soc.dram_clock_change_latency_us = + dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; +} + +bool dml_validate(struct dc *dc, + struct dc_state *context, + bool fast_validate) +{ + bool out = false; + + BW_VAL_TRACE_SETUP(); + + int vlevel = 0; + int pipe_cnt = 0; + display_e2e_pipe_params_st *pipes = context->bw_ctx.dml.dml_pipe_state; + DC_LOGGER_INIT(dc->ctx->logger); + + BW_VAL_TRACE_COUNT(); + + out = dml_internal_validate(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate); + + if (pipe_cnt == 0) + goto validate_out; + + if (!out) + goto validate_fail; + + BW_VAL_TRACE_END_VOLTAGE_LEVEL(); + + if (fast_validate) { + BW_VAL_TRACE_SKIP(fast); + goto validate_out; + } + + dml_calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); + + BW_VAL_TRACE_END_WATERMARKS(); + + goto validate_out; + +validate_fail: + DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n", + 
dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states])); + + BW_VAL_TRACE_SKIP(fail); + out = false; + +validate_out: + BW_VAL_TRACE_FINISH(); + + return out; +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c new file mode 100644 index 000000000000..4ec5310a2962 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper_translation.c @@ -0,0 +1,284 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifdef DML_WRAPPER_TRANSLATION_ + +static void gfx10array_mode_to_dml_params( + enum array_mode_values array_mode, + enum legacy_tiling_compat_level compat_level, + unsigned int *sw_mode) +{ + switch (array_mode) { + case DC_ARRAY_LINEAR_ALLIGNED: + case DC_ARRAY_LINEAR_GENERAL: + *sw_mode = dm_sw_linear; + break; + case DC_ARRAY_2D_TILED_THIN1: +// DC_LEGACY_TILING_ADDR_GEN_ZERO - undefined as per current code hence removed +#if 0 + if (compat_level == DC_LEGACY_TILING_ADDR_GEN_ZERO) + *sw_mode = dm_sw_gfx7_2d_thin_l_vp; + else + *sw_mode = dm_sw_gfx7_2d_thin_gl; +#endif + break; + default: + ASSERT(0); /* Not supported */ + break; + } +} + +static void swizzle_to_dml_params( + enum swizzle_mode_values swizzle, + unsigned int *sw_mode) +{ + switch (swizzle) { + case DC_SW_LINEAR: + *sw_mode = dm_sw_linear; + break; + case DC_SW_4KB_S: + *sw_mode = dm_sw_4kb_s; + break; + case DC_SW_4KB_S_X: + *sw_mode = dm_sw_4kb_s_x; + break; + case DC_SW_4KB_D: + *sw_mode = dm_sw_4kb_d; + break; + case DC_SW_4KB_D_X: + *sw_mode = dm_sw_4kb_d_x; + break; + case DC_SW_64KB_S: + *sw_mode = dm_sw_64kb_s; + break; + case DC_SW_64KB_S_X: + *sw_mode = dm_sw_64kb_s_x; + break; + case DC_SW_64KB_S_T: + *sw_mode = dm_sw_64kb_s_t; + break; + case DC_SW_64KB_D: + *sw_mode = dm_sw_64kb_d; + break; + case DC_SW_64KB_D_X: + *sw_mode = dm_sw_64kb_d_x; + break; + case DC_SW_64KB_D_T: + *sw_mode = dm_sw_64kb_d_t; + break; + case DC_SW_64KB_R_X: + *sw_mode = dm_sw_64kb_r_x; + break; + case DC_SW_VAR_S: + *sw_mode = dm_sw_var_s; + break; + case DC_SW_VAR_S_X: + *sw_mode = dm_sw_var_s_x; + break; + case DC_SW_VAR_D: + *sw_mode = dm_sw_var_d; + break; + case DC_SW_VAR_D_X: + *sw_mode = dm_sw_var_d_x; + break; + + default: + ASSERT(0); /* Not supported */ + break; + } +} + +static void dc_timing_to_dml_timing(const struct dc_crtc_timing *timing, struct 
_vcs_dpi_display_pipe_dest_params_st *dest) +{ + dest->hblank_start = timing->h_total - timing->h_front_porch; + dest->hblank_end = dest->hblank_start + - timing->h_addressable + - timing->h_border_left + - timing->h_border_right; + dest->vblank_start = timing->v_total - timing->v_front_porch; + dest->vblank_end = dest->vblank_start + - timing->v_addressable + - timing->v_border_top + - timing->v_border_bottom; + dest->htotal = timing->h_total; + dest->vtotal = timing->v_total; + dest->hactive = timing->h_addressable; + dest->vactive = timing->v_addressable; + dest->interlaced = timing->flags.INTERLACE; + dest->pixel_rate_mhz = timing->pix_clk_100hz/10000.0; + if (timing->timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) + dest->pixel_rate_mhz *= 2; +} + +static enum odm_combine_mode get_dml_odm_combine(const struct pipe_ctx *pipe) +{ + int odm_split_count = 0; + enum odm_combine_mode combine_mode = dm_odm_combine_mode_disabled; + struct pipe_ctx *next_pipe = pipe->next_odm_pipe; + + // Traverse pipe tree to determine odm split count + while (next_pipe) { + odm_split_count++; + next_pipe = next_pipe->next_odm_pipe; + } + pipe = pipe->prev_odm_pipe; + while (pipe) { + odm_split_count++; + pipe = pipe->prev_odm_pipe; + } + + // Translate split to DML odm combine factor + switch (odm_split_count) { + case 1: + combine_mode = dm_odm_combine_mode_2to1; + break; + case 3: + combine_mode = dm_odm_combine_mode_4to1; + break; + default: + combine_mode = dm_odm_combine_mode_disabled; + } + + return combine_mode; +} + +static int get_dml_output_type(enum signal_type dc_signal) +{ + int dml_output_type = -1; + + switch (dc_signal) { + case SIGNAL_TYPE_DISPLAY_PORT_MST: + case SIGNAL_TYPE_DISPLAY_PORT: + dml_output_type = dm_dp; + break; + case SIGNAL_TYPE_EDP: + dml_output_type = dm_edp; + break; + case SIGNAL_TYPE_HDMI_TYPE_A: + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_DVI_DUAL_LINK: + dml_output_type = dm_hdmi; + break; + default: + break; + } + + return dml_output_type; +} + +static void populate_color_depth_and_encoding_from_timing(const struct dc_crtc_timing *timing, struct _vcs_dpi_display_output_params_st *dout) +{ + int output_bpc = 0; + + switch (timing->display_color_depth) { + case COLOR_DEPTH_666: + output_bpc = 6; + break; + case COLOR_DEPTH_888: + output_bpc = 8; + break; + case COLOR_DEPTH_101010: + output_bpc = 10; + break; + case COLOR_DEPTH_121212: + output_bpc = 12; + break; + case COLOR_DEPTH_141414: + output_bpc = 14; + break; + case COLOR_DEPTH_161616: + output_bpc = 16; + break; + case COLOR_DEPTH_999: + output_bpc = 9; + break; + case COLOR_DEPTH_111111: + output_bpc = 11; + break; + default: + output_bpc = 8; + break; + } + + switch (timing->pixel_encoding) { + case PIXEL_ENCODING_RGB: + case PIXEL_ENCODING_YCBCR444: + dout->output_format = dm_444; + dout->output_bpp = output_bpc * 3; + break; + case PIXEL_ENCODING_YCBCR420: + dout->output_format = dm_420; + dout->output_bpp = (output_bpc * 3.0) / 2; + break; + case PIXEL_ENCODING_YCBCR422: + if (timing->flags.DSC && !timing->dsc_cfg.ycbcr422_simple) + dout->output_format = dm_n422; + else + dout->output_format = dm_s422; + dout->output_bpp = output_bpc * 2; + break; + default: + dout->output_format = dm_444; + dout->output_bpp = output_bpc * 3; + } +} + +static enum source_format_class dc_source_format_to_dml_source_format(enum surface_pixel_format dc_format) +{ + enum source_format_class dml_format = dm_444_32; + + switch (dc_format) { + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: + case 
SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: + dml_format = dm_420_8; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: + dml_format = dm_420_10; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: + dml_format = dm_444_64; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: + case SURFACE_PIXEL_FORMAT_GRPH_RGB565: + dml_format = dm_444_16; + break; + case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS: + dml_format = dm_444_8; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA: + dml_format = dm_rgbe_alpha; + break; + default: + dml_format = dm_444_32; + break; + } + + return dml_format; +} + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c index 3ee858f311d1..122ba291a7ef 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c @@ -61,16 +61,6 @@ static double dsc_roundf(double num) return (int)(num); } -static double dsc_ceil(double num) -{ - double retval = (int)num; - - if (retval != num && num > 0) - retval = num + 1; - - return (int)retval; -} - static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc, enum max_min max_min, float bpp) { @@ -268,24 +258,3 @@ void _do_calc_rc_params(struct rc_params *rc, rc->rc_buf_thresh[13] = 8064; } -u32 _do_bytes_per_pixel_calc(int slice_width, - u16 drm_bpp, - bool is_navite_422_or_420) -{ - float bpp; - u32 bytes_per_pixel; - double d_bytes_per_pixel; - - dc_assert_fp_enabled(); - - bpp = ((float)drm_bpp / 16.0); - d_bytes_per_pixel = dsc_ceil(bpp * slice_width / 8.0) / slice_width; - // TODO: Make sure the formula for calculating this is precise (ceiling - // vs. 
floor, and at what point they should be applied) - if (is_navite_422_or_420) - d_bytes_per_pixel /= 2; - - bytes_per_pixel = (u32)dsc_ceil(d_bytes_per_pixel * 0x10000000); - - return bytes_per_pixel; -} diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h index b93b95409fbe..cad244c023cd 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h @@ -78,10 +78,6 @@ struct qp_entry { typedef struct qp_entry qp_table[]; -u32 _do_bytes_per_pixel_calc(int slice_width, - u16 drm_bpp, - bool is_navite_422_or_420); - void _do_calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_comp bpc, diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index 0321b4446e05..9c74564cbd8d 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -455,6 +455,7 @@ static bool intersect_dsc_caps( if (pixel_encoding == PIXEL_ENCODING_YCBCR422 || pixel_encoding == PIXEL_ENCODING_YCBCR420) dsc_common_caps->bpp_increment_div = min(dsc_common_caps->bpp_increment_div, (uint32_t)8); + dsc_common_caps->edp_sink_max_bits_per_pixel = dsc_sink_caps->edp_max_bits_per_pixel; dsc_common_caps->is_dp = dsc_sink_caps->is_dp; return true; } @@ -513,6 +514,13 @@ static bool decide_dsc_bandwidth_range( range->min_target_bpp_x16 = preferred_bpp_x16; } } + /* TODO - make this value generic to all signal types */ + else if (dsc_caps->edp_sink_max_bits_per_pixel) { + /* apply max bpp limitation from edp sink */ + range->max_target_bpp_x16 = MIN(dsc_caps->edp_sink_max_bits_per_pixel, + max_bpp_x16); + range->min_target_bpp_x16 = min_bpp_x16; + } else { range->max_target_bpp_x16 = max_bpp_x16; range->min_target_bpp_x16 = min_bpp_x16; @@ -574,7 +582,7 @@ static bool decide_dsc_target_bpp_x16( return *target_bpp_x16 != 0; } -#define MIN_AVAILABLE_SLICES_SIZE 4 +#define MIN_AVAILABLE_SLICES_SIZE 6 static int get_available_dsc_slices(union dsc_enc_slice_caps slice_caps, int *available_slices) { @@ -860,6 +868,10 @@ static bool setup_dsc_config( min_slices_h = 0; // DSC TODO: Maybe try increasing the number of slices first? is_dsc_possible = (min_slices_h <= max_slices_h); + + if (min_slices_h == 0 && max_slices_h == 0) + is_dsc_possible = false; + if (!is_dsc_possible) goto done; diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c index b19d3aeb5962..e97cf09be9d5 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c @@ -60,31 +60,3 @@ void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps) pps->dsc_version_minor); DC_FP_END(); } - -/** - * calc_dsc_bytes_per_pixel - calculate bytes per pixel - * @pps: DRM struct with all required DSC values - * - * Based on the information inside drm_dsc_config, this function calculates the - * total of bytes per pixel. - * - * @note This calculation requires float point operation, most of it executes - * under kernel_fpu_{begin,end}. 
- * - * Return: - * Return the number of bytes per pixel - */ -u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps) - -{ - u32 ret; - u16 drm_bpp = pps->bits_per_pixel; - int slice_width = pps->slice_width; - bool is_navite_422_or_420 = pps->native_422 || pps->native_420; - - DC_FP_START(); - ret = _do_bytes_per_pixel_calc(slice_width, drm_bpp, - is_navite_422_or_420); - DC_FP_END(); - return ret; -} diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h index c2340e001b57..80921c1c0d53 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h @@ -30,7 +30,6 @@ #include "dml/dsc/rc_calc_fpu.h" void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps); -u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c index 1e19dd674e5a..7e306aa3e2b9 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c @@ -100,8 +100,7 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_par int ret; struct rc_params rc; struct drm_dsc_config dsc_cfg; - - dsc_params->bytes_per_pixel = calc_dsc_bytes_per_pixel(pps); + unsigned long long tmp; calc_rc_params(&rc, pps); dsc_params->pps = *pps; @@ -113,6 +112,9 @@ int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_par dsc_cfg.mux_word_size = dsc_params->pps.bits_per_component <= 10 ? 48 : 64; ret = drm_dsc_compute_rc_parameters(&dsc_cfg); + tmp = (unsigned long long)dsc_cfg.slice_chunk_size * 0x10000000 + (dsc_cfg.slice_width - 1); + do_div(tmp, (uint32_t)dsc_cfg.slice_width); //ROUND-UP + dsc_params->bytes_per_pixel = (uint32_t)tmp; copy_pps_fields(&dsc_params->pps, &dsc_cfg); dsc_params->rc_buffer_model_size = dsc_cfg.rc_bits; diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h index 806f3041db14..337c0161e72d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h @@ -619,7 +619,7 @@ struct dcn_ip_params { }; extern const struct dcn_ip_params dcn10_ip_defaults; -bool dcn_validate_bandwidth( +bool dcn10_validate_bandwidth( struct dc *dc, struct dc_state *context, bool fast_validate); diff --git a/drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h b/drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h new file mode 100644 index 000000000000..5dcfbd8e2697 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/inc/dml_wrapper.h @@ -0,0 +1,34 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef DML_WRAPPER_H_ +#define DML_WRAPPER_H_ + +#include "dc.h" +#include "dml/display_mode_vba.h" + +bool dml_validate(struct dc *dc, struct dc_state *context, bool fast_validate); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h index f94135c6e3c2..346f0ba73e86 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h @@ -61,6 +61,8 @@ struct dcn_dsc_state { uint32_t dsc_pic_height; uint32_t dsc_slice_bpg_offset; uint32_t dsc_chunk_size; + uint32_t dsc_fw_en; + uint32_t dsc_opp_source; }; @@ -88,6 +90,7 @@ struct dsc_enc_caps { int32_t max_total_throughput_mps; /* Maximum total throughput with all the slices combined */ int32_t max_slice_width; uint32_t bpp_increment_div; /* bpp increment divisor, e.g. if 16, it's 1/16th of a bit */ + uint32_t edp_sink_max_bits_per_pixel; bool is_dp; }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index c88e113b94d1..073f8b667eff 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -164,6 +164,10 @@ struct stream_encoder_funcs { void (*stop_dp_info_packets)( struct stream_encoder *enc); + void (*reset_fifo)( + struct stream_encoder *enc + ); + void (*dp_blank)( struct dc_link *link, struct stream_encoder *enc); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index 7390baf916b5..c29320b3855d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -290,6 +290,8 @@ struct timing_generator_funcs { enum optc_dsc_mode dsc_mode, uint32_t dsc_bytes_per_pixel, uint32_t dsc_slice_width); + void (*get_dsc_status)(struct timing_generator *optc, + uint32_t *dsc_mode); void (*set_odm_bypass)(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing); void (*set_odm_combine)(struct timing_generator *optc, int *opp_id, int opp_cnt, struct dc_crtc_timing *timing); diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h index 10dcf6a5e9b1..a4e43b4826e0 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h @@ -36,7 +36,7 @@ * Initialise link encoder resource tracking. 
*/ void link_enc_cfg_init( - struct dc *dc, + const struct dc *dc, struct dc_state *state); /* diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index cd204eef073b..83855b8a32e9 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -360,6 +360,8 @@ struct dmub_srv_hw_funcs { uint32_t (*get_gpint_dataout)(struct dmub_srv *dmub); + void (*clear_inbox0_ack_register)(struct dmub_srv *dmub); + uint32_t (*read_inbox0_ack_register)(struct dmub_srv *dmub); void (*send_inbox0_cmd)(struct dmub_srv *dmub, union dmub_inbox0_data_register data); uint32_t (*get_current_time)(struct dmub_srv *dmub); @@ -409,6 +411,7 @@ struct dmub_srv { struct dmub_srv_base_funcs funcs; struct dmub_srv_hw_funcs hw_funcs; struct dmub_rb inbox1_rb; + uint32_t inbox1_last_wptr; /** * outbox1_rb is accessed without locks (dal & dc) * and to be used only in dmub_srv_stat_get_notification() @@ -735,6 +738,45 @@ bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_ bool dmub_srv_should_detect(struct dmub_srv *dmub); +/** + * dmub_srv_send_inbox0_cmd() - Send command to DMUB using INBOX0 + * @dmub: the dmub service + * @data: the data to be sent in the INBOX0 command + * + * Send command by writing directly to INBOX0 WPTR + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_INVALID - hw_init false or hw function does not exist + */ +enum dmub_status dmub_srv_send_inbox0_cmd(struct dmub_srv *dmub, union dmub_inbox0_data_register data); + +/** + * dmub_srv_wait_for_inbox0_ack() - wait for DMUB to ACK INBOX0 command + * @dmub: the dmub service + * @timeout_us: the maximum number of microseconds to wait + * + * Wait for DMUB to ACK the INBOX0 message + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_INVALID - hw_init false or hw function does not exist + * DMUB_STATUS_TIMEOUT - wait for ack timed out + */ +enum dmub_status dmub_srv_wait_for_inbox0_ack(struct dmub_srv *dmub, uint32_t timeout_us); + +/** + * dmub_srv_wait_for_inbox0_ack() - clear ACK register for INBOX0 + * @dmub: the dmub service + * + * Clear ACK register for INBOX0 + * + * Return: + * DMUB_STATUS_OK - success + * DMUB_STATUS_INVALID - hw_init false or hw function does not exist + */ +enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub); + #if defined(__cplusplus) } #endif diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index c29a67ccef17..7eec65090862 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -46,10 +46,10 @@ /* Firmware versioning. */ #ifdef DMUB_EXPOSE_VERSION -#define DMUB_FW_VERSION_GIT_HASH 0x1d82d23e +#define DMUB_FW_VERSION_GIT_HASH 0x465e619a #define DMUB_FW_VERSION_MAJOR 0 #define DMUB_FW_VERSION_MINOR 0 -#define DMUB_FW_VERSION_REVISION 91 +#define DMUB_FW_VERSION_REVISION 94 #define DMUB_FW_VERSION_TEST 0 #define DMUB_FW_VERSION_VBIOS 0 #define DMUB_FW_VERSION_HOTFIX 0 @@ -173,13 +173,6 @@ extern "C" { #endif /** - * Number of nanoseconds per DMUB tick. - * DMCUB_TIMER_CURRENT increments in DMUB ticks, which are 10ns by default. - * If DMCUB_TIMER_WINDOW is non-zero this will no longer be true. - */ -#define NS_PER_DMUB_TICK 10 - -/** * union dmub_addr - DMUB physical/virtual 64-bit address. */ union dmub_addr { @@ -208,10 +201,9 @@ union dmub_psr_debug_flags { uint32_t use_hw_lock_mgr : 1; /** - * Unused. - * TODO: Remove. 
+ * Use TPS3 signal when restore main link. */ - uint32_t log_line_nums : 1; + uint32_t force_wakeup_by_tps3 : 1; } bitfields; /** @@ -1550,10 +1542,14 @@ struct dmub_cmd_psr_copy_settings_data { * Currently the support is only for 0 or 1 */ uint8_t panel_inst; + /* + * DSC enable status in driver + */ + uint8_t dsc_enable_status; /** - * Explicit padding to 4 byte boundary. + * Explicit padding to 3 byte boundary. */ - uint8_t pad3[4]; + uint8_t pad3[3]; }; /** @@ -2722,7 +2718,7 @@ static inline bool dmub_rb_full(struct dmub_rb *rb) static inline bool dmub_rb_push_front(struct dmub_rb *rb, const union dmub_rb_cmd *cmd) { - uint64_t volatile *dst = (uint64_t volatile *)(rb->base_address) + rb->wrpt / sizeof(uint64_t); + uint64_t volatile *dst = (uint64_t volatile *)((uint8_t *)(rb->base_address) + rb->wrpt); const uint64_t *src = (const uint64_t *)cmd; uint8_t i; @@ -2840,7 +2836,7 @@ static inline bool dmub_rb_peek_offset(struct dmub_rb *rb, static inline bool dmub_rb_out_front(struct dmub_rb *rb, union dmub_rb_out_cmd *cmd) { - const uint64_t volatile *src = (const uint64_t volatile *)(rb->base_address) + rb->rptr / sizeof(uint64_t); + const uint64_t volatile *src = (const uint64_t volatile *)((uint8_t *)(rb->base_address) + rb->rptr); uint64_t *dst = (uint64_t *)cmd; uint8_t i; @@ -2888,7 +2884,7 @@ static inline void dmub_rb_flush_pending(const struct dmub_rb *rb) uint32_t wptr = rb->wrpt; while (rptr != wptr) { - uint64_t volatile *data = (uint64_t volatile *)rb->base_address + rptr / sizeof(uint64_t); + uint64_t volatile *data = (uint64_t volatile *)((uint8_t *)(rb->base_address) + rptr); //uint64_t volatile *p = (uint64_t volatile *)data; uint64_t temp; uint8_t i; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index 56d400ffa7ac..f673a1c1777a 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -100,24 +100,9 @@ void dmub_flush_buffer_mem(const struct dmub_fb *fb) } static const struct dmub_fw_meta_info * -dmub_get_fw_meta_info(const struct dmub_srv_region_params *params) +dmub_get_fw_meta_info_from_blob(const uint8_t *blob, uint32_t blob_size, uint32_t meta_offset) { const union dmub_fw_meta *meta; - const uint8_t *blob = NULL; - uint32_t blob_size = 0; - uint32_t meta_offset = 0; - - if (params->fw_bss_data && params->bss_data_size) { - /* Legacy metadata region. */ - blob = params->fw_bss_data; - blob_size = params->bss_data_size; - meta_offset = DMUB_FW_META_OFFSET; - } else if (params->fw_inst_const && params->inst_const_size) { - /* Combined metadata region. */ - blob = params->fw_inst_const; - blob_size = params->inst_const_size; - meta_offset = 0; - } if (!blob || !blob_size) return NULL; @@ -134,6 +119,32 @@ dmub_get_fw_meta_info(const struct dmub_srv_region_params *params) return &meta->info; } +static const struct dmub_fw_meta_info * +dmub_get_fw_meta_info(const struct dmub_srv_region_params *params) +{ + const struct dmub_fw_meta_info *info = NULL; + + if (params->fw_bss_data && params->bss_data_size) { + /* Legacy metadata region. */ + info = dmub_get_fw_meta_info_from_blob(params->fw_bss_data, + params->bss_data_size, + DMUB_FW_META_OFFSET); + } else if (params->fw_inst_const && params->inst_const_size) { + /* Combined metadata region - can be aligned to 16-bytes. 
*/ + uint32_t i; + + for (i = 0; i < 16; ++i) { + info = dmub_get_fw_meta_info_from_blob( + params->fw_inst_const, params->inst_const_size, i); + + if (info) + break; + } + } + + return info; +} + static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) { struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs; @@ -598,6 +609,8 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub, enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub) { + struct dmub_rb flush_rb; + if (!dmub->hw_init) return DMUB_STATUS_INVALID; @@ -606,9 +619,14 @@ enum dmub_status dmub_srv_cmd_execute(struct dmub_srv *dmub) * been flushed to framebuffer memory. Otherwise DMCUB might * read back stale, fully invalid or partially invalid data. */ - dmub_rb_flush_pending(&dmub->inbox1_rb); + flush_rb = dmub->inbox1_rb; + flush_rb.rptr = dmub->inbox1_last_wptr; + dmub_rb_flush_pending(&flush_rb); + + dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt); + + dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt; - dmub->hw_funcs.set_inbox1_wptr(dmub, dmub->inbox1_rb.wrpt); return DMUB_STATUS_OK; } @@ -831,3 +849,38 @@ bool dmub_srv_should_detect(struct dmub_srv *dmub) return dmub->hw_funcs.should_detect(dmub); } + +enum dmub_status dmub_srv_clear_inbox0_ack(struct dmub_srv *dmub) +{ + if (!dmub->hw_init || !dmub->hw_funcs.clear_inbox0_ack_register) + return DMUB_STATUS_INVALID; + + dmub->hw_funcs.clear_inbox0_ack_register(dmub); + return DMUB_STATUS_OK; +} + +enum dmub_status dmub_srv_wait_for_inbox0_ack(struct dmub_srv *dmub, uint32_t timeout_us) +{ + uint32_t i = 0; + uint32_t ack = 0; + + if (!dmub->hw_init || !dmub->hw_funcs.read_inbox0_ack_register) + return DMUB_STATUS_INVALID; + + for (i = 0; i <= timeout_us; i++) { + ack = dmub->hw_funcs.read_inbox0_ack_register(dmub); + if (ack) + return DMUB_STATUS_OK; + } + return DMUB_STATUS_TIMEOUT; +} + +enum dmub_status dmub_srv_send_inbox0_cmd(struct dmub_srv *dmub, + union dmub_inbox0_data_register data) +{ + if (!dmub->hw_init || !dmub->hw_funcs.send_inbox0_cmd) + return DMUB_STATUS_INVALID; + + dmub->hw_funcs.send_inbox0_cmd(dmub, data); + return DMUB_STATUS_OK; +} diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h index 4de59b66bb1a..a2b80514d83e 100644 --- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h +++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h @@ -35,6 +35,7 @@ #define DP_BRANCH_DEVICE_ID_00E04C 0x00E04C #define DP_BRANCH_DEVICE_ID_006037 0x006037 +#define DP_DEVICE_ID_38EC11 0x38EC11 enum ddc_result { DDC_RESULT_UNKNOWN = 0, DDC_RESULT_SUCESSFULL, @@ -117,4 +118,7 @@ struct av_sync_data { uint8_t aud_del_ins3;/* DPCD 0002Dh */ }; +static const uint8_t DP_SINK_DEVICE_STR_ID_1[] = {7, 1, 8, 7, 3, 0}; +static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5, 0}; + #endif /* __DAL_DDC_SERVICE_TYPES_H__ */ diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h index 370fad883e33..f093b49c5e6e 100644 --- a/drivers/gpu/drm/amd/display/include/logger_types.h +++ b/drivers/gpu/drm/amd/display/include/logger_types.h @@ -72,9 +72,7 @@ #define DC_LOG_DSC(...) DRM_DEBUG_KMS(__VA_ARGS__) #define DC_LOG_SMU(...) pr_debug("[SMU_MSG]:"__VA_ARGS__) #define DC_LOG_DWB(...) DRM_DEBUG_KMS(__VA_ARGS__) -#if defined(CONFIG_DRM_AMD_DC_DCN) #define DC_LOG_DP2(...) 
DRM_DEBUG_KMS(__VA_ARGS__) -#endif struct dal_logger; @@ -126,9 +124,7 @@ enum dc_log_type { LOG_MAX_HW_POINTS, LOG_ALL_TF_CHANNELS, LOG_SAMPLE_1DLUT, -#if defined(CONFIG_DRM_AMD_DC_DCN) LOG_DP2, -#endif LOG_SECTION_TOTAL_COUNT }; diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index f1a46d16f7ea..4b9e68a79f06 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -98,7 +98,8 @@ enum amd_ip_block_type { AMD_IP_BLOCK_TYPE_ACP, AMD_IP_BLOCK_TYPE_VCN, AMD_IP_BLOCK_TYPE_MES, - AMD_IP_BLOCK_TYPE_JPEG + AMD_IP_BLOCK_TYPE_JPEG, + AMD_IP_BLOCK_TYPE_NUM, }; enum amd_clockgating_state { diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_offset.h index 6d0052ce6bed..da6d380c948b 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_offset.h @@ -354,5 +354,12 @@ #define mmMP1_SMN_EXT_SCRATCH7 0x03c7 #define mmMP1_SMN_EXT_SCRATCH7_BASE_IDX 0 +/* + * addressBlock: mp_SmuMp1Pub_MmuDec + * base address: 0x0 + */ +#define smnMP1_PMI_3_START 0x3030204 +#define smnMP1_PMI_3_FIFO 0x3030208 +#define smnMP1_PMI_3 0x3030600 #endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h index 136fb5de6a4c..a5ae2a801254 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_sh_mask.h @@ -959,5 +959,17 @@ #define MP1_SMN_EXT_SCRATCH7__DATA__SHIFT 0x0 #define MP1_SMN_EXT_SCRATCH7__DATA_MASK 0xFFFFFFFFL +// MP1_PMI_3_START +#define MP1_PMI_3_START__ENABLE_MASK 0x80000000L +// MP1_PMI_3_FIFO +#define MP1_PMI_3_FIFO__DEPTH_MASK 0x00000fffL + +// MP1_PMI_3_START +#define MP1_PMI_3_START__ENABLE__SHIFT 0x0000001f +// MP1_PMI_3_FIFO +#define MP1_PMI_3_FIFO__DEPTH__SHIFT 0x00000000 + + + #endif diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index c84bd7b2cf59..ac941f62cbed 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -33,12 +33,11 @@ #include <linux/dma-fence.h> struct pci_dev; +struct amdgpu_device; #define KGD_MAX_QUEUES 128 struct kfd_dev; -struct kgd_dev; - struct kgd_mem; enum kfd_preempt_type { @@ -228,61 +227,61 @@ struct tile_config { */ struct kfd2kgd_calls { /* Register access functions */ - void (*program_sh_mem_settings)(struct kgd_dev *kgd, uint32_t vmid, + void (*program_sh_mem_settings)(struct amdgpu_device *adev, uint32_t vmid, uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases); - int (*set_pasid_vmid_mapping)(struct kgd_dev *kgd, u32 pasid, + int (*set_pasid_vmid_mapping)(struct amdgpu_device *adev, u32 pasid, unsigned int vmid); - int (*init_interrupts)(struct kgd_dev *kgd, uint32_t pipe_id); + int (*init_interrupts)(struct amdgpu_device *adev, uint32_t pipe_id); - int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, + int (*hqd_load)(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, uint32_t wptr_mask, struct mm_struct *mm); - int (*hiq_mqd_load)(struct kgd_dev *kgd, void *mqd, + int (*hiq_mqd_load)(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t doorbell_off); - int (*hqd_sdma_load)(struct kgd_dev *kgd, void *mqd, + int 
(*hqd_sdma_load)(struct amdgpu_device *adev, void *mqd, uint32_t __user *wptr, struct mm_struct *mm); - int (*hqd_dump)(struct kgd_dev *kgd, + int (*hqd_dump)(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs); - int (*hqd_sdma_dump)(struct kgd_dev *kgd, + int (*hqd_sdma_dump)(struct amdgpu_device *adev, uint32_t engine_id, uint32_t queue_id, uint32_t (**dump)[2], uint32_t *n_regs); - bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address, - uint32_t pipe_id, uint32_t queue_id); - - int (*hqd_destroy)(struct kgd_dev *kgd, void *mqd, uint32_t reset_type, - unsigned int timeout, uint32_t pipe_id, + bool (*hqd_is_occupied)(struct amdgpu_device *adev, + uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id); - bool (*hqd_sdma_is_occupied)(struct kgd_dev *kgd, void *mqd); + int (*hqd_destroy)(struct amdgpu_device *adev, void *mqd, + uint32_t reset_type, unsigned int timeout, + uint32_t pipe_id, uint32_t queue_id); + + bool (*hqd_sdma_is_occupied)(struct amdgpu_device *adev, void *mqd); - int (*hqd_sdma_destroy)(struct kgd_dev *kgd, void *mqd, + int (*hqd_sdma_destroy)(struct amdgpu_device *adev, void *mqd, unsigned int timeout); - int (*address_watch_disable)(struct kgd_dev *kgd); - int (*address_watch_execute)(struct kgd_dev *kgd, + int (*address_watch_disable)(struct amdgpu_device *adev); + int (*address_watch_execute)(struct amdgpu_device *adev, unsigned int watch_point_id, uint32_t cntl_val, uint32_t addr_hi, uint32_t addr_lo); - int (*wave_control_execute)(struct kgd_dev *kgd, + int (*wave_control_execute)(struct amdgpu_device *adev, uint32_t gfx_index_val, uint32_t sq_cmd); - uint32_t (*address_watch_get_offset)(struct kgd_dev *kgd, + uint32_t (*address_watch_get_offset)(struct amdgpu_device *adev, unsigned int watch_point_id, unsigned int reg_offset); - bool (*get_atc_vmid_pasid_mapping_info)( - struct kgd_dev *kgd, + bool (*get_atc_vmid_pasid_mapping_info)(struct amdgpu_device *adev, uint8_t vmid, uint16_t *p_pasid); @@ -290,16 +289,16 @@ struct kfd2kgd_calls { * passed to the shader by the CP. It's the user mode driver's * responsibility. */ - void (*set_scratch_backing_va)(struct kgd_dev *kgd, + void (*set_scratch_backing_va)(struct amdgpu_device *adev, uint64_t va, uint32_t vmid); - void (*set_vm_context_page_table_base)(struct kgd_dev *kgd, + void (*set_vm_context_page_table_base)(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base); - uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd); + uint32_t (*read_vmid_from_vmfault_reg)(struct amdgpu_device *adev); - void (*get_cu_occupancy)(struct kgd_dev *kgd, int pasid, int *wave_cnt, - int *max_waves_per_cu); - void (*program_trap_handler_settings)(struct kgd_dev *kgd, + void (*get_cu_occupancy)(struct amdgpu_device *adev, int pasid, + int *wave_cnt, int *max_waves_per_cu); + void (*program_trap_handler_settings)(struct amdgpu_device *adev, uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr); }; diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 03581d5b1836..08362d506534 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -927,6 +927,13 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block { int ret = 0; const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + enum ip_power_state pwr_state = gate ? 
POWER_STATE_OFF : POWER_STATE_ON; + + if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state) { + dev_dbg(adev->dev, "IP block%d already in the target %s state!", + block_type, gate ? "gate" : "ungate"); + return 0; + } switch (block_type) { case AMD_IP_BLOCK_TYPE_UVD: @@ -979,6 +986,9 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block break; } + if (!ret) + atomic_set(&adev->pm.pwr_state[block_type], pwr_state); + return ret; } diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 41472ed99253..49df4c20f09e 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -3759,5 +3759,7 @@ void amdgpu_debugfs_pm_init(struct amdgpu_device *adev) adev, &amdgpu_debugfs_pm_prv_buffer_fops, adev->pm.smu_prv_buffer_size); + + amdgpu_smu_stb_debug_fs_init(adev); #endif } diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index 98f1b3d8c1d5..16e3f72d31b9 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -417,6 +417,12 @@ struct amdgpu_dpm { enum amd_dpm_forced_level forced_level; }; +enum ip_power_state { + POWER_STATE_UNKNOWN, + POWER_STATE_ON, + POWER_STATE_OFF, +}; + struct amdgpu_pm { struct mutex mutex; u32 current_sclk; @@ -452,6 +458,8 @@ struct amdgpu_pm { struct i2c_adapter smu_i2c; struct mutex smu_i2c_mutex; struct list_head pm_attr_list; + + atomic_t pwr_state[AMD_IP_BLOCK_TYPE_NUM]; }; #define R600_SSTU_DFLT 0 diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h index 3557f4e7fc30..f738f7dc20c9 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h @@ -324,6 +324,7 @@ enum smu_table_id SMU_TABLE_OVERDRIVE, SMU_TABLE_I2C_COMMANDS, SMU_TABLE_PACE, + SMU_TABLE_ECCINFO, SMU_TABLE_COUNT, }; @@ -340,6 +341,7 @@ struct smu_table_context void *max_sustainable_clocks; struct smu_bios_boot_up_values boot_values; void *driver_pptable; + void *ecc_table; struct smu_table tables[SMU_TABLE_COUNT]; /* * The driver table is just a staging buffer for @@ -472,6 +474,12 @@ struct cmn2asic_mapping { int map_to; }; +struct stb_context { + uint32_t stb_buf_size; + bool enabled; + spinlock_t lock; +}; + #define WORKLOAD_POLICY_MAX 7 struct smu_context { @@ -559,6 +567,8 @@ struct smu_context uint16_t cpu_core_num; struct smu_user_dpm_profile user_dpm_profile; + + struct stb_context stb_context; }; struct i2c_adapter; @@ -1261,6 +1271,17 @@ struct pptable_funcs { * of SMUBUS table. */ int (*send_hbm_bad_pages_num)(struct smu_context *smu, uint32_t size); + + /** + * @get_ecc_table: message SMU to get ECC INFO table. + */ + ssize_t (*get_ecc_info)(struct smu_context *smu, void *table); + + + /** + * @stb_collect_info: Collects Smart Trace Buffers data. 
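The amdgpu_dpm_set_powergating_by_smu() change above caches the last requested state per IP block in adev->pm.pwr_state[] and returns early when the new gate/ungate request matches it, updating the cache only when the SMU call succeeds. A self-contained sketch of that bookkeeping follows, using C11 atomics in place of the kernel's atomic_t and an invented block count; function and array names are illustrative only.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum ip_power_state { POWER_STATE_UNKNOWN, POWER_STATE_ON, POWER_STATE_OFF };

#define IP_BLOCK_NUM 16
static _Atomic int pwr_state[IP_BLOCK_NUM]; /* POWER_STATE_UNKNOWN (0) at boot */

/* Returns true if the request was forwarded, false if it was a no-op. */
static bool set_powergating(int block, bool gate)
{
	int target = gate ? POWER_STATE_OFF : POWER_STATE_ON;

	if (atomic_load(&pwr_state[block]) == target) {
		printf("block %d already %s, skipping\n",
		       block, gate ? "gated" : "ungated");
		return false;
	}

	/* ...the real gate/ungate request to the firmware would go here... */

	atomic_store(&pwr_state[block], target); /* only recorded on success */
	return true;
}

int main(void)
{
	set_powergating(3, true);    /* forwarded */
	set_powergating(3, true);    /* skipped: already gated */
	set_powergating(3, false);   /* forwarded */
	return 0;
}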
+ */ + int (*stb_collect_info)(struct smu_context *smu, void *buf, uint32_t size); }; typedef enum { @@ -1397,6 +1418,9 @@ int smu_set_light_sbr(struct smu_context *smu, bool enable); int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event, uint64_t event_arg); +int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc); +int smu_stb_collect_info(struct smu_context *smu, void *buff, uint32_t size); +void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev); #endif #endif diff --git a/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h b/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h index a017983ff1fa..0f67c56c2863 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h +++ b/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h @@ -140,6 +140,8 @@ #define MAX_SW_I2C_COMMANDS 24 +#define ALDEBARAN_UMC_CHANNEL_NUM 32 + typedef enum { I2C_CONTROLLER_PORT_0, //CKSVII2C0 I2C_CONTROLLER_PORT_1, //CKSVII2C1 @@ -507,6 +509,19 @@ typedef struct { uint32_t MmHubPadding[8]; // SMU internal use } AvfsDebugTable_t; +typedef struct { + uint64_t mca_umc_status; + uint64_t mca_umc_addr; + uint16_t ce_count_lo_chip; + uint16_t ce_count_hi_chip; + + uint32_t eccPadding; +} EccInfo_t; + +typedef struct { + EccInfo_t EccInfo[ALDEBARAN_UMC_CHANNEL_NUM]; +} EccInfoTable_t; + // These defines are used with the following messages: // SMC_MSG_TransferTableDram2Smu // SMC_MSG_TransferTableSmu2Dram @@ -517,6 +532,7 @@ typedef struct { #define TABLE_SMU_METRICS 4 #define TABLE_DRIVER_SMU_CONFIG 5 #define TABLE_I2C_COMMANDS 6 -#define TABLE_COUNT 7 +#define TABLE_ECCINFO 7 +#define TABLE_COUNT 8 #endif diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h index e5d3b0d1a032..44af23ae059e 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h @@ -27,7 +27,9 @@ #define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF #define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04 -#define SMU13_DRIVER_IF_VERSION_ALDE 0x07 +#define SMU13_DRIVER_IF_VERSION_ALDE 0x08 + +#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms /* MP Apertures */ #define MP0_Public 0x03800000 @@ -216,7 +218,6 @@ int smu_v13_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) int smu_v13_0_baco_enter(struct smu_context *smu); int smu_v13_0_baco_exit(struct smu_context *smu); -int smu_v13_0_mode1_reset(struct smu_context *smu); int smu_v13_0_mode2_reset(struct smu_context *smu); int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index 8d796ed3b7d1..20cb234d5061 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -1551,7 +1551,7 @@ static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks) static int pp_asic_reset_mode_2(void *handle) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; + int ret = 0; if (!hwmgr || !hwmgr->pm_en) return -EINVAL; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c index 258c573acc97..1f406f21b452 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c @@ -1024,8 +1024,6 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, uint32_t min_freq, max_freq = 0; uint32_t ret = 0; - phm_get_sysfs_buf(&buf, &size); - switch 
(type) { case PP_SCLK: smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now); @@ -1038,13 +1036,13 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, else i = 1; - size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", + size += sprintf(buf + size, "0: %uMhz %s\n", data->gfx_min_freq_limit/100, i == 0 ? "*" : ""); - size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", + size += sprintf(buf + size, "1: %uMhz %s\n", i == 1 ? now : SMU10_UMD_PSTATE_GFXCLK, i == 1 ? "*" : ""); - size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", + size += sprintf(buf + size, "2: %uMhz %s\n", data->gfx_max_freq_limit/100, i == 2 ? "*" : ""); break; @@ -1052,7 +1050,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now); for (i = 0; i < mclk_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, mclk_table->entries[i].clk / 100, ((mclk_table->entries[i].clk / 100) @@ -1067,10 +1065,10 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, if (ret) return ret; - size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); - size += sysfs_emit_at(buf, size, "0: %10uMhz\n", + size += sprintf(buf + size, "%s:\n", "OD_SCLK"); + size += sprintf(buf + size, "0: %10uMhz\n", (data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq); - size += sysfs_emit_at(buf, size, "1: %10uMhz\n", + size += sprintf(buf + size, "1: %10uMhz\n", (data->gfx_actual_soft_max_freq > 0) ? data->gfx_actual_soft_max_freq : max_freq); } break; @@ -1083,8 +1081,8 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr, if (ret) return ret; - size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); - size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n", + size += sprintf(buf + size, "%s:\n", "OD_RANGE"); + size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n", min_freq, max_freq); } break; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index aceebf584225..611969bf4520 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -4914,8 +4914,6 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, int size = 0; uint32_t i, now, clock, pcie_speed; - phm_get_sysfs_buf(&buf, &size); - switch (type) { case PP_SCLK: smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock); @@ -4928,7 +4926,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, now = i; for (i = 0; i < sclk_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, sclk_table->dpm_levels[i].value / 100, (i == now) ? "*" : ""); break; @@ -4943,7 +4941,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, now = i; for (i = 0; i < mclk_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, mclk_table->dpm_levels[i].value / 100, (i == now) ? "*" : ""); break; @@ -4957,7 +4955,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, now = i; for (i = 0; i < pcie_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %s %s\n", i, + size += sprintf(buf + size, "%d: %s %s\n", i, (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" : (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" : (pcie_table->dpm_levels[i].value == 2) ? 
"8.0GT/s, x16" : "", @@ -4965,32 +4963,32 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr, break; case OD_SCLK: if (hwmgr->od_enabled) { - size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); + size += sprintf(buf + size, "%s:\n", "OD_SCLK"); for (i = 0; i < odn_sclk_table->num_of_pl; i++) - size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n", + size += sprintf(buf + size, "%d: %10uMHz %10umV\n", i, odn_sclk_table->entries[i].clock/100, odn_sclk_table->entries[i].vddc); } break; case OD_MCLK: if (hwmgr->od_enabled) { - size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK"); + size += sprintf(buf + size, "%s:\n", "OD_MCLK"); for (i = 0; i < odn_mclk_table->num_of_pl; i++) - size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n", + size += sprintf(buf + size, "%d: %10uMHz %10umV\n", i, odn_mclk_table->entries[i].clock/100, odn_mclk_table->entries[i].vddc); } break; case OD_RANGE: if (hwmgr->od_enabled) { - size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); - size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n", + size += sprintf(buf + size, "%s:\n", "OD_RANGE"); + size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n", data->golden_dpm_table.sclk_table.dpm_levels[0].value/100, hwmgr->platform_descriptor.overdriveLimit.engineClock/100); - size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n", + size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n", data->golden_dpm_table.mclk_table.dpm_levels[0].value/100, hwmgr->platform_descriptor.overdriveLimit.memoryClock/100); - size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n", + size += sprintf(buf + size, "VDDC: %7umV %11umV\n", data->odn_dpm_table.min_vddc, data->odn_dpm_table.max_vddc); } diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c index 8e28a8eecefc..03bf8f069222 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c @@ -1550,8 +1550,6 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr, uint32_t i, now; int size = 0; - phm_get_sysfs_buf(&buf, &size); - switch (type) { case PP_SCLK: now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, @@ -1561,7 +1559,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr, CURR_SCLK_INDEX); for (i = 0; i < sclk_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, sclk_table->entries[i].clk / 100, (i == now) ? "*" : ""); break; @@ -1573,7 +1571,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr, CURR_MCLK_INDEX); for (i = SMU8_NUM_NBPMEMORYCLOCK; i > 0; i--) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", SMU8_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100, (SMU8_NUM_NBPMEMORYCLOCK-i == now) ? 
"*" : ""); break; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c index c981fc2882f0..e6336654c565 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c @@ -4639,8 +4639,6 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, int i, now, size = 0, count = 0; - phm_get_sysfs_buf(&buf, &size); - switch (type) { case PP_SCLK: if (data->registry_data.sclk_dpm_key_disabled) @@ -4654,7 +4652,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, else count = sclk_table->count; for (i = 0; i < count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, sclk_table->dpm_levels[i].value / 100, (i == now) ? "*" : ""); break; @@ -4665,7 +4663,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now); for (i = 0; i < mclk_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, mclk_table->dpm_levels[i].value / 100, (i == now) ? "*" : ""); break; @@ -4676,7 +4674,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now); for (i = 0; i < soc_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, soc_table->dpm_levels[i].value / 100, (i == now) ? "*" : ""); break; @@ -4688,7 +4686,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now); for (i = 0; i < dcef_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, dcef_table->dpm_levels[i].value / 100, (dcef_table->dpm_levels[i].value / 100 == now) ? "*" : ""); @@ -4702,7 +4700,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, gen_speed = pptable->PcieGenSpeed[i]; lane_width = pptable->PcieLaneCount[i]; - size += sysfs_emit_at(buf, size, "%d: %s %s %s\n", i, + size += sprintf(buf + size, "%d: %s %s %s\n", i, (gen_speed == 0) ? "2.5GT/s," : (gen_speed == 1) ? "5.0GT/s," : (gen_speed == 2) ? 
"8.0GT/s," : @@ -4721,34 +4719,34 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr, case OD_SCLK: if (hwmgr->od_enabled) { - size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); + size += sprintf(buf + size, "%s:\n", "OD_SCLK"); podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk; for (i = 0; i < podn_vdd_dep->count; i++) - size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n", + size += sprintf(buf + size, "%d: %10uMhz %10umV\n", i, podn_vdd_dep->entries[i].clk / 100, podn_vdd_dep->entries[i].vddc); } break; case OD_MCLK: if (hwmgr->od_enabled) { - size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK"); + size += sprintf(buf + size, "%s:\n", "OD_MCLK"); podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk; for (i = 0; i < podn_vdd_dep->count; i++) - size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n", + size += sprintf(buf + size, "%d: %10uMhz %10umV\n", i, podn_vdd_dep->entries[i].clk/100, podn_vdd_dep->entries[i].vddc); } break; case OD_RANGE: if (hwmgr->od_enabled) { - size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); - size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n", + size += sprintf(buf + size, "%s:\n", "OD_RANGE"); + size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n", data->golden_dpm_table.gfx_table.dpm_levels[0].value/100, hwmgr->platform_descriptor.overdriveLimit.engineClock/100); - size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n", + size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n", data->golden_dpm_table.mem_table.dpm_levels[0].value/100, hwmgr->platform_descriptor.overdriveLimit.memoryClock/100); - size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n", + size += sprintf(buf + size, "VDDC: %7umV %11umV\n", data->odn_dpm_table.min_vddc, data->odn_dpm_table.max_vddc); } diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c index f7e783e1c888..a2f4d6773d45 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c @@ -2246,8 +2246,6 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, int i, now, size = 0; struct pp_clock_levels_with_latency clocks; - phm_get_sysfs_buf(&buf, &size); - switch (type) { case PP_SCLK: PP_ASSERT_WITH_CODE( @@ -2260,7 +2258,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, "Attempt to get gfx clk levels Failed!", return -1); for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : ""); break; @@ -2276,7 +2274,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, "Attempt to get memory clk levels Failed!", return -1); for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : ""); break; @@ -2294,7 +2292,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, "Attempt to get soc clk levels Failed!", return -1); for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz / 1000 == now) ? 
"*" : ""); break; @@ -2312,7 +2310,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr, "Attempt to get dcef clk levels Failed!", return -1); for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : ""); break; diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c index 03e63be4ee27..85d55ab4e369 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c @@ -3366,8 +3366,6 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, int ret = 0; uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width; - phm_get_sysfs_buf(&buf, &size); - switch (type) { case PP_SCLK: ret = vega20_get_current_clk_freq(hwmgr, PPCLK_GFXCLK, &now); @@ -3376,13 +3374,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, return ret); if (vega20_get_sclks(hwmgr, &clocks)) { - size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n", + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", now / 100); break; } for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); break; @@ -3394,13 +3392,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, return ret); if (vega20_get_memclocks(hwmgr, &clocks)) { - size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n", + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", now / 100); break; } for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); break; @@ -3412,13 +3410,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, return ret); if (vega20_get_socclocks(hwmgr, &clocks)) { - size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n", + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", now / 100); break; } for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz == now * 10) ? "*" : ""); break; @@ -3430,7 +3428,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, return ret); for (i = 0; i < fclk_dpm_table->count; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, fclk_dpm_table->dpm_levels[i].value, fclk_dpm_table->dpm_levels[i].value == (now / 100) ? "*" : ""); break; @@ -3442,13 +3440,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, return ret); if (vega20_get_dcefclocks(hwmgr, &clocks)) { - size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n", + size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n", now / 100); break; } for (i = 0; i < clocks.num_levels; i++) - size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", + size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, (clocks.data[i].clocks_in_khz == now * 10) ? 
"*" : ""); break; @@ -3462,7 +3460,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, gen_speed = pptable->PcieGenSpeed[i]; lane_width = pptable->PcieLaneCount[i]; - size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i, + size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i, (gen_speed == 0) ? "2.5GT/s," : (gen_speed == 1) ? "5.0GT/s," : (gen_speed == 2) ? "8.0GT/s," : @@ -3483,18 +3481,18 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, case OD_SCLK: if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id && od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) { - size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); - size += sysfs_emit_at(buf, size, "0: %10uMhz\n", + size += sprintf(buf + size, "%s:\n", "OD_SCLK"); + size += sprintf(buf + size, "0: %10uMhz\n", od_table->GfxclkFmin); - size += sysfs_emit_at(buf, size, "1: %10uMhz\n", + size += sprintf(buf + size, "1: %10uMhz\n", od_table->GfxclkFmax); } break; case OD_MCLK: if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { - size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK"); - size += sysfs_emit_at(buf, size, "1: %10uMhz\n", + size += sprintf(buf + size, "%s:\n", "OD_MCLK"); + size += sprintf(buf + size, "1: %10uMhz\n", od_table->UclkFmax); } @@ -3507,14 +3505,14 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id && od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id && od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) { - size += sysfs_emit_at(buf, size, "%s:\n", "OD_VDDC_CURVE"); - size += sysfs_emit_at(buf, size, "0: %10uMhz %10dmV\n", + size += sprintf(buf + size, "%s:\n", "OD_VDDC_CURVE"); + size += sprintf(buf + size, "0: %10uMhz %10dmV\n", od_table->GfxclkFreq1, od_table->GfxclkVolt1 / VOLTAGE_SCALE); - size += sysfs_emit_at(buf, size, "1: %10uMhz %10dmV\n", + size += sprintf(buf + size, "1: %10uMhz %10dmV\n", od_table->GfxclkFreq2, od_table->GfxclkVolt2 / VOLTAGE_SCALE); - size += sysfs_emit_at(buf, size, "2: %10uMhz %10dmV\n", + size += sprintf(buf + size, "2: %10uMhz %10dmV\n", od_table->GfxclkFreq3, od_table->GfxclkVolt3 / VOLTAGE_SCALE); } @@ -3522,17 +3520,17 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, break; case OD_RANGE: - size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + size += sprintf(buf + size, "%s:\n", "OD_RANGE"); if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id && od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) { - size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", + size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n", od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value, od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value); } if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { - size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n", + size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n", od8_settings[OD8_SETTING_UCLK_FMAX].min_value, od8_settings[OD8_SETTING_UCLK_FMAX].max_value); } @@ -3543,22 +3541,22 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr, od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id && od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id && od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) { - size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n", + size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n", od8_settings[OD8_SETTING_GFXCLK_FREQ1].min_value, od8_settings[OD8_SETTING_GFXCLK_FREQ1].max_value); - size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n", + size += sprintf(buf + 
size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n", od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value, od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value); - size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n", + size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n", od8_settings[OD8_SETTING_GFXCLK_FREQ2].min_value, od8_settings[OD8_SETTING_GFXCLK_FREQ2].max_value); - size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n", + size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n", od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].min_value, od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].max_value); - size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n", + size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n", od8_settings[OD8_SETTING_GFXCLK_FREQ3].min_value, od8_settings[OD8_SETTING_GFXCLK_FREQ3].max_value); - size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n", + size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n", od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].min_value, od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].max_value); } diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 01168b8955bf..edcf2738748a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -1153,6 +1153,8 @@ static int smu_smc_hw_setup(struct smu_context *smu) case IP_VERSION(11, 5, 0): case IP_VERSION(11, 0, 12): ret = smu_system_features_control(smu, true); + if (ret) + dev_err(adev->dev, "Failed system features control!\n"); break; default: break; @@ -1277,8 +1279,10 @@ static int smu_smc_hw_setup(struct smu_context *smu) } ret = smu_notify_display_change(smu); - if (ret) + if (ret) { + dev_err(adev->dev, "Failed to notify display change!\n"); return ret; + } /* * Set min deep sleep dce fclk with bootup value from vbios via @@ -1286,8 +1290,6 @@ static int smu_smc_hw_setup(struct smu_context *smu) */ ret = smu_set_min_dcef_deep_sleep(smu, smu->smu_table.boot_values.dcefclk / 100); - if (ret) - return ret; return ret; } @@ -1468,7 +1470,7 @@ static int smu_disable_dpms(struct smu_context *smu) dev_err(adev->dev, "Failed to disable smu features.\n"); } - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0) && + if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) && adev->gfx.rlc.funcs->stop) adev->gfx.rlc.funcs->stop(adev); @@ -3072,6 +3074,20 @@ int smu_set_light_sbr(struct smu_context *smu, bool enable) return ret; } +int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc) +{ + int ret = -EOPNOTSUPP; + + mutex_lock(&smu->mutex); + if (smu->ppt_funcs && + smu->ppt_funcs->get_ecc_info) + ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc); + mutex_unlock(&smu->mutex); + + return ret; + +} + static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size) { struct smu_context *smu = handle; @@ -3161,3 +3177,107 @@ int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event, return ret; } + +int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size) +{ + + if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled) + return -EOPNOTSUPP; + + /* Confirm the buffer allocated is of correct size */ + if (size != smu->stb_context.stb_buf_size) + return -EINVAL; + + /* + * No need to lock smu mutex as we access STB directly through MMIO + * and not going through SMU messaging route (for now at least). 
+ * For registers access rely on implementation internal locking. + */ + return smu->ppt_funcs->stb_collect_info(smu, buf, size); +} + +#if defined(CONFIG_DEBUG_FS) + +static int smu_stb_debugfs_open(struct inode *inode, struct file *filp) +{ + struct amdgpu_device *adev = filp->f_inode->i_private; + struct smu_context *smu = &adev->smu; + unsigned char *buf; + int r; + + buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL); + if (!buf) + return -ENOMEM; + + r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size); + if (r) + goto out; + + filp->private_data = buf; + + return 0; + +out: + kvfree(buf); + return r; +} + +static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t size, + loff_t *pos) +{ + struct amdgpu_device *adev = filp->f_inode->i_private; + struct smu_context *smu = &adev->smu; + + + if (!filp->private_data) + return -EINVAL; + + return simple_read_from_buffer(buf, + size, + pos, filp->private_data, + smu->stb_context.stb_buf_size); +} + +static int smu_stb_debugfs_release(struct inode *inode, struct file *filp) +{ + kvfree(filp->private_data); + filp->private_data = NULL; + + return 0; +} + +/* + * We have to define not only read method but also + * open and release because .read takes up to PAGE_SIZE + * data each time so and so is invoked multiple times. + * We allocate the STB buffer in .open and release it + * in .release + */ +static const struct file_operations smu_stb_debugfs_fops = { + .owner = THIS_MODULE, + .open = smu_stb_debugfs_open, + .read = smu_stb_debugfs_read, + .release = smu_stb_debugfs_release, + .llseek = default_llseek, +}; + +#endif + +void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev) +{ +#if defined(CONFIG_DEBUG_FS) + + struct smu_context *smu = &adev->smu; + + if (!smu->stb_context.stb_buf_size) + return; + + debugfs_create_file_size("amdgpu_smu_stb_dump", + S_IRUSR, + adev_to_drm(adev)->primary->debugfs_root, + adev, + &smu_stb_debugfs_fops, + smu->stb_context.stb_buf_size); +#endif + +} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c index cbc3f99e8573..2238ee19c222 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c @@ -309,6 +309,7 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu, { int ret = 0, size = 0; uint32_t cur_value = 0; + int i; smu_cmn_get_sysfs_buf(&buf, &size); @@ -334,8 +335,6 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu, size += sysfs_emit_at(buf, size, "VDDC: %7umV %10umV\n", CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX); break; - case SMU_GFXCLK: - case SMU_SCLK: case SMU_FCLK: case SMU_MCLK: case SMU_SOCCLK: @@ -346,6 +345,25 @@ static int cyan_skillfish_print_clk_levels(struct smu_context *smu, return ret; size += sysfs_emit_at(buf, size, "0: %uMhz *\n", cur_value); break; + case SMU_SCLK: + case SMU_GFXCLK: + ret = cyan_skillfish_get_current_clk_freq(smu, clk_type, &cur_value); + if (ret) + return ret; + if (cur_value == CYAN_SKILLFISH_SCLK_MAX) + i = 2; + else if (cur_value == CYAN_SKILLFISH_SCLK_MIN) + i = 0; + else + i = 1; + size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", CYAN_SKILLFISH_SCLK_MIN, + i == 0 ? "*" : ""); + size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", + i == 1 ? cur_value : cyan_skillfish_sclk_default, + i == 1 ? "*" : ""); + size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", CYAN_SKILLFISH_SCLK_MAX, + i == 2 ? 
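The STB debugfs hooks above snapshot the whole trace buffer once in .open and then serve reads from that snapshot through simple_read_from_buffer(), because .read is invoked repeatedly in at most page-sized chunks and the FIFO should only be drained once. The standalone model below approximates simple_read_from_buffer() (simplified, no user-copy handling) to show why one snapshot plus a file position is enough; all names here are stand-ins, not the kernel API.

#include <stdio.h>
#include <string.h>

/* Minimal stand-in: copy at most "count" bytes starting at *pos from "from",
 * then advance *pos.  Returning 0 signals end of file to the reader. */
static long read_from_buffer(char *to, size_t count, long *pos,
			     const char *from, size_t available)
{
	size_t n;

	if (*pos >= (long)available)
		return 0;
	n = available - (size_t)*pos;
	if (n > count)
		n = count;
	memcpy(to, from + *pos, n);
	*pos += (long)n;
	return (long)n;
}

int main(void)
{
	const char snapshot[] = "0123456789ABCDEF"; /* buffer captured at open() */
	char chunk[4];
	long pos = 0, n;

	/* Userspace keeps read()ing in small chunks until EOF. */
	while ((n = read_from_buffer(chunk, sizeof(chunk), &pos,
				     snapshot, sizeof(snapshot) - 1)) > 0)
		printf("read %ld bytes: %.*s\n", n, (int)n, chunk);
	return 0;
}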
"*" : ""); + break; default: dev_warn(smu->adev->dev, "Unsupported clock type\n"); return ret; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 71161f6b78fe..60a557068ea4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -1265,7 +1265,7 @@ static int navi10_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { uint16_t *curve_settings; - int i, size = 0, ret = 0; + int i, levels, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t freq_values[3] = {0}; uint32_t mark_index = 0; @@ -1319,14 +1319,17 @@ static int navi10_print_clk_levels(struct smu_context *smu, freq_values[1] = cur_value; mark_index = cur_value == freq_values[0] ? 0 : cur_value == freq_values[2] ? 2 : 1; - if (mark_index != 1) - freq_values[1] = (freq_values[0] + freq_values[2]) / 2; - for (i = 0; i < 3; i++) { + levels = 3; + if (mark_index != 1) { + levels = 2; + freq_values[1] = freq_values[2]; + } + + for (i = 0; i < levels; i++) { size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, freq_values[i], i == mark_index ? "*" : ""); } - } break; case SMU_PCIE: diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index a4108025fe29..a673e05853fe 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -80,6 +80,9 @@ (*member) = (smu->smu_table.driver_pptable + offsetof(PPTable_t, field));\ } while(0) +/* STB FIFO depth is in 64bit units */ +#define SIENNA_CICHLID_STB_DEPTH_UNIT_BYTES 8 + static int get_table_size(struct smu_context *smu) { if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13)) @@ -650,6 +653,8 @@ static int sienna_cichlid_allocate_dpm_context(struct smu_context *smu) return 0; } +static void sienna_cichlid_stb_init(struct smu_context *smu); + static int sienna_cichlid_init_smc_tables(struct smu_context *smu) { int ret = 0; @@ -662,6 +667,8 @@ static int sienna_cichlid_init_smc_tables(struct smu_context *smu) if (ret) return ret; + sienna_cichlid_stb_init(smu); + return smu_v11_0_init_smc_tables(smu); } @@ -1171,7 +1178,7 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t mask) { struct amdgpu_device *adev = smu->adev; - int ret = 0, size = 0; + int ret = 0; uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0; soft_min_level = mask ? (ffs(mask) - 1) : 0; @@ -1216,7 +1223,7 @@ forec_level_out: if ((clk_type == SMU_GFXCLK) || (clk_type == SMU_SCLK)) amdgpu_gfx_off_ctrl(adev, true); - return size; + return 0; } static int sienna_cichlid_populate_umd_state_clk(struct smu_context *smu) @@ -2135,7 +2142,13 @@ static int sienna_cichlid_od_edit_dpm_table(struct smu_context *smu, static int sienna_cichlid_run_btc(struct smu_context *smu) { - return smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL); + int res; + + res = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL); + if (res) + dev_err(smu->adev->dev, "RunDcBtc failed!\n"); + + return res; } static int sienna_cichlid_baco_enter(struct smu_context *smu) @@ -3619,6 +3632,16 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu, gpu_metrics->energy_accumulator = use_metrics_v2 ? 
metrics_v2->EnergyAccumulator : metrics->EnergyAccumulator; + if (metrics->CurrGfxVoltageOffset) + gpu_metrics->voltage_gfx = + (155000 - 625 * metrics->CurrGfxVoltageOffset) / 100; + if (metrics->CurrMemVidOffset) + gpu_metrics->voltage_mem = + (155000 - 625 * metrics->CurrMemVidOffset) / 100; + if (metrics->CurrSocVoltageOffset) + gpu_metrics->voltage_soc = + (155000 - 625 * metrics->CurrSocVoltageOffset) / 100; + average_gfx_activity = use_metrics_v2 ? metrics_v2->AverageGfxActivity : metrics->AverageGfxActivity; if (average_gfx_activity <= SMU_11_0_7_GFX_BUSY_THRESHOLD) gpu_metrics->average_gfxclk_frequency = @@ -3793,6 +3816,53 @@ static int sienna_cichlid_set_mp1_state(struct smu_context *smu, return ret; } +static void sienna_cichlid_stb_init(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t reg; + + reg = RREG32_PCIE(MP1_Public | smnMP1_PMI_3_START); + smu->stb_context.enabled = REG_GET_FIELD(reg, MP1_PMI_3_START, ENABLE); + + /* STB is disabled */ + if (!smu->stb_context.enabled) + return; + + spin_lock_init(&smu->stb_context.lock); + + /* STB buffer size in bytes as function of FIFO depth */ + reg = RREG32_PCIE(MP1_Public | smnMP1_PMI_3_FIFO); + smu->stb_context.stb_buf_size = 1 << REG_GET_FIELD(reg, MP1_PMI_3_FIFO, DEPTH); + smu->stb_context.stb_buf_size *= SIENNA_CICHLID_STB_DEPTH_UNIT_BYTES; + + dev_info(smu->adev->dev, "STB initialized to %d entries", + smu->stb_context.stb_buf_size / SIENNA_CICHLID_STB_DEPTH_UNIT_BYTES); + +} + +int sienna_cichlid_stb_get_data_direct(struct smu_context *smu, + void *buf, + uint32_t size) +{ + uint32_t *p = buf; + struct amdgpu_device *adev = smu->adev; + + /* No need to disable interrupts for now as we don't lock it yet from ISR */ + spin_lock(&smu->stb_context.lock); + + /* + * Read the STB FIFO in units of 32bit since this is the accessor window + * (register width) we have. 
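The voltage fields added to sienna_cichlid_get_gpu_metrics() above convert a VID-style offset with (155000 - 625 * offset) / 100, i.e. 1550 mV minus 6.25 mV per step, done in hundredths of a millivolt so the arithmetic stays integral. The quick check below assumes the offset fits in an unsigned byte and uses arbitrary sample offsets; the 6.25 mV/1.55 V interpretation is inferred from the constants, not stated in the patch.

#include <stdint.h>
#include <stdio.h>

/* (155000 - 625 * offset) / 100  ==  1550 mV minus 6.25 mV per step */
static unsigned int vid_offset_to_mv(uint8_t offset)
{
	return (155000u - 625u * offset) / 100u;
}

int main(void)
{
	printf("%u mV\n", vid_offset_to_mv(0));    /* 1550 mV */
	printf("%u mV\n", vid_offset_to_mv(100));  /*  925 mV */
	printf("%u mV\n", vid_offset_to_mv(148));  /*  625 mV */
	return 0;
}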
+ */ + buf = ((char *) buf) + size; + while ((void *)p < buf) + *p++ = cpu_to_le32(RREG32_PCIE(MP1_Public | smnMP1_PMI_3)); + + spin_unlock(&smu->stb_context.lock); + + return 0; +} + static const struct pptable_funcs sienna_cichlid_ppt_funcs = { .get_allowed_feature_mask = sienna_cichlid_get_allowed_feature_mask, .set_default_dpm_table = sienna_cichlid_set_default_dpm_table, @@ -3882,6 +3952,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = { .interrupt_work = smu_v11_0_interrupt_work, .gpo_control = sienna_cichlid_gpo_control, .set_mp1_state = sienna_cichlid_set_mp1_state, + .stb_collect_info = sienna_cichlid_stb_get_data_direct, }; void sienna_cichlid_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 421f38e8dada..c02ed65ffa38 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -683,6 +683,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu, int i, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; bool cur_value_match_level = false; + uint32_t min, max; memset(&metrics, 0, sizeof(metrics)); @@ -743,6 +744,13 @@ static int vangogh_print_clk_levels(struct smu_context *smu, if (ret) return ret; break; + case SMU_GFXCLK: + case SMU_SCLK: + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetGfxclkFrequency, 0, &cur_value); + if (ret) { + return ret; + } + break; default: break; } @@ -768,6 +776,24 @@ static int vangogh_print_clk_levels(struct smu_context *smu, if (!cur_value_match_level) size += sysfs_emit_at(buf, size, " %uMhz *\n", cur_value); break; + case SMU_GFXCLK: + case SMU_SCLK: + min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq; + max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq; + if (cur_value == max) + i = 2; + else if (cur_value == min) + i = 0; + else + i = 1; + size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min, + i == 0 ? "*" : ""); + size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", + i == 1 ? cur_value : VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, + i == 1 ? "*" : ""); + size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max, + i == 2 ? 
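sienna_cichlid_stb_init() above sizes the Smart Trace Buffer as 2^DEPTH 64-bit FIFO entries (SIENNA_CICHLID_STB_DEPTH_UNIT_BYTES is 8), and the collect path then drains it through a 32-bit register window, i.e. size/4 reads. Because the DEPTH field's shift is 0, masking alone extracts it. The register value in the sketch below is invented purely to exercise the arithmetic.

#include <stdint.h>
#include <stdio.h>

#define STB_DEPTH_UNIT_BYTES 8u            /* FIFO depth counts 64-bit entries */
#define MP1_PMI_3_FIFO__DEPTH_MASK 0x00000fffu

int main(void)
{
	unsigned int fifo_reg = 0x00000008;    /* example: DEPTH field = 8 */
	unsigned int depth = fifo_reg & MP1_PMI_3_FIFO__DEPTH_MASK;
	unsigned int buf_size = (1u << depth) * STB_DEPTH_UNIT_BYTES;

	printf("STB buffer: %u bytes, %u 64-bit entries, %u 32-bit reads\n",
	       buf_size, buf_size / STB_DEPTH_UNIT_BYTES, buf_size / 4u);
	/* DEPTH = 8  ->  256 entries = 2048 bytes = 512 register reads */
	return 0;
}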
"*" : ""); + break; default: break; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index 59a7d276541d..6e781cee8bb6 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -78,6 +78,12 @@ #define smnPCIE_ESM_CTRL 0x111003D0 +/* + * SMU support ECCTABLE since version 68.42.0, + * use this to check ECCTALE feature whether support + */ +#define SUPPORT_ECCTABLE_SMU_VERSION 0x00442a00 + static const struct smu_temperature_range smu13_thermal_policy[] = { {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000}, @@ -190,6 +196,7 @@ static const struct cmn2asic_mapping aldebaran_table_map[SMU_TABLE_COUNT] = { TAB_MAP(SMU_METRICS), TAB_MAP(DRIVER_SMU_CONFIG), TAB_MAP(I2C_COMMANDS), + TAB_MAP(ECCINFO), }; static const uint8_t aldebaran_throttler_map[] = { @@ -223,6 +230,9 @@ static int aldebaran_tables_init(struct smu_context *smu) SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); if (!smu_table->metrics_table) return -ENOMEM; @@ -235,6 +245,10 @@ static int aldebaran_tables_init(struct smu_context *smu) return -ENOMEM; } + smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL); + if (!smu_table->ecc_table) + return -ENOMEM; + return 0; } @@ -1765,6 +1779,98 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu, return sizeof(struct gpu_metrics_v1_3); } +static int aldebaran_check_ecc_table_support(struct smu_context *smu) +{ + uint32_t if_version = 0xff, smu_version = 0xff; + int ret = 0; + + ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version); + if (ret) { + /* return not support if failed get smu_version */ + ret = -EOPNOTSUPP; + } + + if (smu_version < SUPPORT_ECCTABLE_SMU_VERSION) + ret = -EOPNOTSUPP; + + return ret; +} + +static ssize_t aldebaran_get_ecc_info(struct smu_context *smu, + void *table) +{ + struct smu_table_context *smu_table = &smu->smu_table; + EccInfoTable_t *ecc_table = NULL; + struct ecc_info_per_ch *ecc_info_per_channel = NULL; + int i, ret = 0; + struct umc_ecc_info *eccinfo = (struct umc_ecc_info *)table; + + ret = aldebaran_check_ecc_table_support(smu); + if (ret) + return ret; + + ret = smu_cmn_update_table(smu, + SMU_TABLE_ECCINFO, + 0, + smu_table->ecc_table, + false); + if (ret) { + dev_info(smu->adev->dev, "Failed to export SMU ecc table!\n"); + return ret; + } + + ecc_table = (EccInfoTable_t *)smu_table->ecc_table; + + for (i = 0; i < ALDEBARAN_UMC_CHANNEL_NUM; i++) { + ecc_info_per_channel = &(eccinfo->ecc[i]); + ecc_info_per_channel->ce_count_lo_chip = + ecc_table->EccInfo[i].ce_count_lo_chip; + ecc_info_per_channel->ce_count_hi_chip = + ecc_table->EccInfo[i].ce_count_hi_chip; + ecc_info_per_channel->mca_umc_status = + ecc_table->EccInfo[i].mca_umc_status; + ecc_info_per_channel->mca_umc_addr = + ecc_table->EccInfo[i].mca_umc_addr; + } + + return ret; +} + +static int aldebaran_mode1_reset(struct smu_context *smu) +{ + u32 smu_version, fatal_err, param; + int ret = 0; + struct amdgpu_device *adev = smu->adev; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + + fatal_err = 0; + param = SMU_RESET_MODE_1; + + /* + * PM FW support SMU_MSG_GfxDeviceDriverReset from 68.07 + */ + smu_cmn_get_smc_version(smu, NULL, &smu_version); + if 
(smu_version < 0x00440700) { + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL); + } + else { + /* fatal error triggered by ras, PMFW supports the flag + from 68.44.0 */ + if ((smu_version >= 0x00442c00) && ras && + atomic_read(&ras->in_recovery)) + fatal_err = 1; + + param |= (fatal_err << 16); + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GfxDeviceDriverReset, param, NULL); + } + + if (!ret) + msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS); + + return ret; +} + static int aldebaran_mode2_reset(struct smu_context *smu) { u32 smu_version; @@ -1925,13 +2031,14 @@ static const struct pptable_funcs aldebaran_ppt_funcs = { .get_gpu_metrics = aldebaran_get_gpu_metrics, .mode1_reset_is_support = aldebaran_is_mode1_reset_supported, .mode2_reset_is_support = aldebaran_is_mode2_reset_supported, - .mode1_reset = smu_v13_0_mode1_reset, + .mode1_reset = aldebaran_mode1_reset, .set_mp1_state = aldebaran_set_mp1_state, .mode2_reset = aldebaran_mode2_reset, .wait_for_event = smu_v13_0_wait_for_event, .i2c_init = aldebaran_i2c_control_init, .i2c_fini = aldebaran_i2c_control_fini, .send_hbm_bad_pages_num = aldebaran_smu_send_hbm_bad_page_num, + .get_ecc_info = aldebaran_get_ecc_info, }; void aldebaran_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 35145db6eedf..55421ea622fb 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -60,8 +60,6 @@ MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin"); #define SMU13_VOLTAGE_SCALE 4 -#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms - #define LINK_WIDTH_MAX 6 #define LINK_SPEED_MAX 3 @@ -430,8 +428,10 @@ int smu_v13_0_fini_smc_tables(struct smu_context *smu) kfree(smu_table->hardcode_pptable); smu_table->hardcode_pptable = NULL; + kfree(smu_table->ecc_table); kfree(smu_table->metrics_table); kfree(smu_table->watermarks_table); + smu_table->ecc_table = NULL; smu_table->metrics_table = NULL; smu_table->watermarks_table = NULL; smu_table->metrics_time = 0; @@ -1424,25 +1424,6 @@ int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu) return ret; } -int smu_v13_0_mode1_reset(struct smu_context *smu) -{ - u32 smu_version; - int ret = 0; - /* - * PM FW support SMU_MSG_GfxDeviceDriverReset from 68.07 - */ - smu_cmn_get_smc_version(smu, NULL, &smu_version); - if (smu_version < 0x00440700) - ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL); - else - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_1, NULL); - - if (!ret) - msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS); - - return ret; -} - static int smu_v13_0_wait_for_reset_complete(struct smu_context *smu, uint64_t event_arg) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index 8215bbf5ed7c..caf1775d48ef 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -697,6 +697,11 @@ static int yellow_carp_get_current_clk_freq(struct smu_context *smu, case SMU_FCLK: return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, value); + case SMU_GFXCLK: + case SMU_SCLK: + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetGfxclkFrequency, 0, value); + break; default: return -EINVAL; } @@ -967,6 +972,7 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu, { int i, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; + 
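The Aldebaran checks above compare the PMFW version as a packed value: 0x00440700 is 68.07 (first firmware accepting SMU_MSG_GfxDeviceDriverReset), 0x00442a00 is 68.42.0 (ECCTABLE supported), and 0x00442c00 is 68.44.0 (fatal-error flag understood). A small decoder confirming that byte layout, assuming major/minor/patch sit in bits 23:16, 15:8 and 7:0:

#include <stdint.h>
#include <stdio.h>

static void show(uint32_t v)
{
	printf("0x%08x -> %u.%u.%u\n", (unsigned int)v,
	       (unsigned int)((v >> 16) & 0xff),
	       (unsigned int)((v >> 8) & 0xff),
	       (unsigned int)(v & 0xff));
}

int main(void)
{
	show(0x00440700); /* 68.7.0  - mode1 reset via GfxDeviceDriverReset */
	show(0x00442a00); /* 68.42.0 - ECC info table available            */
	show(0x00442c00); /* 68.44.0 - fatal-error flag in the reset param */
	return 0;
}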
uint32_t min, max; smu_cmn_get_sysfs_buf(&buf, &size); @@ -1005,6 +1011,27 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu, cur_value == value ? "*" : ""); } break; + case SMU_GFXCLK: + case SMU_SCLK: + ret = yellow_carp_get_current_clk_freq(smu, clk_type, &cur_value); + if (ret) + goto print_clk_out; + min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq; + max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq; + if (cur_value == max) + i = 2; + else if (cur_value == min) + i = 0; + else + i = 1; + size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min, + i == 0 ? "*" : ""); + size += sysfs_emit_at(buf, size, "1: %uMhz %s\n", + i == 1 ? cur_value : YELLOW_CARP_UMD_PSTATE_GFXCLK, + i == 1 ? "*" : ""); + size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max, + i == 2 ? "*" : ""); + break; default: break; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h index b3ad8352c68a..a9205a8ea3ad 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.h @@ -24,5 +24,6 @@ #define __YELLOW_CARP_PPT_H__ extern void yellow_carp_set_ppt_funcs(struct smu_context *smu); +#define YELLOW_CARP_UMD_PSTATE_GFXCLK 1100 #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 843d2cbfc71d..500af6f8adcb 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -97,7 +97,7 @@ static void smu_cmn_read_arg(struct smu_context *smu, * smu: a pointer to SMU context * * Returns the status of the SMU, which could be, - * 0, the SMU is busy with your previous command; + * 0, the SMU is busy with your command; * 1, execution status: success, execution result: success; * 0xFF, execution status: success, execution result: failure; * 0xFE, unknown command; @@ -139,9 +139,13 @@ static void __smu_cmn_reg_print_error(struct smu_context *smu, const char *message = smu_get_message_name(smu, msg); switch (reg_c2pmsg_90) { - case SMU_RESP_NONE: + case SMU_RESP_NONE: { + u32 msg_idx = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66); + u32 prm = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82); dev_err_ratelimited(adev->dev, - "SMU: I'm not done with your previous command!"); + "SMU: I'm not done with your previous command: SMN_C2PMSG_66:0x%08X SMN_C2PMSG_82:0x%08X", + msg_idx, prm); + } break; case SMU_RESP_OK: /* The SMU executed the command. 
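The cyan_skillfish, vangogh and yellow_carp hunks all report gfxclk as the same synthetic three-level table: level 0 is the hard minimum, level 2 the soft maximum, and level 1 either the live clock or a nominal pstate value (for yellow carp, YELLOW_CARP_UMD_PSTATE_GFXCLK = 1100). The standalone rendering below mirrors that selection logic; the frequencies passed in main() are invented.

#include <stdio.h>

static void print_levels(unsigned int cur, unsigned int min,
			 unsigned int max, unsigned int pstate)
{
	/* Which of the three lines carries the '*' marker. */
	int i = (cur == max) ? 2 : (cur == min) ? 0 : 1;

	printf("0: %uMhz %s\n", min, i == 0 ? "*" : "");
	printf("1: %uMhz %s\n", i == 1 ? cur : pstate, i == 1 ? "*" : "");
	printf("2: %uMhz %s\n", max, i == 2 ? "*" : "");
}

int main(void)
{
	print_levels(1337, 200, 1600, 1100);  /* marker on level 1 (live clock) */
	print_levels(1600, 200, 1600, 1100);  /* marker on level 2 (at max)     */
	return 0;
}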
It completed with a @@ -348,7 +352,7 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu, __smu_cmn_send_msg(smu, (uint16_t) index, param); reg = __smu_cmn_poll_stat(smu); res = __smu_cmn_reg2errno(smu, reg); - if (res == -EREMOTEIO) + if (res != 0) __smu_cmn_reg_print_error(smu, reg, index, param, msg); if (read_arg) smu_cmn_read_arg(smu, read_arg); diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c index b53fee6f1c17..65f172807a0d 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c +++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c @@ -291,7 +291,7 @@ vga_pw_show(struct device *dev, struct device_attribute *attr, char *buf) if (rc) return rc; - return sprintf(buf, "%u\n", reg & 1); + return sprintf(buf, "%u\n", reg); } static DEVICE_ATTR_RO(vga_pw); diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index 6f18f143dd30..cefd0cbf9deb 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -216,8 +216,13 @@ void drm_gem_cma_free(struct drm_gem_cma_object *cma_obj) dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map); drm_prime_gem_destroy(gem_obj, cma_obj->sgt); } else if (cma_obj->vaddr) { - dma_free_wc(gem_obj->dev->dev, cma_obj->base.size, - cma_obj->vaddr, cma_obj->paddr); + if (cma_obj->map_noncoherent) + dma_free_noncoherent(gem_obj->dev->dev, cma_obj->base.size, + cma_obj->vaddr, cma_obj->paddr, + DMA_TO_DEVICE); + else + dma_free_wc(gem_obj->dev->dev, cma_obj->base.size, + cma_obj->vaddr, cma_obj->paddr); } drm_gem_object_release(gem_obj); diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 7915047cb041..621924116eb4 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -10,6 +10,7 @@ #include <linux/shmem_fs.h> #include <linux/slab.h> #include <linux/vmalloc.h> +#include <linux/module.h> #ifdef CONFIG_X86 #include <asm/set_memory.h> diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index c9a9d74f338c..c313a5b4549c 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -404,8 +404,17 @@ int drm_syncobj_find_fence(struct drm_file *file_private, if (*fence) { ret = dma_fence_chain_find_seqno(fence, point); - if (!ret) + if (!ret) { + /* If the requested seqno is already signaled + * drm_syncobj_find_fence may return a NULL + * fence. To make sure the recipient gets + * signalled, use a new fence instead. + */ + if (!*fence) + *fence = dma_fence_get_stub(); + goto out; + } dma_fence_put(*fence); } else { ret = -EINVAL; diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c index cd818a629183..00e53de4812b 100644 --- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c +++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c @@ -225,12 +225,29 @@ static int hyperv_vmbus_remove(struct hv_device *hdev) { struct drm_device *dev = hv_get_drvdata(hdev); struct hyperv_drm_device *hv = to_hv(dev); + struct pci_dev *pdev; drm_dev_unplug(dev); drm_atomic_helper_shutdown(dev); vmbus_close(hdev->channel); hv_set_drvdata(hdev, NULL); - vmbus_free_mmio(hv->mem->start, hv->fb_size); + + /* + * Free allocated MMIO memory only on Gen2 VMs. 
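The drm_syncobj hunk above substitutes dma_fence_get_stub() when dma_fence_chain_find_seqno() succeeds but leaves the fence pointer NULL, which is how the chain reports that the requested point has already signalled; the caller still expects a real fence object to wait on. A toy model of that contract is below; struct fence, find_seqno() and stub_fence are stand-ins, not DRM API.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fence { bool signaled; };

static struct fence stub_fence = { .signaled = true };  /* always-signalled */

/* Model of the lookup: NULL means "that point has already signalled",
 * not an error. */
static struct fence *find_seqno(unsigned long long seqno,
				unsigned long long last_signaled)
{
	static struct fence pending = { .signaled = false };

	return (seqno <= last_signaled) ? NULL : &pending;
}

int main(void)
{
	struct fence *f = find_seqno(5, 10);   /* point 5 already signalled */

	if (!f)
		f = &stub_fence;               /* hand the waiter a real object */

	printf("fence signaled: %s\n", f->signaled ? "yes" : "no");
	return 0;
}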
+ * On Gen1 VMs, release the PCI device + */ + if (efi_enabled(EFI_BOOT)) { + vmbus_free_mmio(hv->mem->start, hv->fb_size); + } else { + pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT, + PCI_DEVICE_ID_HYPERV_VIDEO, NULL); + if (!pdev) { + drm_err(dev, "Unable to find PCI Hyper-V video\n"); + return -ENODEV; + } + pci_release_region(pdev, 0); + pci_dev_put(pdev); + } return 0; } diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 84b6fc70cbf5..a4c94dc2e216 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -21,7 +21,7 @@ config DRM_I915 select ACPI_VIDEO if ACPI select ACPI_BUTTON if ACPI select SYNC_FILE - select IOSF_MBI + select IOSF_MBI if X86 select CRC32 select SND_HDA_I915 if SND_HDA_CORE select CEC_CORE if CEC_NOTIFIER diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 5c1af130cb6d..3b5857da4123 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -30,7 +30,7 @@ subdir-ccflags-y += -I$(srctree)/$(src) # Please keep these build lists sorted! # core driver code -i915-y += i915_drv.o \ +i915-y += i915_driver.o \ i915_config.o \ i915_irq.o \ i915_getparam.o \ @@ -153,6 +153,7 @@ gem-y += \ gem/i915_gem_throttle.o \ gem/i915_gem_tiling.o \ gem/i915_gem_ttm.o \ + gem/i915_gem_ttm_move.o \ gem/i915_gem_ttm_pm.o \ gem/i915_gem_userptr.o \ gem/i915_gem_wait.o \ @@ -172,6 +173,7 @@ i915-y += \ i915_trace_points.o \ i915_ttm_buddy_manager.o \ i915_vma.o \ + i915_vma_snapshot.o \ intel_wopcm.o # general-purpose microcontroller (GuC) support @@ -225,6 +227,8 @@ i915-y += \ display/intel_hotplug.o \ display/intel_lpe_audio.o \ display/intel_overlay.o \ + display/intel_pch_display.o \ + display/intel_pch_refclk.o \ display/intel_plane_initial.o \ display/intel_psr.o \ display/intel_quirks.o \ diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c index b1439ba78f67..2194f74101ae 100644 --- a/drivers/gpu/drm/i915/display/i9xx_plane.c +++ b/drivers/gpu/drm/i915/display/i9xx_plane.c @@ -60,22 +60,11 @@ static const u32 vlv_primary_formats[] = { DRM_FORMAT_XBGR16161616F, }; -static const u64 i9xx_format_modifiers[] = { - I915_FORMAT_MOD_X_TILED, - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; - static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { - switch (modifier) { - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - break; - default: + if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier)) return false; - } switch (format) { case DRM_FORMAT_C8: @@ -92,13 +81,8 @@ static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane, static bool i965_plane_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { - switch (modifier) { - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - break; - default: + if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier)) return false; - } switch (format) { case DRM_FORMAT_C8: @@ -272,7 +256,7 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state) u32 alignment = intel_surf_alignment(fb, 0); int cpp = fb->format->cpp[0]; - while ((src_x + src_w) * cpp > plane_state->view.color_plane[0].stride) { + while ((src_x + src_w) * cpp > plane_state->view.color_plane[0].mapping_stride) { if (offset == 0) { drm_dbg_kms(&dev_priv->drm, "Unable to find suitable display surface offset due to X-tiling\n"); @@ -418,38 +402,25 @@ static int i9xx_plane_min_cdclk(const struct 
intel_crtc_state *crtc_state, return DIV_ROUND_UP(pixel_rate * num, den); } -static void i9xx_update_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +static void i9xx_plane_update_noarm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; - u32 linear_offset; - int x = plane_state->view.color_plane[0].x; - int y = plane_state->view.color_plane[0].y; - int crtc_x = plane_state->uapi.dst.x1; - int crtc_y = plane_state->uapi.dst.y1; - int crtc_w = drm_rect_width(&plane_state->uapi.dst); - int crtc_h = drm_rect_height(&plane_state->uapi.dst); unsigned long irqflags; - u32 dspaddr_offset; - u32 dspcntr; - - dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state); - - linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); - - if (DISPLAY_VER(dev_priv) >= 4) - dspaddr_offset = plane_state->view.color_plane[0].offset; - else - dspaddr_offset = linear_offset; spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane), - plane_state->view.color_plane[0].stride); + plane_state->view.color_plane[0].mapping_stride); if (DISPLAY_VER(dev_priv) < 4) { + int crtc_x = plane_state->uapi.dst.x1; + int crtc_y = plane_state->uapi.dst.y1; + int crtc_w = drm_rect_width(&plane_state->uapi.dst); + int crtc_h = drm_rect_height(&plane_state->uapi.dst); + /* * PLANE_A doesn't actually have a full window * generator but let's assume we still need to @@ -459,7 +430,39 @@ static void i9xx_update_plane(struct intel_plane *plane, (crtc_y << 16) | crtc_x); intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane), ((crtc_h - 1) << 16) | (crtc_w - 1)); - } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) { + } + + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); +} + +static void i9xx_plane_update_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; + int x = plane_state->view.color_plane[0].x; + int y = plane_state->view.color_plane[0].y; + u32 dspcntr, dspaddr_offset, linear_offset; + unsigned long irqflags; + + dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state); + + linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); + + if (DISPLAY_VER(dev_priv) >= 4) + dspaddr_offset = plane_state->view.color_plane[0].offset; + else + dspaddr_offset = linear_offset; + + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + + if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) { + int crtc_x = plane_state->uapi.dst.x1; + int crtc_y = plane_state->uapi.dst.y1; + int crtc_w = drm_rect_width(&plane_state->uapi.dst); + int crtc_h = drm_rect_height(&plane_state->uapi.dst); + intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x); intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane), @@ -493,8 +496,22 @@ static void i9xx_update_plane(struct intel_plane *plane, spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } -static void i9xx_disable_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) +static void i830_plane_update_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + /* + * On i830/i845 all 
registers are self-arming [ALM040]. + * + * Additional breakage on i830 causes register reads to return + * the last latched value instead of the last written value [ALM026]. + */ + i9xx_plane_update_noarm(plane, crtc_state, plane_state); + i9xx_plane_update_arm(plane, crtc_state, plane_state); +} + +static void i9xx_plane_disable_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; @@ -768,6 +785,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) struct intel_plane *plane; const struct drm_plane_funcs *plane_funcs; unsigned int supported_rotations; + const u64 *modifiers; const u32 *formats; int num_formats; int ret, zpos; @@ -789,12 +807,10 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) plane->id = PLANE_PRIMARY; plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id); - plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane); - if (plane->has_fbc) { - struct intel_fbc *fbc = &dev_priv->fbc; - - fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; - } + if (i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane)) + plane->fbc = &dev_priv->fbc; + if (plane->fbc) + plane->fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { formats = vlv_primary_formats; @@ -851,8 +867,13 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) plane->max_stride = ilk_primary_max_stride; } - plane->update_plane = i9xx_update_plane; - plane->disable_plane = i9xx_disable_plane; + if (IS_I830(dev_priv) || IS_I845G(dev_priv)) { + plane->update_arm = i830_plane_update_arm; + } else { + plane->update_noarm = i9xx_plane_update_noarm; + plane->update_arm = i9xx_plane_update_arm; + } + plane->disable_arm = i9xx_plane_disable_arm; plane->get_hw_state = i9xx_plane_get_hw_state; plane->check_plane = i9xx_plane_check; @@ -875,21 +896,26 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) plane->disable_flip_done = ilk_primary_disable_flip_done; } + modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_TILING_X); + if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 0, plane_funcs, formats, num_formats, - i9xx_format_modifiers, + modifiers, DRM_PLANE_TYPE_PRIMARY, "primary %c", pipe_name(pipe)); else ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 0, plane_funcs, formats, num_formats, - i9xx_format_modifiers, + modifiers, DRM_PLANE_TYPE_PRIMARY, "plane %c", plane_name(plane->i9xx_plane)); + + kfree(modifiers); + if (ret) goto fail; diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 168c84a74d30..5781e9fac8b4 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -28,6 +28,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_mipi_dsi.h> +#include "icl_dsi.h" #include "intel_atomic.h" #include "intel_backlight.h" #include "intel_combo_phy.h" @@ -36,6 +37,7 @@ #include "intel_ddi.h" #include "intel_de.h" #include "intel_dsi.h" +#include "intel_dsi_vbt.h" #include "intel_panel.h" #include "intel_vdsc.h" #include "skl_scaler.h" @@ -183,6 +185,8 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host, if (enable_lpdt) tmp |= LP_DATA_TRANSFER; + else + tmp &= ~LP_DATA_TRANSFER; tmp &= ~(PARAM_WC_MASK | VC_MASK | 
DT_MASK); tmp |= ((packet->header[0] & VC_MASK) << VC_SHIFT); @@ -696,10 +700,7 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder, intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val); for_each_dsi_phy(phy, intel_dsi->phys) { - if (DISPLAY_VER(dev_priv) >= 12) - val |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); - else - val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); + val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy); } intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val); @@ -1135,8 +1136,6 @@ static void gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - /* step 4a: power up all lanes of the DDI used by DSI */ gen11_dsi_power_up_lanes(encoder); @@ -1162,8 +1161,7 @@ gen11_dsi_enable_port_and_phy(struct intel_encoder *encoder, gen11_dsi_configure_transcoder(encoder, crtc_state); /* Step 4l: Gate DDI clocks */ - if (DISPLAY_VER(dev_priv) == 11) - gen11_dsi_gate_clocks(encoder); + gen11_dsi_gate_clocks(encoder); } static void gen11_dsi_powerup_panel(struct intel_encoder *encoder) @@ -1232,7 +1230,9 @@ static void gen11_dsi_pre_enable(struct intel_atomic_state *state, /* step5: program and powerup panel */ gen11_dsi_powerup_panel(encoder); - intel_dsc_enable(encoder, pipe_config); + intel_dsc_dsi_pps_write(encoder, pipe_config); + + intel_dsc_enable(pipe_config); /* step6c: configure transcoder timings */ gen11_dsi_set_transcoder_timings(encoder, pipe_config); @@ -1271,7 +1271,8 @@ static void adlp_set_lp_hs_wakeup_gb(struct intel_encoder *encoder) if (DISPLAY_VER(i915) == 13) { for_each_dsi_port(port, intel_dsi->ports) intel_de_rmw(i915, TGL_DSI_CHKN_REG(port), - TGL_DSI_CHKN_LSHS_GB, 0x4); + TGL_DSI_CHKN_LSHS_GB_MASK, + TGL_DSI_CHKN_LSHS_GB(4)); } } @@ -1628,7 +1629,7 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder, /* FIXME: initialize from VBT */ vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; - ret = intel_dsc_compute_params(encoder, crtc_state); + ret = intel_dsc_compute_params(crtc_state); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/display/icl_dsi.h b/drivers/gpu/drm/i915/display/icl_dsi.h new file mode 100644 index 000000000000..b4861b56b5b2 --- /dev/null +++ b/drivers/gpu/drm/i915/display/icl_dsi.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __ICL_DSI_H__ +#define __ICL_DSI_H__ + +struct drm_i915_private; +struct intel_crtc_state; + +void icl_dsi_init(struct drm_i915_private *i915); +void icl_dsi_frame_update(struct intel_crtc_state *crtc_state); + +#endif /* __ICL_DSI_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index cdc68fb51ba6..e3a0bfb7be84 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -39,6 +39,7 @@ #include "intel_atomic_plane.h" #include "intel_cdclk.h" #include "intel_display_types.h" +#include "intel_fb.h" #include "intel_fb_pin.h" #include "intel_pm.h" #include "intel_sprite.h" @@ -469,31 +470,72 @@ skl_next_plane_to_commit(struct intel_atomic_state *state, return NULL; } -void intel_update_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +void intel_plane_update_noarm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct intel_crtc *crtc = 
to_intel_crtc(crtc_state->uapi.crtc); - trace_intel_update_plane(&plane->base, crtc); + trace_intel_plane_update_noarm(&plane->base, crtc); + + if (plane->update_noarm) + plane->update_noarm(plane, crtc_state, plane_state); +} + +void intel_plane_update_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + trace_intel_plane_update_arm(&plane->base, crtc); if (crtc_state->uapi.async_flip && plane->async_flip) plane->async_flip(plane, crtc_state, plane_state, true); else - plane->update_plane(plane, crtc_state, plane_state); + plane->update_arm(plane, crtc_state, plane_state); } -void intel_disable_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) +void intel_plane_disable_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - trace_intel_disable_plane(&plane->base, crtc); - plane->disable_plane(plane, crtc_state); + trace_intel_plane_disable_arm(&plane->base, crtc); + plane->disable_arm(plane, crtc_state); } -void skl_update_planes_on_crtc(struct intel_atomic_state *state, - struct intel_crtc *crtc) +void intel_update_planes_on_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + u32 update_mask = new_crtc_state->update_planes; + struct intel_plane_state *new_plane_state; + struct intel_plane *plane; + int i; + + if (new_crtc_state->uapi.async_flip) + return; + + /* + * Since we only write non-arming registers here, + * the order does not matter even for skl+. + */ + for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) { + if (crtc->pipe != plane->pipe || + !(update_mask & BIT(plane->id))) + continue; + + /* TODO: for mailbox updates this should be skipped */ + if (new_plane_state->uapi.visible || + new_plane_state->planar_slave) + intel_plane_update_noarm(plane, new_crtc_state, new_plane_state); + } +} + +void skl_arm_planes_on_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc) { struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); @@ -515,17 +557,20 @@ void skl_update_planes_on_crtc(struct intel_atomic_state *state, struct intel_plane_state *new_plane_state = intel_atomic_get_new_plane_state(state, plane); + /* + * TODO: for mailbox updates intel_plane_update_noarm() + * would have to be called here as well. + */ if (new_plane_state->uapi.visible || - new_plane_state->planar_slave) { - intel_update_plane(plane, new_crtc_state, new_plane_state); - } else { - intel_disable_plane(plane, new_crtc_state); - } + new_plane_state->planar_slave) + intel_plane_update_arm(plane, new_crtc_state, new_plane_state); + else + intel_plane_disable_arm(plane, new_crtc_state); } } -void i9xx_update_planes_on_crtc(struct intel_atomic_state *state, - struct intel_crtc *crtc) +void i9xx_arm_planes_on_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc) { struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); @@ -539,10 +584,14 @@ void i9xx_update_planes_on_crtc(struct intel_atomic_state *state, !(update_mask & BIT(plane->id))) continue; + /* + * TODO: for mailbox updates intel_plane_update_noarm() + * would have to be called here as well. 
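+	 * Any non-arming register writes are done separately via
+	 * intel_plane_update_noarm() (see intel_update_planes_on_crtc());
+	 * only the arming (or disabling) writes happen from here.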
+ */ if (new_plane_state->uapi.visible) - intel_update_plane(plane, new_crtc_state, new_plane_state); + intel_plane_update_arm(plane, new_crtc_state, new_plane_state); else - intel_disable_plane(plane, new_crtc_state); + intel_plane_disable_arm(plane, new_crtc_state); } } diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h index 62e5a2a77fd4..7907f601598e 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h @@ -30,20 +30,25 @@ void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state, struct intel_crtc *crtc); void intel_plane_copy_hw_state(struct intel_plane_state *plane_state, const struct intel_plane_state *from_plane_state); -void intel_update_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state); -void intel_disable_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state); +void intel_plane_update_noarm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); +void intel_plane_update_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); +void intel_plane_disable_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state); struct intel_plane *intel_plane_alloc(void); void intel_plane_free(struct intel_plane *plane); struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane); void intel_plane_destroy_state(struct drm_plane *plane, struct drm_plane_state *state); -void skl_update_planes_on_crtc(struct intel_atomic_state *state, - struct intel_crtc *crtc); -void i9xx_update_planes_on_crtc(struct intel_atomic_state *state, - struct intel_crtc *crtc); +void intel_update_planes_on_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void skl_arm_planes_on_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void i9xx_arm_planes_on_crtc(struct intel_atomic_state *state, + struct intel_crtc *crtc); int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *crtc_state, const struct intel_plane_state *old_plane_state, diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index 03e8c05a74f6..03c3111ebdf0 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -62,6 +62,15 @@ * struct &i915_audio_component_audio_ops @audio_ops is called from i915 driver. 
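+ * The chip specific codec enable/disable hooks themselves are collected in
+ * the intel_audio_funcs table below, selected by intel_audio_hooks_init().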
*/ +struct intel_audio_funcs { + void (*audio_codec_enable)(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); + void (*audio_codec_disable)(struct intel_encoder *encoder, + const struct intel_crtc_state *old_crtc_state, + const struct drm_connector_state *old_conn_state); +}; + /* DP N/M table */ #define LC_810M 810000 #define LC_540M 540000 @@ -388,7 +397,7 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct i915_audio_component *acomp = dev_priv->audio_component; + struct i915_audio_component *acomp = dev_priv->audio.component; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; enum port port = encoder->port; const struct dp_aud_n_m *nm; @@ -436,7 +445,7 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct i915_audio_component *acomp = dev_priv->audio_component; + struct i915_audio_component *acomp = dev_priv->audio.component; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; enum port port = encoder->port; int n, rate; @@ -494,7 +503,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder, drm_dbg_kms(&dev_priv->drm, "Disable audio codec on transcoder %s\n", transcoder_name(cpu_transcoder)); - mutex_lock(&dev_priv->av_mutex); + mutex_lock(&dev_priv->audio.mutex); /* Disable timestamps */ tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder)); @@ -512,7 +521,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder, tmp &= ~AUDIO_OUTPUT_ENABLE(cpu_transcoder); intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp); - mutex_unlock(&dev_priv->av_mutex); + mutex_unlock(&dev_priv->audio.mutex); } static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder, @@ -641,7 +650,7 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder, "Enable audio codec on transcoder %s, %u bytes ELD\n", transcoder_name(cpu_transcoder), drm_eld_size(eld)); - mutex_lock(&dev_priv->av_mutex); + mutex_lock(&dev_priv->audio.mutex); /* Enable Audio WA for 4k DSC usecases */ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP)) @@ -679,7 +688,7 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder, /* Enable timestamps */ hsw_audio_config_update(encoder, crtc_state); - mutex_unlock(&dev_priv->av_mutex); + mutex_unlock(&dev_priv->audio.mutex); } static void ilk_audio_codec_disable(struct intel_encoder *encoder, @@ -826,7 +835,7 @@ void intel_audio_codec_enable(struct intel_encoder *encoder, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct i915_audio_component *acomp = dev_priv->audio_component; + struct i915_audio_component *acomp = dev_priv->audio.component; struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_connector *connector = conn_state->connector; const struct drm_display_mode *adjusted_mode = @@ -848,17 +857,17 @@ void intel_audio_codec_enable(struct intel_encoder *encoder, connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; - if (dev_priv->audio_funcs) - dev_priv->audio_funcs->audio_codec_enable(encoder, + if (dev_priv->audio.funcs) + dev_priv->audio.funcs->audio_codec_enable(encoder, crtc_state, conn_state); - mutex_lock(&dev_priv->av_mutex); + mutex_lock(&dev_priv->audio.mutex); 
encoder->audio_connector = connector; /* referred in audio callbacks */ - dev_priv->av_enc_map[pipe] = encoder; - mutex_unlock(&dev_priv->av_mutex); + dev_priv->audio.encoder_map[pipe] = encoder; + mutex_unlock(&dev_priv->audio.mutex); if (acomp && acomp->base.audio_ops && acomp->base.audio_ops->pin_eld_notify) { @@ -888,20 +897,20 @@ void intel_audio_codec_disable(struct intel_encoder *encoder, const struct drm_connector_state *old_conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct i915_audio_component *acomp = dev_priv->audio_component; + struct i915_audio_component *acomp = dev_priv->audio.component; struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); enum port port = encoder->port; enum pipe pipe = crtc->pipe; - if (dev_priv->audio_funcs) - dev_priv->audio_funcs->audio_codec_disable(encoder, + if (dev_priv->audio.funcs) + dev_priv->audio.funcs->audio_codec_disable(encoder, old_crtc_state, old_conn_state); - mutex_lock(&dev_priv->av_mutex); + mutex_lock(&dev_priv->audio.mutex); encoder->audio_connector = NULL; - dev_priv->av_enc_map[pipe] = NULL; - mutex_unlock(&dev_priv->av_mutex); + dev_priv->audio.encoder_map[pipe] = NULL; + mutex_unlock(&dev_priv->audio.mutex); if (acomp && acomp->base.audio_ops && acomp->base.audio_ops->pin_eld_notify) { @@ -931,19 +940,53 @@ static const struct intel_audio_funcs hsw_audio_funcs = { }; /** - * intel_init_audio_hooks - Set up chip specific audio hooks + * intel_audio_hooks_init - Set up chip specific audio hooks * @dev_priv: device private */ -void intel_init_audio_hooks(struct drm_i915_private *dev_priv) +void intel_audio_hooks_init(struct drm_i915_private *dev_priv) { if (IS_G4X(dev_priv)) { - dev_priv->audio_funcs = &g4x_audio_funcs; + dev_priv->audio.funcs = &g4x_audio_funcs; } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - dev_priv->audio_funcs = &ilk_audio_funcs; + dev_priv->audio.funcs = &ilk_audio_funcs; } else if (IS_HASWELL(dev_priv) || DISPLAY_VER(dev_priv) >= 8) { - dev_priv->audio_funcs = &hsw_audio_funcs; + dev_priv->audio.funcs = &hsw_audio_funcs; } else if (HAS_PCH_SPLIT(dev_priv)) { - dev_priv->audio_funcs = &ilk_audio_funcs; + dev_priv->audio.funcs = &ilk_audio_funcs; + } +} + +struct aud_ts_cdclk_m_n { + u8 m; + u16 n; +}; + +void intel_audio_cdclk_change_pre(struct drm_i915_private *i915) +{ + if (DISPLAY_VER(i915) >= 13) + intel_de_rmw(i915, AUD_TS_CDCLK_M, AUD_TS_CDCLK_M_EN, 0); +} + +static void get_aud_ts_cdclk_m_n(int refclk, int cdclk, struct aud_ts_cdclk_m_n *aud_ts) +{ + if (refclk == 24000) + aud_ts->m = 12; + else + aud_ts->m = 15; + + aud_ts->n = cdclk * aud_ts->m / 24000; +} + +void intel_audio_cdclk_change_post(struct drm_i915_private *i915) +{ + struct aud_ts_cdclk_m_n aud_ts; + + if (DISPLAY_VER(i915) >= 13) { + get_aud_ts_cdclk_m_n(i915->cdclk.hw.ref, i915->cdclk.hw.cdclk, &aud_ts); + + intel_de_write(i915, AUD_TS_CDCLK_N, aud_ts.n); + intel_de_write(i915, AUD_TS_CDCLK_M, aud_ts.m | AUD_TS_CDCLK_M_EN); + drm_dbg_kms(&i915->drm, "aud_ts_cdclk set to M=%u, N=%u\n", aud_ts.m, aud_ts.n); } } @@ -1014,13 +1057,13 @@ static unsigned long i915_audio_component_get_power(struct device *kdev) ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO_PLAYBACK); - if (dev_priv->audio_power_refcount++ == 0) { + if (dev_priv->audio.power_refcount++ == 0) { if (DISPLAY_VER(dev_priv) >= 9) { intel_de_write(dev_priv, AUD_FREQ_CNTRL, - dev_priv->audio_freq_cntrl); + dev_priv->audio.freq_cntrl); drm_dbg_kms(&dev_priv->drm, "restored AUD_FREQ_CNTRL to 0x%x\n", - 
dev_priv->audio_freq_cntrl); + dev_priv->audio.freq_cntrl); } /* Force CDCLK to 2*BCLK as long as we need audio powered. */ @@ -1041,7 +1084,7 @@ static void i915_audio_component_put_power(struct device *kdev, struct drm_i915_private *dev_priv = kdev_to_i915(kdev); /* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */ - if (--dev_priv->audio_power_refcount == 0) + if (--dev_priv->audio.power_refcount == 0) if (IS_GEMINILAKE(dev_priv)) glk_force_audio_cdclk(dev_priv, false); @@ -1093,7 +1136,7 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev) /* * get the intel_encoder according to the parameter port and pipe * intel_encoder is saved by the index of pipe - * MST & (pipe >= 0): return the av_enc_map[pipe], + * MST & (pipe >= 0): return the audio.encoder_map[pipe], * when port is matched * MST & (pipe < 0): this is invalid * Non-MST & (pipe >= 0): only pipe = 0 (the first device entry) @@ -1108,10 +1151,10 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv, /* MST */ if (pipe >= 0) { if (drm_WARN_ON(&dev_priv->drm, - pipe >= ARRAY_SIZE(dev_priv->av_enc_map))) + pipe >= ARRAY_SIZE(dev_priv->audio.encoder_map))) return NULL; - encoder = dev_priv->av_enc_map[pipe]; + encoder = dev_priv->audio.encoder_map[pipe]; /* * when bootup, audio driver may not know it is * MST or not. So it will poll all the port & pipe @@ -1127,7 +1170,7 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv, return NULL; for_each_pipe(dev_priv, pipe) { - encoder = dev_priv->av_enc_map[pipe]; + encoder = dev_priv->audio.encoder_map[pipe]; if (encoder == NULL) continue; @@ -1145,7 +1188,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, int pipe, int rate) { struct drm_i915_private *dev_priv = kdev_to_i915(kdev); - struct i915_audio_component *acomp = dev_priv->audio_component; + struct i915_audio_component *acomp = dev_priv->audio.component; struct intel_encoder *encoder; struct intel_crtc *crtc; unsigned long cookie; @@ -1155,7 +1198,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, return 0; cookie = i915_audio_component_get_power(kdev); - mutex_lock(&dev_priv->av_mutex); + mutex_lock(&dev_priv->audio.mutex); /* 1. 
get the pipe */ encoder = get_saved_enc(dev_priv, port, pipe); @@ -1174,7 +1217,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port, hsw_audio_config_update(encoder, crtc->config); unlock: - mutex_unlock(&dev_priv->av_mutex); + mutex_unlock(&dev_priv->audio.mutex); i915_audio_component_put_power(kdev, cookie); return err; } @@ -1188,13 +1231,13 @@ static int i915_audio_component_get_eld(struct device *kdev, int port, const u8 *eld; int ret = -EINVAL; - mutex_lock(&dev_priv->av_mutex); + mutex_lock(&dev_priv->audio.mutex); intel_encoder = get_saved_enc(dev_priv, port, pipe); if (!intel_encoder) { drm_dbg_kms(&dev_priv->drm, "Not valid for port %c\n", port_name(port)); - mutex_unlock(&dev_priv->av_mutex); + mutex_unlock(&dev_priv->audio.mutex); return ret; } @@ -1206,7 +1249,7 @@ static int i915_audio_component_get_eld(struct device *kdev, int port, memcpy(buf, eld, min(max_bytes, ret)); } - mutex_unlock(&dev_priv->av_mutex); + mutex_unlock(&dev_priv->audio.mutex); return ret; } @@ -1241,7 +1284,7 @@ static int i915_audio_component_bind(struct device *i915_kdev, BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS); for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++) acomp->aud_sample_rate[i] = 0; - dev_priv->audio_component = acomp; + dev_priv->audio.component = acomp; drm_modeset_unlock_all(&dev_priv->drm); return 0; @@ -1256,14 +1299,14 @@ static void i915_audio_component_unbind(struct device *i915_kdev, drm_modeset_lock_all(&dev_priv->drm); acomp->base.ops = NULL; acomp->base.dev = NULL; - dev_priv->audio_component = NULL; + dev_priv->audio.component = NULL; drm_modeset_unlock_all(&dev_priv->drm); device_link_remove(hda_kdev, i915_kdev); - if (dev_priv->audio_power_refcount) + if (dev_priv->audio.power_refcount) drm_err(&dev_priv->drm, "audio power refcount %d after unbind\n", - dev_priv->audio_power_refcount); + dev_priv->audio.power_refcount); } static const struct component_ops i915_audio_component_bind_ops = { @@ -1327,10 +1370,13 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv) drm_dbg_kms(&dev_priv->drm, "use AUD_FREQ_CNTRL of 0x%x (init value 0x%x)\n", aud_freq, aud_freq_init); - dev_priv->audio_freq_cntrl = aud_freq; + dev_priv->audio.freq_cntrl = aud_freq; } - dev_priv->audio_component_registered = true; + /* init with current cdclk */ + intel_audio_cdclk_change_post(dev_priv); + + dev_priv->audio.component_registered = true; } /** @@ -1342,11 +1388,11 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv) */ static void i915_audio_component_cleanup(struct drm_i915_private *dev_priv) { - if (!dev_priv->audio_component_registered) + if (!dev_priv->audio.component_registered) return; component_del(dev_priv->drm.dev, &i915_audio_component_bind_ops); - dev_priv->audio_component_registered = false; + dev_priv->audio.component_registered = false; } /** @@ -1368,7 +1414,7 @@ void intel_audio_init(struct drm_i915_private *dev_priv) */ void intel_audio_deinit(struct drm_i915_private *dev_priv) { - if ((dev_priv)->lpe_audio.platdev != NULL) + if ((dev_priv)->audio.lpe.platdev != NULL) intel_lpe_audio_teardown(dev_priv); else i915_audio_component_cleanup(dev_priv); diff --git a/drivers/gpu/drm/i915/display/intel_audio.h b/drivers/gpu/drm/i915/display/intel_audio.h index a3657c7a7ba2..63b22131dc45 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.h +++ b/drivers/gpu/drm/i915/display/intel_audio.h @@ -11,13 +11,15 @@ struct drm_i915_private; struct intel_crtc_state; struct intel_encoder; -void 
intel_init_audio_hooks(struct drm_i915_private *dev_priv); +void intel_audio_hooks_init(struct drm_i915_private *dev_priv); void intel_audio_codec_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); void intel_audio_codec_disable(struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state); +void intel_audio_cdclk_change_pre(struct drm_i915_private *dev_priv); +void intel_audio_cdclk_change_post(struct drm_i915_private *dev_priv); void intel_audio_init(struct drm_i915_private *dev_priv); void intel_audio_deinit(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 8d9d888e9316..2da4aacc956b 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -27,6 +27,9 @@ struct intel_qgv_info { u8 num_points; u8 num_psf_points; u8 t_bl; + u8 max_numchannels; + u8 channel_width; + u8 deinterleave; }; static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv, @@ -42,7 +45,7 @@ static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv, dclk_reference = 6; /* 6 * 16.666 MHz = 100 MHz */ else dclk_reference = 8; /* 8 * 16.666 MHz = 133 MHz */ - sp->dclk = dclk_ratio * dclk_reference; + sp->dclk = DIV_ROUND_UP((16667 * dclk_ratio * dclk_reference) + 500, 1000); val = intel_uncore_read(&dev_priv->uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU); if (val & DG1_GEAR_TYPE) @@ -69,6 +72,7 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv, int point) { u32 val = 0, val2 = 0; + u16 dclk; int ret; ret = sandybridge_pcode_read(dev_priv, @@ -78,7 +82,8 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv, if (ret) return ret; - sp->dclk = val & 0xffff; + dclk = val & 0xffff; + sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(dev_priv) > 11 ? 500 : 0), 1000); sp->t_rp = (val & 0xff0000) >> 16; sp->t_rcd = (val & 0xff000000) >> 24; @@ -133,7 +138,8 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv, } static int icl_get_qgv_points(struct drm_i915_private *dev_priv, - struct intel_qgv_info *qi) + struct intel_qgv_info *qi, + bool is_y_tile) { const struct dram_info *dram_info = &dev_priv->dram_info; int i, ret; @@ -141,20 +147,44 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv, qi->num_points = dram_info->num_qgv_points; qi->num_psf_points = dram_info->num_psf_gv_points; - if (DISPLAY_VER(dev_priv) == 12) + if (DISPLAY_VER(dev_priv) >= 12) switch (dram_info->type) { case INTEL_DRAM_DDR4: - qi->t_bl = 4; + qi->t_bl = is_y_tile ? 8 : 4; + qi->max_numchannels = 2; + qi->channel_width = 64; + qi->deinterleave = is_y_tile ? 1 : 2; break; case INTEL_DRAM_DDR5: - qi->t_bl = 8; + qi->t_bl = is_y_tile ? 16 : 8; + qi->max_numchannels = 4; + qi->channel_width = 32; + qi->deinterleave = is_y_tile ? 1 : 2; + break; + case INTEL_DRAM_LPDDR4: + if (IS_ROCKETLAKE(dev_priv)) { + qi->t_bl = 8; + qi->max_numchannels = 4; + qi->channel_width = 32; + qi->deinterleave = 2; + break; + } + fallthrough; + case INTEL_DRAM_LPDDR5: + qi->t_bl = 16; + qi->max_numchannels = 8; + qi->channel_width = 16; + qi->deinterleave = is_y_tile ? 2 : 4; break; default: qi->t_bl = 16; + qi->max_numchannels = 1; break; } - else if (DISPLAY_VER(dev_priv) == 11) + else if (DISPLAY_VER(dev_priv) == 11) { qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 
4 : 8; + qi->max_numchannels = 1; + } if (drm_WARN_ON(&dev_priv->drm, qi->num_points > ARRAY_SIZE(qi->points))) @@ -193,12 +223,6 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv, return 0; } -static int icl_calc_bw(int dclk, int num, int den) -{ - /* multiples of 16.666MHz (100/6) */ - return DIV_ROUND_CLOSEST(num * dclk * 100, den * 6); -} - static int adl_calc_psf_bw(int clk) { /* @@ -240,7 +264,7 @@ static const struct intel_sa_info tgl_sa_info = { }; static const struct intel_sa_info rkl_sa_info = { - .deburst = 16, + .deburst = 8, .deprogbwlimit = 20, /* GB/s */ .displayrtids = 128, .derating = 10, @@ -265,35 +289,130 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel struct intel_qgv_info qi = {}; bool is_y_tile = true; /* assume y tile may be used */ int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels); - int deinterleave; - int ipqdepth, ipqdepthpch; + int ipqdepth, ipqdepthpch = 16; int dclk_max; int maxdebw; + int num_groups = ARRAY_SIZE(dev_priv->max_bw); int i, ret; - ret = icl_get_qgv_points(dev_priv, &qi); + ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile); if (ret) { drm_dbg_kms(&dev_priv->drm, "Failed to get memory subsystem information, ignoring bandwidth limits"); return ret; } - deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2); dclk_max = icl_sagv_max_dclk(&qi); + maxdebw = min(sa->deprogbwlimit * 1000, dclk_max * 16 * 6 / 10); + ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels); + qi.deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2); + + for (i = 0; i < num_groups; i++) { + struct intel_bw_info *bi = &dev_priv->max_bw[i]; + int clpchgroup; + int j; + + clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i; + bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1; + + bi->num_qgv_points = qi.num_points; + bi->num_psf_gv_points = qi.num_psf_points; + + for (j = 0; j < qi.num_points; j++) { + const struct intel_qgv_point *sp = &qi.points[j]; + int ct, bw; + + /* + * Max row cycle time + * + * FIXME what is the logic behind the + * assumed burst length? + */ + ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd + + (clpchgroup - 1) * qi.t_bl + sp->t_rdpre); + bw = DIV_ROUND_UP(sp->dclk * clpchgroup * 32 * num_channels, ct); - ipqdepthpch = 16; + bi->deratedbw[j] = min(maxdebw, + bw * (100 - sa->derating) / 100); + + drm_dbg_kms(&dev_priv->drm, + "BW%d / QGV %d: num_planes=%d deratedbw=%u\n", + i, j, bi->num_planes, bi->deratedbw[j]); + } + } + /* + * In case if SAGV is disabled in BIOS, we always get 1 + * SAGV point, but we can't send PCode commands to restrict it + * as it will fail and pointless anyway. 
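+	 * With only one QGV point exposed there is nothing to mask off, so
+	 * SAGV is simply reported as not being under driver control.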
+ */ + if (qi.num_points == 1) + dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED; + else + dev_priv->sagv_status = I915_SAGV_ENABLED; + + return 0; +} + +static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa) +{ + struct intel_qgv_info qi = {}; + const struct dram_info *dram_info = &dev_priv->dram_info; + bool is_y_tile = true; /* assume y tile may be used */ + int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels); + int ipqdepth, ipqdepthpch = 16; + int dclk_max; + int maxdebw, peakbw; + int clperchgroup; + int num_groups = ARRAY_SIZE(dev_priv->max_bw); + int i, ret; + + ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile); + if (ret) { + drm_dbg_kms(&dev_priv->drm, + "Failed to get memory subsystem information, ignoring bandwidth limits"); + return ret; + } + + if (dram_info->type == INTEL_DRAM_LPDDR4 || dram_info->type == INTEL_DRAM_LPDDR5) + num_channels *= 2; + + qi.deinterleave = qi.deinterleave ? : DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2); + + if (num_channels < qi.max_numchannels && DISPLAY_VER(dev_priv) >= 12) + qi.deinterleave = max(DIV_ROUND_UP(qi.deinterleave, 2), 1); + + if (DISPLAY_VER(dev_priv) > 11 && num_channels > qi.max_numchannels) + drm_warn(&dev_priv->drm, "Number of channels exceeds max number of channels."); + if (qi.max_numchannels != 0) + num_channels = min_t(u8, num_channels, qi.max_numchannels); + + dclk_max = icl_sagv_max_dclk(&qi); + + peakbw = num_channels * DIV_ROUND_UP(qi.channel_width, 8) * dclk_max; + maxdebw = min(sa->deprogbwlimit * 1000, peakbw * 6 / 10); /* 60% */ - maxdebw = min(sa->deprogbwlimit * 1000, - icl_calc_bw(dclk_max, 16, 1) * 6 / 10); /* 60% */ ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels); + /* + * clperchgroup = 4kpagespermempage * clperchperblock, + * clperchperblock = 8 / num_channels * interleave + */ + clperchgroup = 4 * DIV_ROUND_UP(8, num_channels) * qi.deinterleave; - for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) { + for (i = 0; i < num_groups; i++) { struct intel_bw_info *bi = &dev_priv->max_bw[i]; + struct intel_bw_info *bi_next; int clpchgroup; int j; - clpchgroup = (sa->deburst * deinterleave / num_channels) << i; - bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1; + if (i < num_groups - 1) + bi_next = &dev_priv->max_bw[i + 1]; + + clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i; + + if (i < num_groups - 1 && clpchgroup < clperchgroup) + bi_next->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1; + else + bi_next->num_planes = 0; bi->num_qgv_points = qi.num_points; bi->num_psf_gv_points = qi.num_psf_points; @@ -310,7 +429,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel */ ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd + (clpchgroup - 1) * qi.t_bl + sp->t_rdpre); - bw = icl_calc_bw(sp->dclk, clpchgroup * 32 * num_channels, ct); + bw = DIV_ROUND_UP(sp->dclk * clpchgroup * 32 * num_channels, ct); bi->deratedbw[j] = min(maxdebw, bw * (100 - sa->derating) / 100); @@ -329,9 +448,6 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel "BW%d / PSF GV %d: num_planes=%d bw=%u\n", i, j, bi->num_planes, bi->psf_bw[j]); } - - if (bi->num_planes == 1) - break; } /* @@ -395,6 +511,34 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv, return 0; } +static unsigned int tgl_max_bw(struct drm_i915_private *dev_priv, + int num_planes, int qgv_point) +{ + int i; + + /* + * Let's return max bw for 0 planes + */ + num_planes = max(1, num_planes); + + for (i = 
ARRAY_SIZE(dev_priv->max_bw) - 1; i >= 0; i--) { + const struct intel_bw_info *bi = + &dev_priv->max_bw[i]; + + /* + * Pcode will not expose all QGV points when + * SAGV is forced to off/min/med/max. + */ + if (qgv_point >= bi->num_qgv_points) + return UINT_MAX; + + if (num_planes <= bi->num_planes) + return bi->deratedbw[qgv_point]; + } + + return dev_priv->max_bw[0].deratedbw[qgv_point]; +} + static unsigned int adl_psf_bw(struct drm_i915_private *dev_priv, int psf_gv_point) { @@ -412,13 +556,13 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv) if (IS_DG2(dev_priv)) dg2_get_bw_info(dev_priv); else if (IS_ALDERLAKE_P(dev_priv)) - icl_get_bw_info(dev_priv, &adlp_sa_info); + tgl_get_bw_info(dev_priv, &adlp_sa_info); else if (IS_ALDERLAKE_S(dev_priv)) - icl_get_bw_info(dev_priv, &adls_sa_info); + tgl_get_bw_info(dev_priv, &adls_sa_info); else if (IS_ROCKETLAKE(dev_priv)) - icl_get_bw_info(dev_priv, &rkl_sa_info); + tgl_get_bw_info(dev_priv, &rkl_sa_info); else if (DISPLAY_VER(dev_priv) == 12) - icl_get_bw_info(dev_priv, &tgl_sa_info); + tgl_get_bw_info(dev_priv, &tgl_sa_info); else if (DISPLAY_VER(dev_priv) == 11) icl_get_bw_info(dev_priv, &icl_sa_info); } @@ -490,7 +634,7 @@ static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv, for_each_pipe(dev_priv, pipe) data_rate += bw_state->data_rate[pipe]; - if (DISPLAY_VER(dev_priv) >= 13 && intel_vtd_active()) + if (DISPLAY_VER(dev_priv) >= 13 && intel_vtd_active(dev_priv)) data_rate = data_rate * 105 / 100; return data_rate; @@ -746,7 +890,10 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) for (i = 0; i < num_qgv_points; i++) { unsigned int max_data_rate; - max_data_rate = icl_max_bw(dev_priv, num_active_planes, i); + if (DISPLAY_VER(dev_priv) > 11) + max_data_rate = tgl_max_bw(dev_priv, num_active_planes, i); + else + max_data_rate = icl_max_bw(dev_priv, num_active_planes, i); /* * We need to know which qgv point gives us * maximum bandwidth in order to disable SAGV diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 868dd43a7542..91c19e0a98d7 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -24,6 +24,7 @@ #include <linux/time.h> #include "intel_atomic.h" +#include "intel_audio.h" #include "intel_bw.h" #include "intel_cdclk.h" #include "intel_de.h" @@ -1975,6 +1976,8 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv, intel_psr_pause(intel_dp); } + intel_audio_cdclk_change_pre(dev_priv); + /* * Lock aux/gmbus while we change cdclk in case those * functions use cdclk. 
Not all platforms/ports do, @@ -2003,6 +2006,8 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv, intel_psr_resume(intel_dp); } + intel_audio_cdclk_change_post(dev_priv); + if (drm_WARN(&dev_priv->drm, intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config), "cdclk state doesn't match!\n")) { diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index 5359b7305a78..840f13b75492 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -26,7 +26,7 @@ #include "intel_de.h" #include "intel_display_types.h" #include "intel_dpll.h" -#include "intel_dsi.h" +#include "vlv_dsi_pll.h" #define CTM_COEFF_SIGN (1ULL << 63) @@ -552,8 +552,8 @@ static void i9xx_load_lut_8(struct intel_crtc *crtc, lut = blob->data; for (i = 0; i < 256; i++) - intel_de_write(dev_priv, PALETTE(pipe, i), - i9xx_lut_8(&lut[i])); + intel_de_write_fw(dev_priv, PALETTE(pipe, i), + i9xx_lut_8(&lut[i])); } static void i9xx_load_luts(const struct intel_crtc_state *crtc_state) @@ -576,15 +576,15 @@ static void i965_load_lut_10p6(struct intel_crtc *crtc, enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size - 1; i++) { - intel_de_write(dev_priv, PALETTE(pipe, 2 * i + 0), - i965_lut_10p6_ldw(&lut[i])); - intel_de_write(dev_priv, PALETTE(pipe, 2 * i + 1), - i965_lut_10p6_udw(&lut[i])); + intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 0), + i965_lut_10p6_ldw(&lut[i])); + intel_de_write_fw(dev_priv, PALETTE(pipe, 2 * i + 1), + i965_lut_10p6_udw(&lut[i])); } - intel_de_write(dev_priv, PIPEGCMAX(pipe, 0), lut[i].red); - intel_de_write(dev_priv, PIPEGCMAX(pipe, 1), lut[i].green); - intel_de_write(dev_priv, PIPEGCMAX(pipe, 2), lut[i].blue); + intel_de_write_fw(dev_priv, PIPEGCMAX(pipe, 0), lut[i].red); + intel_de_write_fw(dev_priv, PIPEGCMAX(pipe, 1), lut[i].green); + intel_de_write_fw(dev_priv, PIPEGCMAX(pipe, 2), lut[i].blue); } static void i965_load_luts(const struct intel_crtc_state *crtc_state) @@ -618,8 +618,8 @@ static void ilk_load_lut_8(struct intel_crtc *crtc, lut = blob->data; for (i = 0; i < 256; i++) - intel_de_write(dev_priv, LGC_PALETTE(pipe, i), - i9xx_lut_8(&lut[i])); + intel_de_write_fw(dev_priv, LGC_PALETTE(pipe, i), + i9xx_lut_8(&lut[i])); } static void ilk_load_lut_10(struct intel_crtc *crtc, @@ -631,8 +631,8 @@ static void ilk_load_lut_10(struct intel_crtc *crtc, enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size; i++) - intel_de_write(dev_priv, PREC_PALETTE(pipe, i), - ilk_lut_10(&lut[i])); + intel_de_write_fw(dev_priv, PREC_PALETTE(pipe, i), + ilk_lut_10(&lut[i])); } static void ilk_load_luts(const struct intel_crtc_state *crtc_state) @@ -681,16 +681,16 @@ static void ivb_load_lut_10(struct intel_crtc *crtc, const struct drm_color_lut *entry = &lut[i * (lut_size - 1) / (hw_lut_size - 1)]; - intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), prec_index++); - intel_de_write(dev_priv, PREC_PAL_DATA(pipe), - ilk_lut_10(entry)); + intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), prec_index++); + intel_de_write_fw(dev_priv, PREC_PAL_DATA(pipe), + ilk_lut_10(entry)); } /* * Reset the index, otherwise it prevents the legacy palette to be * written properly. 
*/ - intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0); + intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), 0); } /* On BDW+ the index auto increment mode actually works */ @@ -704,23 +704,23 @@ static void bdw_load_lut_10(struct intel_crtc *crtc, int i, lut_size = drm_color_lut_size(blob); enum pipe pipe = crtc->pipe; - intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), - prec_index | PAL_PREC_AUTO_INCREMENT); + intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), + prec_index | PAL_PREC_AUTO_INCREMENT); for (i = 0; i < hw_lut_size; i++) { /* We discard half the user entries in split gamma mode */ const struct drm_color_lut *entry = &lut[i * (lut_size - 1) / (hw_lut_size - 1)]; - intel_de_write(dev_priv, PREC_PAL_DATA(pipe), - ilk_lut_10(entry)); + intel_de_write_fw(dev_priv, PREC_PAL_DATA(pipe), + ilk_lut_10(entry)); } /* * Reset the index, otherwise it prevents the legacy palette to be * written properly. */ - intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0); + intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), 0); } static void ivb_load_lut_ext_max(const struct intel_crtc_state *crtc_state) @@ -821,9 +821,9 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state) * ignore the index bits, so we need to reset it to index 0 * separately. */ - intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); - intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), - PRE_CSC_GAMC_AUTO_INCREMENT); + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), + PRE_CSC_GAMC_AUTO_INCREMENT); for (i = 0; i < lut_size; i++) { /* @@ -839,15 +839,15 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state) * ToDo: Extend to max 7.0. Enable 32 bit input value * as compared to just 16 to achieve this. */ - intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), - lut[i].green); + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe), + lut[i].green); } /* Clamp values > 1.0. */ while (i++ < 35) - intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16); + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16); - intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); } static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_state) @@ -862,21 +862,21 @@ static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_stat * ignore the index bits, so we need to reset it to index 0 * separately. */ - intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); - intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), - PRE_CSC_GAMC_AUTO_INCREMENT); + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), + PRE_CSC_GAMC_AUTO_INCREMENT); for (i = 0; i < lut_size; i++) { u32 v = (i << 16) / (lut_size - 1); - intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), v); + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe), v); } /* Clamp values > 1.0. 
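(1 << 16 is 1.0 in this fixed-point format, i.e. the top of the linear ramp written above.)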
*/ while (i++ < 35) - intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16); + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16); - intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); + intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0); } static void glk_load_luts(const struct intel_crtc_state *crtc_state) @@ -1071,10 +1071,10 @@ static void chv_load_cgm_degamma(struct intel_crtc *crtc, enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size; i++) { - intel_de_write(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 0), - chv_cgm_degamma_ldw(&lut[i])); - intel_de_write(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 1), - chv_cgm_degamma_udw(&lut[i])); + intel_de_write_fw(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 0), + chv_cgm_degamma_ldw(&lut[i])); + intel_de_write_fw(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 1), + chv_cgm_degamma_udw(&lut[i])); } } @@ -1105,10 +1105,10 @@ static void chv_load_cgm_gamma(struct intel_crtc *crtc, enum pipe pipe = crtc->pipe; for (i = 0; i < lut_size; i++) { - intel_de_write(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0), - chv_cgm_gamma_ldw(&lut[i])); - intel_de_write(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1), - chv_cgm_gamma_udw(&lut[i])); + intel_de_write_fw(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0), + chv_cgm_gamma_ldw(&lut[i])); + intel_de_write_fw(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1), + chv_cgm_gamma_udw(&lut[i])); } } @@ -1131,8 +1131,8 @@ static void chv_load_luts(const struct intel_crtc_state *crtc_state) else i965_load_luts(crtc_state); - intel_de_write(dev_priv, CGM_PIPE_MODE(crtc->pipe), - crtc_state->cgm_mode); + intel_de_write_fw(dev_priv, CGM_PIPE_MODE(crtc->pipe), + crtc_state->cgm_mode); } void intel_color_load_luts(const struct intel_crtc_state *crtc_state) @@ -1808,7 +1808,7 @@ static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc) lut = blob->data; for (i = 0; i < LEGACY_LUT_LENGTH; i++) { - u32 val = intel_de_read(dev_priv, PALETTE(pipe, i)); + u32 val = intel_de_read_fw(dev_priv, PALETTE(pipe, i)); i9xx_lut_8_pack(&lut[i], val); } @@ -1843,15 +1843,15 @@ static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc) lut = blob->data; for (i = 0; i < lut_size - 1; i++) { - u32 ldw = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 0)); - u32 udw = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 1)); + u32 ldw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 0)); + u32 udw = intel_de_read_fw(dev_priv, PALETTE(pipe, 2 * i + 1)); i965_lut_10p6_pack(&lut[i], ldw, udw); } - lut[i].red = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 0))); - lut[i].green = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 1))); - lut[i].blue = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 2))); + lut[i].red = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(pipe, 0))); + lut[i].green = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(pipe, 1))); + lut[i].blue = i965_lut_11p6_max_pack(intel_de_read_fw(dev_priv, PIPEGCMAX(pipe, 2))); return blob; } @@ -1886,8 +1886,8 @@ static struct drm_property_blob *chv_read_cgm_gamma(struct intel_crtc *crtc) lut = blob->data; for (i = 0; i < lut_size; i++) { - u32 ldw = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0)); - u32 udw = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1)); + u32 ldw = intel_de_read_fw(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0)); + u32 udw = intel_de_read_fw(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1)); chv_cgm_gamma_pack(&lut[i], ldw, udw); } @@ -1922,7 +1922,7 @@ static struct drm_property_blob *ilk_read_lut_8(struct intel_crtc *crtc) lut = 
blob->data; for (i = 0; i < LEGACY_LUT_LENGTH; i++) { - u32 val = intel_de_read(dev_priv, LGC_PALETTE(pipe, i)); + u32 val = intel_de_read_fw(dev_priv, LGC_PALETTE(pipe, i)); i9xx_lut_8_pack(&lut[i], val); } @@ -1947,7 +1947,7 @@ static struct drm_property_blob *ilk_read_lut_10(struct intel_crtc *crtc) lut = blob->data; for (i = 0; i < lut_size; i++) { - u32 val = intel_de_read(dev_priv, PREC_PALETTE(pipe, i)); + u32 val = intel_de_read_fw(dev_priv, PREC_PALETTE(pipe, i)); ilk_lut_10_pack(&lut[i], val); } @@ -1999,16 +1999,16 @@ static struct drm_property_blob *bdw_read_lut_10(struct intel_crtc *crtc, lut = blob->data; - intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), - prec_index | PAL_PREC_AUTO_INCREMENT); + intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), + prec_index | PAL_PREC_AUTO_INCREMENT); for (i = 0; i < lut_size; i++) { - u32 val = intel_de_read(dev_priv, PREC_PAL_DATA(pipe)); + u32 val = intel_de_read_fw(dev_priv, PREC_PAL_DATA(pipe)); ilk_lut_10_pack(&lut[i], val); } - intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0); + intel_de_write_fw(dev_priv, PREC_PAL_INDEX(pipe), 0); return blob; } @@ -2050,17 +2050,17 @@ icl_read_lut_multi_segment(struct intel_crtc *crtc) lut = blob->data; - intel_de_write(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe), - PAL_PREC_AUTO_INCREMENT); + intel_de_write_fw(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe), + PAL_PREC_AUTO_INCREMENT); for (i = 0; i < 9; i++) { - u32 ldw = intel_de_read(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe)); - u32 udw = intel_de_read(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe)); + u32 ldw = intel_de_read_fw(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe)); + u32 udw = intel_de_read_fw(dev_priv, PREC_PAL_MULTI_SEG_DATA(pipe)); icl_lut_multi_seg_pack(&lut[i], ldw, udw); } - intel_de_write(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe), 0); + intel_de_write_fw(dev_priv, PREC_PAL_MULTI_SEG_INDEX(pipe), 0); /* * FIXME readouts from PAL_PREC_DATA register aren't giving diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c index 634e8d449457..f628e0542933 100644 --- a/drivers/gpu/drm/i915/display/intel_combo_phy.c +++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c @@ -301,7 +301,7 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv, val = intel_de_read(dev_priv, ICL_PORT_CL_DW10(phy)); val &= ~PWR_DOWN_LN_MASK; - val |= lane_mask << PWR_DOWN_LN_SHIFT; + val |= lane_mask; intel_de_write(dev_priv, ICL_PORT_CL_DW10(phy), val); } diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index 1c161eeed82f..f0f28572dfdc 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -45,6 +45,7 @@ #include "intel_fifo_underrun.h" #include "intel_gmbus.h" #include "intel_hotplug.h" +#include "intel_pch_display.h" /* Here's the desired hotplug mode */ #define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \ @@ -143,7 +144,7 @@ static void intel_crt_get_config(struct intel_encoder *encoder, static void hsw_crt_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + lpt_pch_get_config(pipe_config); hsw_ddi_get_config(encoder, pipe_config); @@ -152,8 +153,6 @@ static void hsw_crt_get_config(struct intel_encoder *encoder, DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC); pipe_config->hw.adjusted_mode.flags |= intel_crt_get_flags(encoder); - - pipe_config->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv); } /* Note: The 
caller is required to filter out dpms modes not supported by the @@ -247,6 +246,7 @@ static void hsw_post_disable_crt(struct intel_atomic_state *state, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { + struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); intel_crtc_vblank_off(old_crtc_state); @@ -261,10 +261,9 @@ static void hsw_post_disable_crt(struct intel_atomic_state *state, pch_post_disable_crt(state, encoder, old_crtc_state, old_conn_state); - lpt_disable_pch_transcoder(dev_priv); - lpt_disable_iclkip(dev_priv); + lpt_pch_disable(state, crtc); - intel_ddi_fdi_post_disable(state, encoder, old_crtc_state, old_conn_state); + hsw_fdi_disable(encoder); drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder); @@ -316,7 +315,7 @@ static void hsw_enable_crt(struct intel_atomic_state *state, intel_enable_transcoder(crtc_state); - lpt_pch_enable(crtc_state); + lpt_pch_enable(state, crtc); intel_crtc_vblank_on(crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c index 254e67141a77..243d5cc29734 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc.c +++ b/drivers/gpu/drm/i915/display/intel_crtc.c @@ -3,16 +3,18 @@ * Copyright © 2020 Intel Corporation */ #include <linux/kernel.h> +#include <linux/pm_qos.h> #include <linux/slab.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_plane.h> #include <drm/drm_plane_helper.h> +#include <drm/drm_vblank_work.h> #include "i915_trace.h" #include "i915_vgpu.h" - +#include "icl_dsi.h" #include "intel_atomic.h" #include "intel_atomic_plane.h" #include "intel_color.h" @@ -167,6 +169,8 @@ static void intel_crtc_destroy(struct drm_crtc *_crtc) { struct intel_crtc *crtc = to_intel_crtc(_crtc); + cpu_latency_qos_remove_request(&crtc->vblank_pm_qos); + drm_crtc_cleanup(&crtc->base); kfree(crtc); } @@ -344,6 +348,8 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) intel_crtc_crc_init(crtc); + cpu_latency_qos_add_request(&crtc->vblank_pm_qos, PM_QOS_DEFAULT_VALUE); + drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe); return 0; @@ -354,6 +360,65 @@ fail: return ret; } +static bool intel_crtc_needs_vblank_work(const struct intel_crtc_state *crtc_state) +{ + return crtc_state->hw.active && + !intel_crtc_needs_modeset(crtc_state) && + !crtc_state->preload_luts && + (crtc_state->uapi.color_mgmt_changed || + crtc_state->update_pipe); +} + +static void intel_crtc_vblank_work(struct kthread_work *base) +{ + struct drm_vblank_work *work = to_drm_vblank_work(base); + struct intel_crtc_state *crtc_state = + container_of(work, typeof(*crtc_state), vblank_work); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + trace_intel_crtc_vblank_work_start(crtc); + + intel_color_load_luts(crtc_state); + + if (crtc_state->uapi.event) { + spin_lock_irq(&crtc->base.dev->event_lock); + drm_crtc_send_vblank_event(&crtc->base, crtc_state->uapi.event); + crtc_state->uapi.event = NULL; + spin_unlock_irq(&crtc->base.dev->event_lock); + } + + trace_intel_crtc_vblank_work_end(crtc); +} + +static void intel_crtc_vblank_work_init(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + drm_vblank_work_init(&crtc_state->vblank_work, &crtc->base, + intel_crtc_vblank_work); + /* + * Interrupt latency is critical for getting the vblank + * work executed as early as 
possible during the vblank. + */ + cpu_latency_qos_update_request(&crtc->vblank_pm_qos, 0); +} + +void intel_wait_for_vblank_workers(struct intel_atomic_state *state) +{ + struct intel_crtc_state *crtc_state; + struct intel_crtc *crtc; + int i; + + for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { + if (!intel_crtc_needs_vblank_work(crtc_state)) + continue; + + drm_vblank_work_flush(&crtc_state->vblank_work); + cpu_latency_qos_update_request(&crtc->vblank_pm_qos, + PM_QOS_DEFAULT_VALUE); + } +} + int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, int usecs) { @@ -387,7 +452,7 @@ static int intel_mode_vblank_start(const struct drm_display_mode *mode) * until a subsequent call to intel_pipe_update_end(). That is done to * avoid random delays. */ -void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) +void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -402,10 +467,17 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state) if (new_crtc_state->uapi.async_flip) return; - if (new_crtc_state->vrr.enable) - vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state); - else + if (intel_crtc_needs_vblank_work(new_crtc_state)) + intel_crtc_vblank_work_init(new_crtc_state); + + if (new_crtc_state->vrr.enable) { + if (intel_vrr_is_push_sent(new_crtc_state)) + vblank_start = intel_vrr_vmin_vblank_start(new_crtc_state); + else + vblank_start = intel_vrr_vmax_vblank_start(new_crtc_state); + } else { vblank_start = intel_mode_vblank_start(adjusted_mode); + } /* FIXME needs to be calibrated sensibly */ min = vblank_start - intel_usecs_to_scanlines(adjusted_mode, @@ -554,7 +626,11 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state) * Would be slightly nice to just grab the vblank count and arm the * event outside of the critical section - the spinlock might spin for a * while ... */ - if (new_crtc_state->uapi.event) { + if (intel_crtc_needs_vblank_work(new_crtc_state)) { + drm_vblank_work_schedule(&new_crtc_state->vblank_work, + drm_crtc_accurate_vblank_count(&crtc->base) + 1, + false); + } else if (new_crtc_state->uapi.event) { drm_WARN_ON(&dev_priv->drm, drm_crtc_vblank_get(&crtc->base) != 0); @@ -566,11 +642,24 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state) new_crtc_state->uapi.event = NULL; } - local_irq_enable(); - - /* Send VRR Push to terminate Vblank */ + /* + * Send VRR Push to terminate Vblank. If we are already in vblank + * this has to be done _after_ sampling the frame counter, as + * otherwise the push would immediately terminate the vblank and + * the sampled frame counter would correspond to the next frame + * instead of the current frame. + * + * There is a tiny race here (iff vblank evasion failed us) where + * we might sample the frame counter just before vmax vblank start + * but the push would be sent just after it. That would cause the + * push to affect the next frame instead of the current frame, + * which would cause the next frame to terminate already at vmin + * vblank start instead of vmax vblank start. 
+ */ intel_vrr_send_push(new_crtc_state); + local_irq_enable(); + if (intel_vgpu_active(dev_priv)) return; diff --git a/drivers/gpu/drm/i915/display/intel_crtc.h b/drivers/gpu/drm/i915/display/intel_crtc.h index a5ae997581aa..a0039fdb1eb0 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc.h +++ b/drivers/gpu/drm/i915/display/intel_crtc.h @@ -9,10 +9,14 @@ #include <linux/types.h> enum pipe; +struct drm_display_mode; struct drm_i915_private; +struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; +int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, + int usecs); u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state); int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe); struct intel_crtc_state *intel_crtc_state_alloc(struct intel_crtc *crtc); @@ -21,5 +25,8 @@ void intel_crtc_state_reset(struct intel_crtc_state *crtc_state, u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc); void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state); void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state); +void intel_pipe_update_start(struct intel_crtc_state *new_crtc_state); +void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state); +void intel_wait_for_vblank_workers(struct intel_atomic_state *state); #endif diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c index 11842f212613..16d34685d83f 100644 --- a/drivers/gpu/drm/i915/display/intel_cursor.c +++ b/drivers/gpu/drm/i915/display/intel_cursor.c @@ -28,11 +28,6 @@ static const u32 intel_cursor_formats[] = { DRM_FORMAT_ARGB8888, }; -static const u64 cursor_format_modifiers[] = { - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; - static u32 intel_cursor_base(const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = @@ -195,7 +190,7 @@ static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state, { return CURSOR_ENABLE | CURSOR_FORMAT_ARGB | - CURSOR_STRIDE(plane_state->view.color_plane[0].stride); + CURSOR_STRIDE(plane_state->view.color_plane[0].mapping_stride); } static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state) @@ -234,7 +229,7 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state, } drm_WARN_ON(&i915->drm, plane_state->uapi.visible && - plane_state->view.color_plane[0].stride != fb->pitches[0]); + plane_state->view.color_plane[0].mapping_stride != fb->pitches[0]); switch (fb->pitches[0]) { case 256: @@ -253,9 +248,10 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state, return 0; } -static void i845_update_cursor(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +/* TODO: split into noarm+arm pair */ +static void i845_cursor_update_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); u32 cntl = 0, base = 0, pos = 0, size = 0; @@ -298,10 +294,10 @@ static void i845_update_cursor(struct intel_plane *plane, spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } -static void i845_disable_cursor(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) +static void i845_cursor_disable_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { - i845_update_cursor(plane, crtc_state, NULL); + i845_cursor_update_arm(plane, crtc_state, NULL); } static bool 
i845_cursor_get_hw_state(struct intel_plane *plane, @@ -455,7 +451,7 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state, } drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible && - plane_state->view.color_plane[0].stride != fb->pitches[0]); + plane_state->view.color_plane[0].mapping_stride != fb->pitches[0]); if (fb->pitches[0] != drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) { @@ -488,9 +484,10 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state, return 0; } -static void i9xx_update_cursor(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +/* TODO: split into noarm+arm pair */ +static void i9xx_cursor_update_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; @@ -562,10 +559,10 @@ static void i9xx_update_cursor(struct intel_plane *plane, spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } -static void i9xx_disable_cursor(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) +static void i9xx_cursor_disable_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { - i9xx_update_cursor(plane, crtc_state, NULL); + i9xx_cursor_update_arm(plane, crtc_state, NULL); } static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, @@ -605,8 +602,10 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, static bool intel_cursor_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { - return modifier == DRM_FORMAT_MOD_LINEAR && - format == DRM_FORMAT_ARGB8888; + if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier)) + return false; + + return format == DRM_FORMAT_ARGB8888; } static int @@ -717,10 +716,12 @@ intel_legacy_cursor_update(struct drm_plane *_plane, */ crtc_state->active_planes = new_crtc_state->active_planes; - if (new_plane_state->uapi.visible) - intel_update_plane(plane, crtc_state, new_plane_state); - else - intel_disable_plane(plane, crtc_state); + if (new_plane_state->uapi.visible) { + intel_plane_update_noarm(plane, crtc_state, new_plane_state); + intel_plane_update_arm(plane, crtc_state, new_plane_state); + } else { + intel_plane_disable_arm(plane, crtc_state); + } intel_plane_unpin_fb(old_plane_state); @@ -754,6 +755,7 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv, { struct intel_plane *cursor; int ret, zpos; + u64 *modifiers; cursor = intel_plane_alloc(); if (IS_ERR(cursor)) @@ -766,14 +768,14 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv, if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { cursor->max_stride = i845_cursor_max_stride; - cursor->update_plane = i845_update_cursor; - cursor->disable_plane = i845_disable_cursor; + cursor->update_arm = i845_cursor_update_arm; + cursor->disable_arm = i845_cursor_disable_arm; cursor->get_hw_state = i845_cursor_get_hw_state; cursor->check_plane = i845_check_cursor; } else { cursor->max_stride = i9xx_cursor_max_stride; - cursor->update_plane = i9xx_update_cursor; - cursor->disable_plane = i9xx_disable_cursor; + cursor->update_arm = i9xx_cursor_update_arm; + cursor->disable_arm = i9xx_cursor_disable_arm; cursor->get_hw_state = i9xx_cursor_get_hw_state; cursor->check_plane = i9xx_check_cursor; } @@ -784,13 +786,18 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv, if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || 
HAS_CUR_FBC(dev_priv)) cursor->cursor.size = ~0; + modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_NONE); + ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base, 0, &intel_cursor_plane_funcs, intel_cursor_formats, ARRAY_SIZE(intel_cursor_formats), - cursor_format_modifiers, + modifiers, DRM_PLANE_TYPE_CURSOR, "cursor %c", pipe_name(pipe)); + + kfree(modifiers); + if (ret) goto fail; diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index cfb567df71b3..f9e7e3d1c7d0 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -321,10 +321,11 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config) { int dotclock; + /* CRT dotclock is determined via other means */ if (pipe_config->has_pch_encoder) - dotclock = intel_dotclock_calculate(pipe_config->port_clock, - &pipe_config->fdi_m_n); - else if (intel_crtc_has_dp_encoder(pipe_config)) + return; + + if (intel_crtc_has_dp_encoder(pipe_config)) dotclock = intel_dotclock_calculate(pipe_config->port_clock, &pipe_config->dp_m_n); else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24) @@ -1039,7 +1040,6 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - int level = intel_ddi_level(encoder, crtc_state, 0); const struct intel_ddi_buf_trans *trans; enum phy phy = intel_port_to_phy(dev_priv, encoder->port); int n_entries, ln; @@ -1068,32 +1068,36 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder, intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val); /* Program PORT_TX_DW2 */ - val = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy)); - val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | - RCOMP_SCALAR_MASK); - val |= SWING_SEL_UPPER(trans->entries[level].icl.dw2_swing_sel); - val |= SWING_SEL_LOWER(trans->entries[level].icl.dw2_swing_sel); - /* Program Rcomp scalar for every table entry */ - val |= RCOMP_SCALAR(0x98); - intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), val); + for (ln = 0; ln < 4; ln++) { + int level = intel_ddi_level(encoder, crtc_state, ln); + + intel_de_rmw(dev_priv, ICL_PORT_TX_DW2_LN(ln, phy), + SWING_SEL_UPPER_MASK | SWING_SEL_LOWER_MASK | RCOMP_SCALAR_MASK, + SWING_SEL_UPPER(trans->entries[level].icl.dw2_swing_sel) | + SWING_SEL_LOWER(trans->entries[level].icl.dw2_swing_sel) | + RCOMP_SCALAR(0x98)); + } /* Program PORT_TX_DW4 */ /* We cannot write to GRP. It would overwrite individual loadgen. 
*/ for (ln = 0; ln < 4; ln++) { - val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy)); - val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | - CURSOR_COEFF_MASK); - val |= POST_CURSOR_1(trans->entries[level].icl.dw4_post_cursor_1); - val |= POST_CURSOR_2(trans->entries[level].icl.dw4_post_cursor_2); - val |= CURSOR_COEFF(trans->entries[level].icl.dw4_cursor_coeff); - intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val); + int level = intel_ddi_level(encoder, crtc_state, ln); + + intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), + POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | CURSOR_COEFF_MASK, + POST_CURSOR_1(trans->entries[level].icl.dw4_post_cursor_1) | + POST_CURSOR_2(trans->entries[level].icl.dw4_post_cursor_2) | + CURSOR_COEFF(trans->entries[level].icl.dw4_cursor_coeff)); } /* Program PORT_TX_DW7 */ - val = intel_de_read(dev_priv, ICL_PORT_TX_DW7_LN(0, phy)); - val &= ~N_SCALAR_MASK; - val |= N_SCALAR(trans->entries[level].icl.dw7_n_scalar); - intel_de_write(dev_priv, ICL_PORT_TX_DW7_GRP(phy), val); + for (ln = 0; ln < 4; ln++) { + int level = intel_ddi_level(encoder, crtc_state, ln); + + intel_de_rmw(dev_priv, ICL_PORT_TX_DW7_LN(ln, phy), + N_SCALAR_MASK, + N_SCALAR(trans->entries[level].icl.dw7_n_scalar)); + } } static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder, @@ -1124,16 +1128,14 @@ static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder, * > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0) */ for (ln = 0; ln < 4; ln++) { - val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy)); - val &= ~LOADGEN_SELECT; - val |= icl_combo_phy_loadgen_select(crtc_state, ln); - intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val); + intel_de_rmw(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), + LOADGEN_SELECT, + icl_combo_phy_loadgen_select(crtc_state, ln)); } /* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */ - val = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy)); - val |= SUS_CLOCK_CONFIG; - intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), val); + intel_de_rmw(dev_priv, ICL_PORT_CL_DW5(phy), + 0, SUS_CLOCK_CONFIG); /* 4. Clear training enable to change swing values */ val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy)); @@ -1154,10 +1156,8 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port); - int level = intel_ddi_level(encoder, crtc_state, 0); const struct intel_ddi_buf_trans *trans; int n_entries, ln; - u32 val; if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder))) return; @@ -1166,53 +1166,51 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder, if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans)) return; - /* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. 
*/ for (ln = 0; ln < 2; ln++) { - val = intel_de_read(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port)); - val &= ~CRI_USE_FS32; - intel_de_write(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port), val); - - val = intel_de_read(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port)); - val &= ~CRI_USE_FS32; - intel_de_write(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port), val); + intel_de_rmw(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port), + CRI_USE_FS32, 0); + intel_de_rmw(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port), + CRI_USE_FS32, 0); } /* Program MG_TX_SWINGCTRL with values from vswing table */ for (ln = 0; ln < 2; ln++) { - val = intel_de_read(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port)); - val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK; - val |= CRI_TXDEEMPH_OVERRIDE_17_12( - trans->entries[level].mg.cri_txdeemph_override_17_12); - intel_de_write(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port), val); - - val = intel_de_read(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port)); - val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK; - val |= CRI_TXDEEMPH_OVERRIDE_17_12( - trans->entries[level].mg.cri_txdeemph_override_17_12); - intel_de_write(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port), val); + int level; + + level = intel_ddi_level(encoder, crtc_state, 2*ln+0); + + intel_de_rmw(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port), + CRI_TXDEEMPH_OVERRIDE_17_12_MASK, + CRI_TXDEEMPH_OVERRIDE_17_12(trans->entries[level].mg.cri_txdeemph_override_17_12)); + + level = intel_ddi_level(encoder, crtc_state, 2*ln+1); + + intel_de_rmw(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port), + CRI_TXDEEMPH_OVERRIDE_17_12_MASK, + CRI_TXDEEMPH_OVERRIDE_17_12(trans->entries[level].mg.cri_txdeemph_override_17_12)); } /* Program MG_TX_DRVCTRL with values from vswing table */ for (ln = 0; ln < 2; ln++) { - val = intel_de_read(dev_priv, MG_TX1_DRVCTRL(ln, tc_port)); - val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK | - CRI_TXDEEMPH_OVERRIDE_5_0_MASK); - val |= CRI_TXDEEMPH_OVERRIDE_5_0( - trans->entries[level].mg.cri_txdeemph_override_5_0) | - CRI_TXDEEMPH_OVERRIDE_11_6( - trans->entries[level].mg.cri_txdeemph_override_11_6) | - CRI_TXDEEMPH_OVERRIDE_EN; - intel_de_write(dev_priv, MG_TX1_DRVCTRL(ln, tc_port), val); - - val = intel_de_read(dev_priv, MG_TX2_DRVCTRL(ln, tc_port)); - val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK | - CRI_TXDEEMPH_OVERRIDE_5_0_MASK); - val |= CRI_TXDEEMPH_OVERRIDE_5_0( - trans->entries[level].mg.cri_txdeemph_override_5_0) | - CRI_TXDEEMPH_OVERRIDE_11_6( - trans->entries[level].mg.cri_txdeemph_override_11_6) | - CRI_TXDEEMPH_OVERRIDE_EN; - intel_de_write(dev_priv, MG_TX2_DRVCTRL(ln, tc_port), val); + int level; + + level = intel_ddi_level(encoder, crtc_state, 2*ln+0); + + intel_de_rmw(dev_priv, MG_TX1_DRVCTRL(ln, tc_port), + CRI_TXDEEMPH_OVERRIDE_11_6_MASK | + CRI_TXDEEMPH_OVERRIDE_5_0_MASK, + CRI_TXDEEMPH_OVERRIDE_11_6(trans->entries[level].mg.cri_txdeemph_override_11_6) | + CRI_TXDEEMPH_OVERRIDE_5_0(trans->entries[level].mg.cri_txdeemph_override_5_0) | + CRI_TXDEEMPH_OVERRIDE_EN); + + level = intel_ddi_level(encoder, crtc_state, 2*ln+1); + + intel_de_rmw(dev_priv, MG_TX2_DRVCTRL(ln, tc_port), + CRI_TXDEEMPH_OVERRIDE_11_6_MASK | + CRI_TXDEEMPH_OVERRIDE_5_0_MASK, + CRI_TXDEEMPH_OVERRIDE_11_6(trans->entries[level].mg.cri_txdeemph_override_11_6) | + CRI_TXDEEMPH_OVERRIDE_5_0(trans->entries[level].mg.cri_txdeemph_override_5_0) | + CRI_TXDEEMPH_OVERRIDE_EN); /* FIXME: Program CRI_LOADGEN_SEL after the spec is updated */ } @@ -1223,50 +1221,34 @@ static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder, * values from table for which TX1 and TX2 enabled. 
*/ for (ln = 0; ln < 2; ln++) { - val = intel_de_read(dev_priv, MG_CLKHUB(ln, tc_port)); - if (crtc_state->port_clock < 300000) - val |= CFG_LOW_RATE_LKREN_EN; - else - val &= ~CFG_LOW_RATE_LKREN_EN; - intel_de_write(dev_priv, MG_CLKHUB(ln, tc_port), val); + intel_de_rmw(dev_priv, MG_CLKHUB(ln, tc_port), + CFG_LOW_RATE_LKREN_EN, + crtc_state->port_clock < 300000 ? CFG_LOW_RATE_LKREN_EN : 0); } /* Program the MG_TX_DCC<LN, port being used> based on the link frequency */ for (ln = 0; ln < 2; ln++) { - val = intel_de_read(dev_priv, MG_TX1_DCC(ln, tc_port)); - val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK; - if (crtc_state->port_clock <= 500000) { - val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN; - } else { - val |= CFG_AMI_CK_DIV_OVERRIDE_EN | - CFG_AMI_CK_DIV_OVERRIDE_VAL(1); - } - intel_de_write(dev_priv, MG_TX1_DCC(ln, tc_port), val); - - val = intel_de_read(dev_priv, MG_TX2_DCC(ln, tc_port)); - val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK; - if (crtc_state->port_clock <= 500000) { - val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN; - } else { - val |= CFG_AMI_CK_DIV_OVERRIDE_EN | - CFG_AMI_CK_DIV_OVERRIDE_VAL(1); - } - intel_de_write(dev_priv, MG_TX2_DCC(ln, tc_port), val); + intel_de_rmw(dev_priv, MG_TX1_DCC(ln, tc_port), + CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK | + CFG_AMI_CK_DIV_OVERRIDE_EN, + crtc_state->port_clock > 500000 ? + CFG_AMI_CK_DIV_OVERRIDE_VAL(1) | + CFG_AMI_CK_DIV_OVERRIDE_EN : 0); + + intel_de_rmw(dev_priv, MG_TX2_DCC(ln, tc_port), + CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK | + CFG_AMI_CK_DIV_OVERRIDE_EN, + crtc_state->port_clock > 500000 ? + CFG_AMI_CK_DIV_OVERRIDE_VAL(1) | + CFG_AMI_CK_DIV_OVERRIDE_EN : 0); } /* Program MG_TX_PISO_READLOAD with values from vswing table */ for (ln = 0; ln < 2; ln++) { - val = intel_de_read(dev_priv, - MG_TX1_PISO_READLOAD(ln, tc_port)); - val |= CRI_CALCINIT; - intel_de_write(dev_priv, MG_TX1_PISO_READLOAD(ln, tc_port), - val); - - val = intel_de_read(dev_priv, - MG_TX2_PISO_READLOAD(ln, tc_port)); - val |= CRI_CALCINIT; - intel_de_write(dev_priv, MG_TX2_PISO_READLOAD(ln, tc_port), - val); + intel_de_rmw(dev_priv, MG_TX1_PISO_READLOAD(ln, tc_port), + 0, CRI_CALCINIT); + intel_de_rmw(dev_priv, MG_TX2_PISO_READLOAD(ln, tc_port), + 0, CRI_CALCINIT); } } @@ -1275,9 +1257,7 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port); - int level = intel_ddi_level(encoder, crtc_state, 0); const struct intel_ddi_buf_trans *trans; - u32 val, dpcnt_mask, dpcnt_val; int n_entries, ln; if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder))) @@ -1287,33 +1267,36 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder, if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans)) return; - dpcnt_mask = (DKL_TX_PRESHOOT_COEFF_MASK | - DKL_TX_DE_EMPAHSIS_COEFF_MASK | - DKL_TX_VSWING_CONTROL_MASK); - dpcnt_val = DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing); - dpcnt_val |= DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis); - dpcnt_val |= DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot); - for (ln = 0; ln < 2; ln++) { + int level; + intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln)); intel_de_write(dev_priv, DKL_TX_PMD_LANE_SUS(tc_port), 0); - /* All the registers are RMW */ - val = intel_de_read(dev_priv, DKL_TX_DPCNTL0(tc_port)); - val &= ~dpcnt_mask; - val |= dpcnt_val; - intel_de_write(dev_priv, DKL_TX_DPCNTL0(tc_port), val); + level = intel_ddi_level(encoder, crtc_state, 2*ln+0); + + 
intel_de_rmw(dev_priv, DKL_TX_DPCNTL0(tc_port), + DKL_TX_PRESHOOT_COEFF_MASK | + DKL_TX_DE_EMPAHSIS_COEFF_MASK | + DKL_TX_VSWING_CONTROL_MASK, + DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) | + DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) | + DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing)); + + level = intel_ddi_level(encoder, crtc_state, 2*ln+1); - val = intel_de_read(dev_priv, DKL_TX_DPCNTL1(tc_port)); - val &= ~dpcnt_mask; - val |= dpcnt_val; - intel_de_write(dev_priv, DKL_TX_DPCNTL1(tc_port), val); + intel_de_rmw(dev_priv, DKL_TX_DPCNTL1(tc_port), + DKL_TX_PRESHOOT_COEFF_MASK | + DKL_TX_DE_EMPAHSIS_COEFF_MASK | + DKL_TX_VSWING_CONTROL_MASK, + DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) | + DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) | + DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing)); - val = intel_de_read(dev_priv, DKL_TX_DPCNTL2(tc_port)); - val &= ~DKL_TX_DP20BITMODE; - intel_de_write(dev_priv, DKL_TX_DPCNTL2(tc_port), val); + intel_de_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port), + DKL_TX_DP20BITMODE, 0); } } @@ -1938,7 +1921,7 @@ void intel_ddi_enable_clock(struct intel_encoder *encoder, encoder->enable_clock(encoder, crtc_state); } -static void intel_ddi_disable_clock(struct intel_encoder *encoder) +void intel_ddi_disable_clock(struct intel_encoder *encoder) { if (encoder->disable_clock) encoder->disable_clock(encoder); @@ -2385,7 +2368,10 @@ static void dg2_ddi_pre_enable_dp(struct intel_atomic_state *state, /* 5.k Configure and enable FEC if needed */ intel_ddi_enable_fec(encoder, crtc_state); - intel_dsc_enable(encoder, crtc_state); + + intel_dsc_dp_pps_write(encoder, crtc_state); + + intel_dsc_enable(crtc_state); } static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, @@ -2519,8 +2505,11 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, /* 7.l Configure and enable FEC if needed */ intel_ddi_enable_fec(encoder, crtc_state); + + intel_dsc_dp_pps_write(encoder, crtc_state); + if (!crtc_state->bigjoiner) - intel_dsc_enable(encoder, crtc_state); + intel_dsc_enable(crtc_state); } static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, @@ -2585,8 +2574,10 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, if (!is_mst) intel_ddi_enable_pipe_clock(encoder, crtc_state); + intel_dsc_dp_pps_write(encoder, crtc_state); + if (!crtc_state->bigjoiner) - intel_dsc_enable(encoder, crtc_state); + intel_dsc_enable(crtc_state); } static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state, @@ -2824,12 +2815,10 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state, } if (old_crtc_state->bigjoiner_linked_crtc) { - struct intel_atomic_state *state = - to_intel_atomic_state(old_crtc_state->uapi.state); - struct intel_crtc *slave = + struct intel_crtc *slave_crtc = old_crtc_state->bigjoiner_linked_crtc; const struct intel_crtc_state *old_slave_crtc_state = - intel_atomic_get_old_crtc_state(state, slave); + intel_atomic_get_old_crtc_state(state, slave_crtc); intel_crtc_vblank_off(old_slave_crtc_state); @@ -2866,41 +2855,6 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state, intel_tc_port_put_link(dig_port); } -void intel_ddi_fdi_post_disable(struct intel_atomic_state *state, - struct intel_encoder *encoder, - const struct intel_crtc_state *old_crtc_state, - const struct drm_connector_state *old_conn_state) -{ - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - u32 val; - - /* - * Bspec lists this as 
both step 13 (before DDI_BUF_CTL disable) - * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN, - * step 13 is the correct place for it. Step 18 is where it was - * originally before the BUN. - */ - val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); - val &= ~FDI_RX_ENABLE; - intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); - - intel_disable_ddi_buf(encoder, old_crtc_state); - intel_ddi_disable_clock(encoder); - - val = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A)); - val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); - val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); - intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), val); - - val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); - val &= ~FDI_PCDCLK; - intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); - - val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); - val &= ~FDI_RX_PLL_ENABLE; - intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); -} - static void trans_port_sync_stop_link_train(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) @@ -3095,6 +3049,12 @@ static void intel_disable_ddi_dp(struct intel_atomic_state *state, intel_dp->link_trained = false; + if (old_crtc_state->has_audio) + intel_audio_codec_disable(encoder, + old_crtc_state, old_conn_state); + + intel_drrs_disable(intel_dp, old_crtc_state); + intel_psr_disable(intel_dp, old_crtc_state); intel_edp_backlight_off(old_conn_state); /* Disable the decompression in DP Sink */ intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state, @@ -3112,6 +3072,10 @@ static void intel_disable_ddi_hdmi(struct intel_atomic_state *state, struct drm_i915_private *i915 = to_i915(encoder->base.dev); struct drm_connector *connector = old_conn_state->connector; + if (old_crtc_state->has_audio) + intel_audio_codec_disable(encoder, + old_crtc_state, old_conn_state); + if (!intel_hdmi_handle_sink_scrambling(encoder, connector, false, false)) drm_dbg_kms(&i915->drm, @@ -3119,25 +3083,6 @@ static void intel_disable_ddi_hdmi(struct intel_atomic_state *state, connector->base.id, connector->name); } -static void intel_pre_disable_ddi(struct intel_atomic_state *state, - struct intel_encoder *encoder, - const struct intel_crtc_state *old_crtc_state, - const struct drm_connector_state *old_conn_state) -{ - struct intel_dp *intel_dp; - - if (old_crtc_state->has_audio) - intel_audio_codec_disable(encoder, old_crtc_state, - old_conn_state); - - if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI)) - return; - - intel_dp = enc_to_intel_dp(encoder); - intel_drrs_disable(intel_dp, old_crtc_state); - intel_psr_disable(intel_dp, old_crtc_state); -} - static void intel_disable_ddi(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, @@ -3195,8 +3140,14 @@ intel_ddi_update_prepare(struct intel_atomic_state *state, intel_tc_port_get_link(enc_to_dig_port(encoder), required_lanes); - if (crtc_state && crtc_state->hw.active) + if (crtc_state && crtc_state->hw.active) { + struct intel_crtc *slave_crtc = crtc_state->bigjoiner_linked_crtc; + intel_update_active_dpll(state, crtc, encoder); + + if (slave_crtc) + intel_update_active_dpll(state, slave_crtc, encoder); + } } static void @@ -3552,18 +3503,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder, if (drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder))) return; - if (pipe_config->bigjoiner_slave) { - /* read out pipe settings from master */ - enum transcoder save = pipe_config->cpu_transcoder; 
- - /* Our own transcoder needs to be disabled when reading it in intel_ddi_read_func_ctl() */ - WARN_ON(pipe_config->output_types); - pipe_config->cpu_transcoder = (enum transcoder)pipe_config->bigjoiner_linked_crtc->pipe; - intel_ddi_read_func_ctl(encoder, pipe_config); - pipe_config->cpu_transcoder = save; - } else { - intel_ddi_read_func_ctl(encoder, pipe_config); - } + intel_ddi_read_func_ctl(encoder, pipe_config); intel_ddi_mso_get_config(encoder, pipe_config); @@ -3591,8 +3531,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder, dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp; } - if (!pipe_config->bigjoiner_slave) - ddi_dotclock_get(pipe_config); + ddi_dotclock_get(pipe_config); if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) pipe_config->lane_lat_optim_mask = @@ -4472,7 +4411,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) encoder->enable = intel_enable_ddi; encoder->pre_pll_enable = intel_ddi_pre_pll_enable; encoder->pre_enable = intel_ddi_pre_enable; - encoder->pre_disable = intel_pre_disable_ddi; encoder->disable = intel_disable_ddi; encoder->post_disable = intel_ddi_post_disable; encoder->update_pipe = intel_ddi_update_pipe; diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h index d6971717ef9c..c2fea6562917 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.h +++ b/drivers/gpu/drm/i915/display/intel_ddi.h @@ -6,11 +6,11 @@ #ifndef __INTEL_DDI_H__ #define __INTEL_DDI_H__ -#include "intel_display.h" #include "i915_reg.h" struct drm_connector_state; struct drm_i915_private; +struct intel_atomic_state; struct intel_connector; struct intel_crtc; struct intel_crtc_state; @@ -18,6 +18,8 @@ struct intel_dp; struct intel_dpll_hw_state; struct intel_encoder; struct intel_shared_dpll; +enum pipe; +enum port; enum transcoder; i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder, @@ -30,6 +32,7 @@ void intel_ddi_fdi_post_disable(struct intel_atomic_state *state, const struct drm_connector_state *old_conn_state); void intel_ddi_enable_clock(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); +void intel_ddi_disable_clock(struct intel_encoder *encoder); void intel_ddi_get_clock(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, struct intel_shared_dpll *pll); diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index ec403e46a328..badf035efaeb 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -70,11 +70,10 @@ #include "gt/gen8_ppgtt.h" -#include "pxp/intel_pxp.h" - #include "g4x_dp.h" #include "g4x_hdmi.h" #include "i915_drv.h" +#include "icl_dsi.h" #include "intel_acpi.h" #include "intel_atomic.h" #include "intel_atomic_plane.h" @@ -96,6 +95,8 @@ #include "intel_hotplug.h" #include "intel_overlay.h" #include "intel_panel.h" +#include "intel_pch_display.h" +#include "intel_pch_refclk.h" #include "intel_pcode.h" #include "intel_pipe_crc.h" #include "intel_plane_initial.h" @@ -103,19 +104,15 @@ #include "intel_pps.h" #include "intel_psr.h" #include "intel_quirks.h" -#include "intel_sbi.h" #include "intel_sprite.h" #include "intel_tc.h" #include "intel_vga.h" #include "i9xx_plane.h" #include "skl_scaler.h" #include "skl_universal_plane.h" +#include "vlv_dsi_pll.h" #include "vlv_sideband.h" - -static void i9xx_crtc_clock_get(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config); -static void ilk_pch_clock_get(struct intel_crtc *crtc, - 
struct intel_crtc_state *pipe_config); +#include "vlv_dsi.h" static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state); static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state); @@ -341,6 +338,14 @@ is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state) is_trans_port_sync_slave(crtc_state); } +static struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state) +{ + if (crtc_state->bigjoiner_slave) + return crtc_state->bigjoiner_linked_crtc; + else + return to_intel_crtc(crtc_state->uapi.crtc); +} + static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv, enum pipe pipe) { @@ -454,80 +459,6 @@ static void assert_planes_disabled(struct intel_crtc *crtc) assert_plane_disabled(plane); } -void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, - enum pipe pipe) -{ - u32 val; - bool enabled; - - val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe)); - enabled = !!(val & TRANS_ENABLE); - I915_STATE_WARN(enabled, - "transcoder assertion failed, should be off on pipe %c but is still active\n", - pipe_name(pipe)); -} - -static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, - enum pipe pipe, enum port port, - i915_reg_t dp_reg) -{ - enum pipe port_pipe; - bool state; - - state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe); - - I915_STATE_WARN(state && port_pipe == pipe, - "PCH DP %c enabled on transcoder %c, should be disabled\n", - port_name(port), pipe_name(pipe)); - - I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, - "IBX PCH DP %c still using transcoder B\n", - port_name(port)); -} - -static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, - enum pipe pipe, enum port port, - i915_reg_t hdmi_reg) -{ - enum pipe port_pipe; - bool state; - - state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe); - - I915_STATE_WARN(state && port_pipe == pipe, - "PCH HDMI %c enabled on transcoder %c, should be disabled\n", - port_name(port), pipe_name(pipe)); - - I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, - "IBX PCH HDMI %c still using transcoder B\n", - port_name(port)); -} - -static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, - enum pipe pipe) -{ - enum pipe port_pipe; - - assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B); - assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C); - assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D); - - I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) && - port_pipe == pipe, - "PCH VGA enabled on transcoder %c, should be disabled\n", - pipe_name(pipe)); - - I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) && - port_pipe == pipe, - "PCH LVDS enabled on transcoder %c, should be disabled\n", - pipe_name(pipe)); - - /* PCH SDVOB multiplex with HDMIB */ - assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB); - assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC); - assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID); -} - void vlv_wait_port_ready(struct drm_i915_private *dev_priv, struct intel_digital_port *dig_port, unsigned int expected_mask) @@ -562,154 +493,6 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv, expected_mask); } -static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = 
crtc->pipe; - i915_reg_t reg; - u32 val, pipeconf_val; - - /* Make sure PCH DPLL is enabled */ - assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll); - - /* FDI must be feeding us bits for PCH ports */ - assert_fdi_tx_enabled(dev_priv, pipe); - assert_fdi_rx_enabled(dev_priv, pipe); - - if (HAS_PCH_CPT(dev_priv)) { - reg = TRANS_CHICKEN2(pipe); - val = intel_de_read(dev_priv, reg); - /* - * Workaround: Set the timing override bit - * before enabling the pch transcoder. - */ - val |= TRANS_CHICKEN2_TIMING_OVERRIDE; - /* Configure frame start delay to match the CPU */ - val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; - val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1); - intel_de_write(dev_priv, reg, val); - } - - reg = PCH_TRANSCONF(pipe); - val = intel_de_read(dev_priv, reg); - pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe)); - - if (HAS_PCH_IBX(dev_priv)) { - /* Configure frame start delay to match the CPU */ - val &= ~TRANS_FRAME_START_DELAY_MASK; - val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1); - - /* - * Make the BPC in transcoder be consistent with - * that in pipeconf reg. For HDMI we must use 8bpc - * here for both 8bpc and 12bpc. - */ - val &= ~PIPECONF_BPC_MASK; - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - val |= PIPECONF_8BPC; - else - val |= pipeconf_val & PIPECONF_BPC_MASK; - } - - val &= ~TRANS_INTERLACE_MASK; - if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) { - if (HAS_PCH_IBX(dev_priv) && - intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) - val |= TRANS_LEGACY_INTERLACED_ILK; - else - val |= TRANS_INTERLACED; - } else { - val |= TRANS_PROGRESSIVE; - } - - intel_de_write(dev_priv, reg, val | TRANS_ENABLE); - if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100)) - drm_err(&dev_priv->drm, "failed to enable transcoder %c\n", - pipe_name(pipe)); -} - -static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, - enum transcoder cpu_transcoder) -{ - u32 val, pipeconf_val; - - /* FDI must be feeding us bits for PCH ports */ - assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder); - assert_fdi_rx_enabled(dev_priv, PIPE_A); - - val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A)); - /* Workaround: set timing override bit. 
*/ - val |= TRANS_CHICKEN2_TIMING_OVERRIDE; - /* Configure frame start delay to match the CPU */ - val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; - val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1); - intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val); - - val = TRANS_ENABLE; - pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder)); - - if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == - PIPECONF_INTERLACED_ILK) - val |= TRANS_INTERLACED; - else - val |= TRANS_PROGRESSIVE; - - intel_de_write(dev_priv, LPT_TRANSCONF, val); - if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF, - TRANS_STATE_ENABLE, 100)) - drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n"); -} - -static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv, - enum pipe pipe) -{ - i915_reg_t reg; - u32 val; - - /* FDI relies on the transcoder */ - assert_fdi_tx_disabled(dev_priv, pipe); - assert_fdi_rx_disabled(dev_priv, pipe); - - /* Ports must be off as well */ - assert_pch_ports_disabled(dev_priv, pipe); - - reg = PCH_TRANSCONF(pipe); - val = intel_de_read(dev_priv, reg); - val &= ~TRANS_ENABLE; - intel_de_write(dev_priv, reg, val); - /* wait for PCH transcoder off, transcoder state */ - if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50)) - drm_err(&dev_priv->drm, "failed to disable transcoder %c\n", - pipe_name(pipe)); - - if (HAS_PCH_CPT(dev_priv)) { - /* Workaround: Clear the timing override chicken bit again. */ - reg = TRANS_CHICKEN2(pipe); - val = intel_de_read(dev_priv, reg); - val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; - intel_de_write(dev_priv, reg, val); - } -} - -void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) -{ - u32 val; - - val = intel_de_read(dev_priv, LPT_TRANSCONF); - val &= ~TRANS_ENABLE; - intel_de_write(dev_priv, LPT_TRANSCONF, val); - /* wait for PCH transcoder off, transcoder state */ - if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF, - TRANS_STATE_ENABLE, 50)) - drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n"); - - /* Workaround: clear timing override bit. */ - val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A)); - val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; - intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val); -} - enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -823,14 +606,6 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state) intel_wait_for_pipe_off(old_crtc_state); } -bool -intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, - u64 modifier) -{ - return info->is_yuv && - info->num_planes == (is_ccs_modifier(modifier) ? 
4 : 2); -} - unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info) { unsigned int size = 0; @@ -850,7 +625,11 @@ unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) { unsigned int plane_size; - plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height; + if (rem_info->plane[i].linear) + plane_size = rem_info->plane[i].size; + else + plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height; + if (plane_size == 0) continue; @@ -869,7 +648,7 @@ bool intel_plane_uses_fence(const struct intel_plane_state *plane_state) struct drm_i915_private *dev_priv = to_i915(plane->base.dev); return DISPLAY_VER(dev_priv) < 4 || - (plane->has_fbc && + (plane->fbc && plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL); } @@ -885,7 +664,7 @@ u32 intel_fb_xy_to_linear(int x, int y, { const struct drm_framebuffer *fb = state->hw.fb; unsigned int cpp = fb->format->cpp[color_plane]; - unsigned int pitch = state->view.color_plane[color_plane].stride; + unsigned int pitch = state->view.color_plane[color_plane].mapping_stride; return y * pitch + x * cpp; } @@ -904,136 +683,6 @@ void intel_add_fb_offsets(int *x, int *y, *y += state->view.color_plane[color_plane].y; } -/* - * From the Sky Lake PRM: - * "The Color Control Surface (CCS) contains the compression status of - * the cache-line pairs. The compression state of the cache-line pair - * is specified by 2 bits in the CCS. Each CCS cache-line represents - * an area on the main surface of 16 x16 sets of 128 byte Y-tiled - * cache-line-pairs. CCS is always Y tiled." - * - * Since cache line pairs refers to horizontally adjacent cache lines, - * each cache line in the CCS corresponds to an area of 32x16 cache - * lines on the main surface. Since each pixel is 4 bytes, this gives - * us a ratio of one byte in the CCS for each 8x16 pixels in the - * main surface. - */ -static const struct drm_format_info skl_ccs_formats[] = { - { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, - .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, - { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, - .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, - { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, - .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, - { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, - .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, -}; - -/* - * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the - * main surface. And each 64B CCS cache line represents an area of 4x1 Y-tiles - * in the main surface. With 4 byte pixels and each Y-tile having dimensions of - * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in - * the main surface. 
- */ -static const struct drm_format_info gen12_ccs_formats[] = { - { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, - .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, - .hsub = 1, .vsub = 1, }, - { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, - .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, - .hsub = 1, .vsub = 1, }, - { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, - .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, - .hsub = 1, .vsub = 1, .has_alpha = true }, - { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, - .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, - .hsub = 1, .vsub = 1, .has_alpha = true }, - { .format = DRM_FORMAT_YUYV, .num_planes = 2, - .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, - .hsub = 2, .vsub = 1, .is_yuv = true }, - { .format = DRM_FORMAT_YVYU, .num_planes = 2, - .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, - .hsub = 2, .vsub = 1, .is_yuv = true }, - { .format = DRM_FORMAT_UYVY, .num_planes = 2, - .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, - .hsub = 2, .vsub = 1, .is_yuv = true }, - { .format = DRM_FORMAT_VYUY, .num_planes = 2, - .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, - .hsub = 2, .vsub = 1, .is_yuv = true }, - { .format = DRM_FORMAT_XYUV8888, .num_planes = 2, - .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, - .hsub = 1, .vsub = 1, .is_yuv = true }, - { .format = DRM_FORMAT_NV12, .num_planes = 4, - .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 }, - .hsub = 2, .vsub = 2, .is_yuv = true }, - { .format = DRM_FORMAT_P010, .num_planes = 4, - .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, - .hsub = 2, .vsub = 2, .is_yuv = true }, - { .format = DRM_FORMAT_P012, .num_planes = 4, - .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, - .hsub = 2, .vsub = 2, .is_yuv = true }, - { .format = DRM_FORMAT_P016, .num_planes = 4, - .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, - .hsub = 2, .vsub = 2, .is_yuv = true }, -}; - -/* - * Same as gen12_ccs_formats[] above, but with additional surface used - * to pass Clear Color information in plane 2 with 64 bits of data. 
- */ -static const struct drm_format_info gen12_ccs_cc_formats[] = { - { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3, - .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, - .hsub = 1, .vsub = 1, }, - { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3, - .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, - .hsub = 1, .vsub = 1, }, - { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3, - .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, - .hsub = 1, .vsub = 1, .has_alpha = true }, - { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3, - .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, - .hsub = 1, .vsub = 1, .has_alpha = true }, -}; - -static const struct drm_format_info * -lookup_format_info(const struct drm_format_info formats[], - int num_formats, u32 format) -{ - int i; - - for (i = 0; i < num_formats; i++) { - if (formats[i].format == format) - return &formats[i]; - } - - return NULL; -} - -static const struct drm_format_info * -intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd) -{ - switch (cmd->modifier[0]) { - case I915_FORMAT_MOD_Y_TILED_CCS: - case I915_FORMAT_MOD_Yf_TILED_CCS: - return lookup_format_info(skl_ccs_formats, - ARRAY_SIZE(skl_ccs_formats), - cmd->pixel_format); - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: - case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: - return lookup_format_info(gen12_ccs_formats, - ARRAY_SIZE(gen12_ccs_formats), - cmd->pixel_format); - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: - return lookup_format_info(gen12_ccs_cc_formats, - ARRAY_SIZE(gen12_ccs_cc_formats), - cmd->pixel_format); - default: - return NULL; - } -} - u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, u32 pixel_format, u64 modifier) { @@ -1135,7 +784,7 @@ void intel_plane_disable_noatomic(struct intel_crtc *crtc, if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes) intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false); - intel_disable_plane(plane, crtc_state); + intel_plane_disable_arm(plane, crtc_state); intel_wait_for_vblank(dev_priv, crtc->pipe); } @@ -1310,26 +959,6 @@ unlock: clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags); } -static bool underrun_recovery_supported(const struct intel_crtc_state *crtc_state) -{ - if (crtc_state->pch_pfit.enabled && - (crtc_state->pipe_src_w > drm_rect_width(&crtc_state->pch_pfit.dst) || - crtc_state->pipe_src_h > drm_rect_height(&crtc_state->pch_pfit.dst) || - crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)) - return false; - - if (crtc_state->dsc.compression_enable) - return false; - - if (crtc_state->has_psr2) - return false; - - if (crtc_state->splitter.enable) - return false; - - return true; -} - static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -1353,19 +982,14 @@ static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state) */ tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU; - if (IS_DG2(dev_priv)) { - /* - * Underrun recovery must always be disabled on DG2. However - * the chicken bit meaning is inverted compared to other - * platforms. - */ + /* + * Underrun recovery must always be disabled on display 13+. + * DG2 chicken bit meaning is inverted compared to other platforms. 
+ */ + if (IS_DG2(dev_priv)) tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2; - } else if (DISPLAY_VER(dev_priv) >= 13) { - if (underrun_recovery_supported(crtc_state)) - tmp &= ~UNDERRUN_RECOVERY_DISABLE_ADLP; - else - tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP; - } + else if (DISPLAY_VER(dev_priv) >= 13) + tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP; intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp); } @@ -1395,158 +1019,6 @@ bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv) return false; } -void lpt_disable_iclkip(struct drm_i915_private *dev_priv) -{ - u32 temp; - - intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE); - - mutex_lock(&dev_priv->sb_lock); - - temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); - temp |= SBI_SSCCTL_DISABLE; - intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); - - mutex_unlock(&dev_priv->sb_lock); -} - -/* Program iCLKIP clock to the desired frequency */ -static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - int clock = crtc_state->hw.adjusted_mode.crtc_clock; - u32 divsel, phaseinc, auxdiv, phasedir = 0; - u32 temp; - - lpt_disable_iclkip(dev_priv); - - /* The iCLK virtual clock root frequency is in MHz, - * but the adjusted_mode->crtc_clock in in KHz. To get the - * divisors, it is necessary to divide one by another, so we - * convert the virtual clock precision to KHz here for higher - * precision. - */ - for (auxdiv = 0; auxdiv < 2; auxdiv++) { - u32 iclk_virtual_root_freq = 172800 * 1000; - u32 iclk_pi_range = 64; - u32 desired_divisor; - - desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, - clock << auxdiv); - divsel = (desired_divisor / iclk_pi_range) - 2; - phaseinc = desired_divisor % iclk_pi_range; - - /* - * Near 20MHz is a corner case which is - * out of range for the 7-bit divisor - */ - if (divsel <= 0x7f) - break; - } - - /* This should not happen with any sane values */ - drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) & - ~SBI_SSCDIVINTPHASE_DIVSEL_MASK); - drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) & - ~SBI_SSCDIVINTPHASE_INCVAL_MASK); - - drm_dbg_kms(&dev_priv->drm, - "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n", - clock, auxdiv, divsel, phasedir, phaseinc); - - mutex_lock(&dev_priv->sb_lock); - - /* Program SSCDIVINTPHASE6 */ - temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); - temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK; - temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel); - temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK; - temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc); - temp |= SBI_SSCDIVINTPHASE_DIR(phasedir); - temp |= SBI_SSCDIVINTPHASE_PROPAGATE; - intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK); - - /* Program SSCAUXDIV */ - temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); - temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1); - temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv); - intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK); - - /* Enable modulator and associated divider */ - temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); - temp &= ~SBI_SSCCTL_DISABLE; - intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); - - mutex_unlock(&dev_priv->sb_lock); - - /* Wait for initialization time */ - udelay(24); - - intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE); -} - -int lpt_get_iclkip(struct drm_i915_private *dev_priv) -{ - u32 divsel, phaseinc, 
auxdiv; - u32 iclk_virtual_root_freq = 172800 * 1000; - u32 iclk_pi_range = 64; - u32 desired_divisor; - u32 temp; - - if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0) - return 0; - - mutex_lock(&dev_priv->sb_lock); - - temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); - if (temp & SBI_SSCCTL_DISABLE) { - mutex_unlock(&dev_priv->sb_lock); - return 0; - } - - temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); - divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >> - SBI_SSCDIVINTPHASE_DIVSEL_SHIFT; - phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >> - SBI_SSCDIVINTPHASE_INCVAL_SHIFT; - - temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); - auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >> - SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT; - - mutex_unlock(&dev_priv->sb_lock); - - desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc; - - return DIV_ROUND_CLOSEST(iclk_virtual_root_freq, - desired_divisor << auxdiv); -} - -static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state, - enum pipe pch_transcoder) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - - intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder), - intel_de_read(dev_priv, HTOTAL(cpu_transcoder))); - intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder), - intel_de_read(dev_priv, HBLANK(cpu_transcoder))); - intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder), - intel_de_read(dev_priv, HSYNC(cpu_transcoder))); - - intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder), - intel_de_read(dev_priv, VTOTAL(cpu_transcoder))); - intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder), - intel_de_read(dev_priv, VBLANK(cpu_transcoder))); - intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder), - intel_de_read(dev_priv, VSYNC(cpu_transcoder))); - intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder), - intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder))); -} - /* * Finds the encoder associated with the given CRTC. This can only be * used when we know that the CRTC isn't feeding multiple encoders! 
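For reference, the lpt_program_iclkip()/lpt_get_iclkip() logic removed from intel_display.c above boils down to a handful of integer operations. The following standalone sketch is illustrative only: it reuses the 172.8 MHz virtual root frequency and 64-step phase-interpolator range quoted in the removed comments, and the 108000 kHz dotclock is just a hypothetical example value.

/*
 * Illustrative sketch (not part of the patch): forward and reverse iCLKIP
 * divisor math, mirroring the removed lpt_program_iclkip()/lpt_get_iclkip().
 */
#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel macro, valid for unsigned arguments. */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	const uint32_t iclk_virtual_root_freq = 172800 * 1000;	/* kHz */
	const uint32_t iclk_pi_range = 64;	/* phase interpolator steps */
	uint32_t clock = 108000;		/* hypothetical dotclock in kHz */
	uint32_t auxdiv, divsel = 0, phaseinc = 0, desired_divisor = 0;

	/* Pick the smallest auxdiv that keeps divsel within its 7-bit field. */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = desired_divisor / iclk_pi_range - 2;
		phaseinc = desired_divisor % iclk_pi_range;
		if (divsel <= 0x7f)
			break;
	}

	/* Reverse direction, as done on readout. */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
	printf("divsel=%u phaseinc=%u auxdiv=%u -> %u kHz\n",
	       divsel, phaseinc, auxdiv,
	       DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv));
	return 0;
}

With these inputs the sketch prints divsel=23 phaseinc=0 auxdiv=0 -> 108000 kHz, matching the round trip the driver performs between programming the divisor and reading it back.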
@@ -1555,15 +1027,17 @@ struct intel_encoder * intel_get_crtc_new_encoder(const struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); const struct drm_connector_state *connector_state; const struct drm_connector *connector; struct intel_encoder *encoder = NULL; + struct intel_crtc *master_crtc; int num_encoders = 0; int i; + master_crtc = intel_master_crtc(crtc_state); + for_each_new_connector_in_state(&state->base, connector, connector_state, i) { - if (connector_state->crtc != &crtc->base) + if (connector_state->crtc != &master_crtc->base) continue; encoder = to_intel_encoder(connector_state->best_encoder); @@ -1572,111 +1046,11 @@ intel_get_crtc_new_encoder(const struct intel_atomic_state *state, drm_WARN(encoder->base.dev, num_encoders != 1, "%d encoders for pipe %c\n", - num_encoders, pipe_name(crtc->pipe)); + num_encoders, pipe_name(master_crtc->pipe)); return encoder; } -/* - * Enable PCH resources required for PCH ports: - * - PCH PLLs - * - FDI training & RX/TX - * - update transcoder timings - * - DP transcoding bits - * - transcoder - */ -static void ilk_pch_enable(const struct intel_atomic_state *state, - const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - enum pipe pipe = crtc->pipe; - u32 temp; - - assert_pch_transcoder_disabled(dev_priv, pipe); - - /* For PCH output, training FDI link */ - intel_fdi_link_train(crtc, crtc_state); - - /* We need to program the right clock selection before writing the pixel - * mutliplier into the DPLL. */ - if (HAS_PCH_CPT(dev_priv)) { - u32 sel; - - temp = intel_de_read(dev_priv, PCH_DPLL_SEL); - temp |= TRANS_DPLL_ENABLE(pipe); - sel = TRANS_DPLLB_SEL(pipe); - if (crtc_state->shared_dpll == - intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B)) - temp |= sel; - else - temp &= ~sel; - intel_de_write(dev_priv, PCH_DPLL_SEL, temp); - } - - /* XXX: pch pll's can be enabled any time before we enable the PCH - * transcoder, and we actually should do this to not upset any PCH - * transcoder that already use the clock when we share it. - * - * Note that enable_shared_dpll tries to do the right thing, but - * get_shared_dpll unconditionally resets the pll - we need that to have - * the right LVDS enable sequence. 
*/ - intel_enable_shared_dpll(crtc_state); - - /* set transcoder timing, panel must allow it */ - assert_pps_unlocked(dev_priv, pipe); - ilk_pch_transcoder_set_timings(crtc_state, pipe); - - intel_fdi_normal_train(crtc); - - /* For PCH DP, enable TRANS_DP_CTL */ - if (HAS_PCH_CPT(dev_priv) && - intel_crtc_has_dp_encoder(crtc_state)) { - const struct drm_display_mode *adjusted_mode = - &crtc_state->hw.adjusted_mode; - u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; - i915_reg_t reg = TRANS_DP_CTL(pipe); - enum port port; - - temp = intel_de_read(dev_priv, reg); - temp &= ~(TRANS_DP_PORT_SEL_MASK | - TRANS_DP_SYNC_MASK | - TRANS_DP_BPC_MASK); - temp |= TRANS_DP_OUTPUT_ENABLE; - temp |= bpc << 9; /* same format but at 11:9 */ - - if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) - temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; - if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) - temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; - - port = intel_get_crtc_new_encoder(state, crtc_state)->port; - drm_WARN_ON(dev, port < PORT_B || port > PORT_D); - temp |= TRANS_DP_PORT_SEL(port); - - intel_de_write(dev_priv, reg, temp); - } - - ilk_enable_pch_transcoder(crtc_state); -} - -void lpt_pch_enable(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - - assert_pch_transcoder_disabled(dev_priv, PIPE_A); - - lpt_program_iclkip(crtc_state); - - /* Set transcoder timing. */ - ilk_pch_transcoder_set_timings(crtc_state, PIPE_A); - - lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); -} - static void cpt_verify_modeset(struct drm_i915_private *dev_priv, enum pipe pipe) { @@ -1919,7 +1293,7 @@ static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); - return crtc_state->uapi.async_flip && intel_vtd_active() && + return crtc_state->uapi.async_flip && intel_vtd_active(i915) && (DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915)); } @@ -2054,6 +1428,8 @@ static void intel_pre_plane_update(struct intel_atomic_state *state, intel_atomic_get_new_crtc_state(state, crtc); enum pipe pipe = crtc->pipe; + intel_psr_pre_plane_update(state, crtc); + if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state)) hsw_disable_ips(old_crtc_state); @@ -2165,7 +1541,7 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state, !(update_mask & BIT(plane->id))) continue; - intel_disable_plane(plane, new_crtc_state); + intel_plane_disable_arm(plane, new_crtc_state); if (old_plane_state->uapi.visible) fb_bits |= plane->frontbuffer_bit; @@ -2199,10 +1575,30 @@ intel_connector_primary_encoder(struct intel_connector *connector) static void intel_encoders_update_prepare(struct intel_atomic_state *state) { + struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_crtc_state *new_crtc_state, *old_crtc_state; + struct intel_crtc *crtc; struct drm_connector_state *new_conn_state; struct drm_connector *connector; int i; + /* + * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits. + * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook. 
+ */ + if (i915->dpll.mgr) { + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + if (intel_crtc_needs_modeset(new_crtc_state)) + continue; + + new_crtc_state->shared_dpll = old_crtc_state->shared_dpll; + new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state; + } + } + + if (!state->modeset) + return; + for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) { struct intel_connector *intel_connector; @@ -2229,6 +1625,9 @@ static void intel_encoders_update_complete(struct intel_atomic_state *state) struct drm_connector *connector; int i; + if (!state->modeset) + return; + for_each_new_connector_in_state(&state->base, connector, new_conn_state, i) { struct intel_connector *intel_connector; @@ -2316,28 +1715,6 @@ static void intel_encoders_enable(struct intel_atomic_state *state, } } -static void intel_encoders_pre_disable(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - const struct intel_crtc_state *old_crtc_state = - intel_atomic_get_old_crtc_state(state, crtc); - const struct drm_connector_state *old_conn_state; - struct drm_connector *conn; - int i; - - for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { - struct intel_encoder *encoder = - to_intel_encoder(old_conn_state->best_encoder); - - if (old_conn_state->crtc != &crtc->base) - continue; - - if (encoder->pre_disable) - encoder->pre_disable(state, encoder, old_crtc_state, - old_conn_state); - } -} - static void intel_encoders_disable(struct intel_atomic_state *state, struct intel_crtc *crtc) { @@ -2432,7 +1809,7 @@ static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_stat struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_plane *plane = to_intel_plane(crtc->base.primary); - plane->disable_plane(plane, crtc_state); + plane->disable_arm(plane, crtc_state); } static void ilk_crtc_enable(struct intel_atomic_state *state, @@ -2500,7 +1877,7 @@ static void ilk_crtc_enable(struct intel_atomic_state *state, intel_enable_transcoder(new_crtc_state); if (new_crtc_state->has_pch_encoder) - ilk_pch_enable(state, new_crtc_state); + ilk_pch_enable(state, crtc); intel_crtc_vblank_on(new_crtc_state); @@ -2592,42 +1969,39 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state) static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state) { - struct intel_crtc *master = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(master->base.dev); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *master_crtc_state; + struct intel_crtc *master_crtc; struct drm_connector_state *conn_state; struct drm_connector *conn; struct intel_encoder *encoder = NULL; int i; - if (crtc_state->bigjoiner_slave) - master = crtc_state->bigjoiner_linked_crtc; - - master_crtc_state = intel_atomic_get_new_crtc_state(state, master); + master_crtc = intel_master_crtc(crtc_state); + master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc); for_each_new_connector_in_state(&state->base, conn, conn_state, i) { - if (conn_state->crtc != &master->base) + if (conn_state->crtc != &master_crtc->base) continue; encoder = to_intel_encoder(conn_state->best_encoder); break; } - if (!crtc_state->bigjoiner_slave) { - /* need to enable VDSC, which we skipped in pre-enable */ - intel_dsc_enable(encoder, crtc_state); - } else { - /* - * Enable sequence steps 1-7 on bigjoiner master - */ 
- intel_encoders_pre_pll_enable(state, master); - if (master_crtc_state->shared_dpll) - intel_enable_shared_dpll(master_crtc_state); - intel_encoders_pre_enable(state, master); + /* + * Enable sequence steps 1-7 on bigjoiner master + */ + if (crtc_state->bigjoiner_slave) + intel_encoders_pre_pll_enable(state, master_crtc); - /* and DSC on slave */ - intel_dsc_enable(NULL, crtc_state); - } + if (crtc_state->shared_dpll) + intel_enable_shared_dpll(crtc_state); + + if (crtc_state->bigjoiner_slave) + intel_encoders_pre_enable(state, master_crtc); + + /* need to enable VDSC, which we skipped in pre-enable */ + intel_dsc_enable(crtc_state); if (DISPLAY_VER(dev_priv) >= 13) intel_uncompressed_joiner_enable(crtc_state); @@ -2774,33 +2148,12 @@ static void ilk_crtc_disable(struct intel_atomic_state *state, ilk_pfit_disable(old_crtc_state); if (old_crtc_state->has_pch_encoder) - ilk_fdi_disable(crtc); + ilk_pch_disable(state, crtc); intel_encoders_post_disable(state, crtc); - if (old_crtc_state->has_pch_encoder) { - ilk_disable_pch_transcoder(dev_priv, pipe); - - if (HAS_PCH_CPT(dev_priv)) { - i915_reg_t reg; - u32 temp; - - /* disable TRANS_DP_CTL */ - reg = TRANS_DP_CTL(pipe); - temp = intel_de_read(dev_priv, reg); - temp &= ~(TRANS_DP_OUTPUT_ENABLE | - TRANS_DP_PORT_SEL_MASK); - temp |= TRANS_DP_PORT_SEL_NONE; - intel_de_write(dev_priv, reg, temp); - - /* disable DPLL_SEL */ - temp = intel_de_read(dev_priv, PCH_DPLL_SEL); - temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe)); - intel_de_write(dev_priv, PCH_DPLL_SEL, temp); - } - - ilk_fdi_pll_disable(crtc); - } + if (old_crtc_state->has_pch_encoder) + ilk_pch_post_disable(state, crtc); intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); @@ -2809,12 +2162,17 @@ static void ilk_crtc_disable(struct intel_atomic_state *state, static void hsw_crtc_disable(struct intel_atomic_state *state, struct intel_crtc *crtc) { + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + /* * FIXME collapse everything to one hook. * Need care with mst->ddi interactions. 
*/ - intel_encoders_disable(state, crtc); - intel_encoders_post_disable(state, crtc); + if (!old_crtc_state->bigjoiner_slave) { + intel_encoders_disable(state, crtc); + intel_encoders_post_disable(state, crtc); + } } static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state) @@ -4306,414 +3664,6 @@ out: return ret; } -static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) -{ - struct intel_encoder *encoder; - int i; - u32 val, final; - bool has_lvds = false; - bool has_cpu_edp = false; - bool has_panel = false; - bool has_ck505 = false; - bool can_ssc = false; - bool using_ssc_source = false; - - /* We need to take the global config into account */ - for_each_intel_encoder(&dev_priv->drm, encoder) { - switch (encoder->type) { - case INTEL_OUTPUT_LVDS: - has_panel = true; - has_lvds = true; - break; - case INTEL_OUTPUT_EDP: - has_panel = true; - if (encoder->port == PORT_A) - has_cpu_edp = true; - break; - default: - break; - } - } - - if (HAS_PCH_IBX(dev_priv)) { - has_ck505 = dev_priv->vbt.display_clock_mode; - can_ssc = has_ck505; - } else { - has_ck505 = false; - can_ssc = true; - } - - /* Check if any DPLLs are using the SSC source */ - for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) { - u32 temp = intel_de_read(dev_priv, PCH_DPLL(i)); - - if (!(temp & DPLL_VCO_ENABLE)) - continue; - - if ((temp & PLL_REF_INPUT_MASK) == - PLLB_REF_INPUT_SPREADSPECTRUMIN) { - using_ssc_source = true; - break; - } - } - - drm_dbg_kms(&dev_priv->drm, - "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n", - has_panel, has_lvds, has_ck505, using_ssc_source); - - /* Ironlake: try to setup display ref clock before DPLL - * enabling. This is only under driver's control after - * PCH B stepping, previous chipset stepping should be - * ignoring this setting. - */ - val = intel_de_read(dev_priv, PCH_DREF_CONTROL); - - /* As we must carefully and slowly disable/enable each source in turn, - * compute the final state we want first and check if we need to - * make any changes at all. 
- */ - final = val; - final &= ~DREF_NONSPREAD_SOURCE_MASK; - if (has_ck505) - final |= DREF_NONSPREAD_CK505_ENABLE; - else - final |= DREF_NONSPREAD_SOURCE_ENABLE; - - final &= ~DREF_SSC_SOURCE_MASK; - final &= ~DREF_CPU_SOURCE_OUTPUT_MASK; - final &= ~DREF_SSC1_ENABLE; - - if (has_panel) { - final |= DREF_SSC_SOURCE_ENABLE; - - if (intel_panel_use_ssc(dev_priv) && can_ssc) - final |= DREF_SSC1_ENABLE; - - if (has_cpu_edp) { - if (intel_panel_use_ssc(dev_priv) && can_ssc) - final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; - else - final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; - } else - final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; - } else if (using_ssc_source) { - final |= DREF_SSC_SOURCE_ENABLE; - final |= DREF_SSC1_ENABLE; - } - - if (final == val) - return; - - /* Always enable nonspread source */ - val &= ~DREF_NONSPREAD_SOURCE_MASK; - - if (has_ck505) - val |= DREF_NONSPREAD_CK505_ENABLE; - else - val |= DREF_NONSPREAD_SOURCE_ENABLE; - - if (has_panel) { - val &= ~DREF_SSC_SOURCE_MASK; - val |= DREF_SSC_SOURCE_ENABLE; - - /* SSC must be turned on before enabling the CPU output */ - if (intel_panel_use_ssc(dev_priv) && can_ssc) { - drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n"); - val |= DREF_SSC1_ENABLE; - } else - val &= ~DREF_SSC1_ENABLE; - - /* Get SSC going before enabling the outputs */ - intel_de_write(dev_priv, PCH_DREF_CONTROL, val); - intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); - udelay(200); - - val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; - - /* Enable CPU source on CPU attached eDP */ - if (has_cpu_edp) { - if (intel_panel_use_ssc(dev_priv) && can_ssc) { - drm_dbg_kms(&dev_priv->drm, - "Using SSC on eDP\n"); - val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; - } else - val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; - } else - val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; - - intel_de_write(dev_priv, PCH_DREF_CONTROL, val); - intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); - udelay(200); - } else { - drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n"); - - val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; - - /* Turn off CPU output */ - val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; - - intel_de_write(dev_priv, PCH_DREF_CONTROL, val); - intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); - udelay(200); - - if (!using_ssc_source) { - drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n"); - - /* Turn off the SSC source */ - val &= ~DREF_SSC_SOURCE_MASK; - val |= DREF_SSC_SOURCE_DISABLE; - - /* Turn off SSC1 */ - val &= ~DREF_SSC1_ENABLE; - - intel_de_write(dev_priv, PCH_DREF_CONTROL, val); - intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); - udelay(200); - } - } - - BUG_ON(val != final); -} - -/* Implements 3 different sequences from BSpec chapter "Display iCLK - * Programming" based on the parameters passed: - * - Sequence to enable CLKOUT_DP - * - Sequence to enable CLKOUT_DP without spread - * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O - */ -static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv, - bool with_spread, bool with_fdi) -{ - u32 reg, tmp; - - if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread, - "FDI requires downspread\n")) - with_spread = true; - if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) && - with_fdi, "LP PCH doesn't have FDI\n")) - with_fdi = false; - - mutex_lock(&dev_priv->sb_lock); - - tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); - tmp &= ~SBI_SSCCTL_DISABLE; - tmp |= SBI_SSCCTL_PATHALT; - intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); - - udelay(24); - - if (with_spread) { - tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, 
SBI_ICLK); - tmp &= ~SBI_SSCCTL_PATHALT; - intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); - - if (with_fdi) - lpt_fdi_program_mphy(dev_priv); - } - - reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0; - tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); - tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; - intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); - - mutex_unlock(&dev_priv->sb_lock); -} - -/* Sequence to disable CLKOUT_DP */ -void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv) -{ - u32 reg, tmp; - - mutex_lock(&dev_priv->sb_lock); - - reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0; - tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); - tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE; - intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); - - tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); - if (!(tmp & SBI_SSCCTL_DISABLE)) { - if (!(tmp & SBI_SSCCTL_PATHALT)) { - tmp |= SBI_SSCCTL_PATHALT; - intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); - udelay(32); - } - tmp |= SBI_SSCCTL_DISABLE; - intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); - } - - mutex_unlock(&dev_priv->sb_lock); -} - -#define BEND_IDX(steps) ((50 + (steps)) / 5) - -static const u16 sscdivintphase[] = { - [BEND_IDX( 50)] = 0x3B23, - [BEND_IDX( 45)] = 0x3B23, - [BEND_IDX( 40)] = 0x3C23, - [BEND_IDX( 35)] = 0x3C23, - [BEND_IDX( 30)] = 0x3D23, - [BEND_IDX( 25)] = 0x3D23, - [BEND_IDX( 20)] = 0x3E23, - [BEND_IDX( 15)] = 0x3E23, - [BEND_IDX( 10)] = 0x3F23, - [BEND_IDX( 5)] = 0x3F23, - [BEND_IDX( 0)] = 0x0025, - [BEND_IDX( -5)] = 0x0025, - [BEND_IDX(-10)] = 0x0125, - [BEND_IDX(-15)] = 0x0125, - [BEND_IDX(-20)] = 0x0225, - [BEND_IDX(-25)] = 0x0225, - [BEND_IDX(-30)] = 0x0325, - [BEND_IDX(-35)] = 0x0325, - [BEND_IDX(-40)] = 0x0425, - [BEND_IDX(-45)] = 0x0425, - [BEND_IDX(-50)] = 0x0525, -}; - -/* - * Bend CLKOUT_DP - * steps -50 to 50 inclusive, in steps of 5 - * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz) - * change in clock period = -(steps / 10) * 5.787 ps - */ -static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps) -{ - u32 tmp; - int idx = BEND_IDX(steps); - - if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0)) - return; - - if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase))) - return; - - mutex_lock(&dev_priv->sb_lock); - - if (steps % 10 != 0) - tmp = 0xAAAAAAAB; - else - tmp = 0x00000000; - intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK); - - tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK); - tmp &= 0xffff0000; - tmp |= sscdivintphase[idx]; - intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK); - - mutex_unlock(&dev_priv->sb_lock); -} - -#undef BEND_IDX - -static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv) -{ - u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP); - u32 ctl = intel_de_read(dev_priv, SPLL_CTL); - - if ((ctl & SPLL_PLL_ENABLE) == 0) - return false; - - if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC && - (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) - return true; - - if (IS_BROADWELL(dev_priv) && - (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW) - return true; - - return false; -} - -static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv, - enum intel_dpll_id id) -{ - u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP); - u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id)); - - if ((ctl & WRPLL_PLL_ENABLE) == 0) - return false; - - if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC) - return true; - - if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) && - (ctl & WRPLL_REF_MASK) == 
WRPLL_REF_MUXED_SSC_BDW && - (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) - return true; - - return false; -} - -static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) -{ - struct intel_encoder *encoder; - bool has_fdi = false; - - for_each_intel_encoder(&dev_priv->drm, encoder) { - switch (encoder->type) { - case INTEL_OUTPUT_ANALOG: - has_fdi = true; - break; - default: - break; - } - } - - /* - * The BIOS may have decided to use the PCH SSC - * reference so we must not disable it until the - * relevant PLLs have stopped relying on it. We'll - * just leave the PCH SSC reference enabled in case - * any active PLL is using it. It will get disabled - * after runtime suspend if we don't have FDI. - * - * TODO: Move the whole reference clock handling - * to the modeset sequence proper so that we can - * actually enable/disable/reconfigure these things - * safely. To do that we need to introduce a real - * clock hierarchy. That would also allow us to do - * clock bending finally. - */ - dev_priv->pch_ssc_use = 0; - - if (spll_uses_pch_ssc(dev_priv)) { - drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n"); - dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL); - } - - if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) { - drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n"); - dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1); - } - - if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) { - drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n"); - dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2); - } - - if (dev_priv->pch_ssc_use) - return; - - if (has_fdi) { - lpt_bend_clkout_dp(dev_priv, 0); - lpt_enable_clkout_dp(dev_priv, true, true); - } else { - lpt_disable_clkout_dp(dev_priv); - } -} - -/* - * Initialize reference clocks when the driver loads - */ -void intel_init_pch_refclk(struct drm_i915_private *dev_priv) -{ - if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) - ilk_init_pch_refclk(dev_priv); - else if (HAS_PCH_LPT(dev_priv)) - lpt_init_pch_refclk(dev_priv); -} - static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -4978,8 +3928,8 @@ void intel_dp_get_m_n(struct intel_crtc *crtc, &pipe_config->dp_m2_n2); } -static void ilk_get_fdi_m_n_config(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +void ilk_get_fdi_m_n_config(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config) { intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, &pipe_config->fdi_m_n, NULL); @@ -5116,50 +4066,9 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc, i9xx_get_pipe_color_config(pipe_config); intel_color_get_config(pipe_config); - if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { - struct intel_shared_dpll *pll; - enum intel_dpll_id pll_id; - bool pll_active; - - pipe_config->has_pch_encoder = true; - - tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe)); - pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> - FDI_DP_PORT_WIDTH_SHIFT) + 1; - - ilk_get_fdi_m_n_config(crtc, pipe_config); - - if (HAS_PCH_IBX(dev_priv)) { - /* - * The pipe->pch transcoder and pch transcoder->pll - * mapping is fixed. 
- */ - pll_id = (enum intel_dpll_id) crtc->pipe; - } else { - tmp = intel_de_read(dev_priv, PCH_DPLL_SEL); - if (tmp & TRANS_DPLLB_SEL(crtc->pipe)) - pll_id = DPLL_ID_PCH_PLL_B; - else - pll_id= DPLL_ID_PCH_PLL_A; - } - - pipe_config->shared_dpll = - intel_get_shared_dpll_by_id(dev_priv, pll_id); - pll = pipe_config->shared_dpll; - - pll_active = intel_dpll_get_hw_state(dev_priv, pll, - &pipe_config->dpll_hw_state); - drm_WARN_ON(dev, !pll_active); - - tmp = pipe_config->dpll_hw_state.dpll; - pipe_config->pixel_multiplier = - ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK) - >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1; + pipe_config->pixel_multiplier = 1; - ilk_pch_clock_get(crtc, pipe_config); - } else { - pipe_config->pixel_multiplier = 1; - } + ilk_pch_get_config(pipe_config); intel_get_transcoder_timings(crtc, pipe_config); intel_get_pipe_src_size(crtc, pipe_config); @@ -5174,6 +4083,16 @@ out: return ret; } +static u8 bigjoiner_pipes(struct drm_i915_private *i915) +{ + if (DISPLAY_VER(i915) >= 12) + return BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D); + else if (DISPLAY_VER(i915) >= 11) + return BIT(PIPE_B) | BIT(PIPE_C); + else + return 0; +} + static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder) { @@ -5189,6 +4108,54 @@ static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv, return tmp & TRANS_DDI_FUNC_ENABLE; } +static u8 enabled_bigjoiner_pipes(struct drm_i915_private *dev_priv) +{ + u8 master_pipes = 0, slave_pipes = 0; + struct intel_crtc *crtc; + + for_each_intel_crtc(&dev_priv->drm, crtc) { + enum intel_display_power_domain power_domain; + enum pipe pipe = crtc->pipe; + intel_wakeref_t wakeref; + + if ((bigjoiner_pipes(dev_priv) & BIT(pipe)) == 0) + continue; + + power_domain = intel_dsc_power_domain(crtc, (enum transcoder) pipe); + with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) { + u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe)); + + if (!(tmp & BIG_JOINER_ENABLE)) + continue; + + if (tmp & MASTER_BIG_JOINER_ENABLE) + master_pipes |= BIT(pipe); + else + slave_pipes |= BIT(pipe); + } + + if (DISPLAY_VER(dev_priv) < 13) + continue; + + power_domain = POWER_DOMAIN_PIPE(pipe); + with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref) { + u32 tmp = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe)); + + if (tmp & UNCOMPRESSED_JOINER_MASTER) + master_pipes |= BIT(pipe); + if (tmp & UNCOMPRESSED_JOINER_SLAVE) + slave_pipes |= BIT(pipe); + } + } + + /* Bigjoiner pipes should always be consecutive master and slave */ + drm_WARN(&dev_priv->drm, slave_pipes != master_pipes << 1, + "Bigjoiner misconfigured (master pipes 0x%x, slave pipes 0x%x)\n", + master_pipes, slave_pipes); + + return slave_pipes; +} + static u8 hsw_panel_transcoders(struct drm_i915_private *i915) { u8 panel_transcoder_mask = BIT(TRANSCODER_EDP); @@ -5250,10 +4217,18 @@ static u8 hsw_enabled_transcoders(struct intel_crtc *crtc) enabled_transcoders |= BIT(cpu_transcoder); } + /* single pipe or bigjoiner master */ cpu_transcoder = (enum transcoder) crtc->pipe; if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder)) enabled_transcoders |= BIT(cpu_transcoder); + /* bigjoiner slave -> consider the master pipe's transcoder as well */ + if (enabled_bigjoiner_pipes(dev_priv) & BIT(crtc->pipe)) { + cpu_transcoder = (enum transcoder) crtc->pipe - 1; + if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder)) + enabled_transcoders |= BIT(cpu_transcoder); + } + return enabled_transcoders; } @@ 
-5374,45 +4349,6 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, return transcoder_is_dsi(pipe_config->cpu_transcoder); } -static void hsw_get_ddi_port_state(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; - enum port port; - u32 tmp; - - if (transcoder_is_dsi(cpu_transcoder)) { - port = (cpu_transcoder == TRANSCODER_DSI_A) ? - PORT_A : PORT_B; - } else { - tmp = intel_de_read(dev_priv, - TRANS_DDI_FUNC_CTL(cpu_transcoder)); - if (!(tmp & TRANS_DDI_FUNC_ENABLE)) - return; - if (DISPLAY_VER(dev_priv) >= 12) - port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp); - else - port = TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp); - } - - /* - * Haswell has only FDI/PCH transcoder A. It is which is connected to - * DDI E. So just check whether this pipe is wired to DDI E and whether - * the PCH transcoder is on. - */ - if (DISPLAY_VER(dev_priv) < 9 && - (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) { - pipe_config->has_pch_encoder = true; - - tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); - pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> - FDI_DP_PORT_WIDTH_SHIFT) + 1; - - ilk_get_fdi_m_n_config(crtc, pipe_config); - } -} - static bool hsw_get_pipe_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { @@ -5439,21 +4375,12 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, if (DISPLAY_VER(dev_priv) >= 13 && !pipe_config->dsc.compression_enable) intel_uncompressed_joiner_get_config(pipe_config); - if (!active) { - /* bigjoiner slave doesn't enable transcoder */ - if (!pipe_config->bigjoiner_slave) - goto out; - - active = true; - pipe_config->pixel_multiplier = 1; + if (!active) + goto out; - /* we cannot read out most state, so don't bother.. */ - pipe_config->quirks |= PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE; - } else if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || - DISPLAY_VER(dev_priv) >= 11) { - hsw_get_ddi_port_state(crtc, pipe_config); + if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || + DISPLAY_VER(dev_priv) >= 11) intel_get_transcoder_timings(crtc, pipe_config); - } if (HAS_VRR(dev_priv) && !transcoder_is_dsi(pipe_config->cpu_transcoder)) intel_vrr_get_config(crtc, pipe_config); @@ -5521,10 +4448,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, } } - if (pipe_config->bigjoiner_slave) { - /* Cannot be read out as a slave, set to 0. */ - pipe_config->pixel_multiplier = 0; - } else if (pipe_config->cpu_transcoder != TRANSCODER_EDP && + if (pipe_config->cpu_transcoder != TRANSCODER_EDP && !transcoder_is_dsi(pipe_config->cpu_transcoder)) { pipe_config->pixel_multiplier = intel_de_read(dev_priv, @@ -5782,8 +4706,8 @@ static int i9xx_pll_refclk(struct drm_device *dev, } /* Returns the clock of the currently programmed mode of the given pipe. 
*/ -static void i9xx_crtc_clock_get(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +void i9xx_crtc_clock_get(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -5893,24 +4817,6 @@ int intel_dotclock_calculate(int link_freq, return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n); } -static void ilk_pch_clock_get(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - - /* read out port_clock from the DPLL */ - i9xx_crtc_clock_get(crtc, pipe_config); - - /* - * In case there is an active pipe without active ports, - * we may need some idea for the dotclock anyway. - * Calculate one based on the FDI configuration. - */ - pipe_config->hw.adjusted_mode.crtc_clock = - intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), - &pipe_config->fdi_m_n); -} - /* Returns the currently programmed mode of the given encoder. */ struct drm_display_mode * intel_encoder_current_mode(struct intel_encoder *encoder) @@ -6245,6 +5151,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE; linked_state->color_ctl = plane_state->color_ctl; linked_state->view = plane_state->view; + linked_state->decrypt = plane_state->decrypt; intel_plane_copy_hw_state(linked_state, plane_state); linked_state->uapi.src = plane_state->uapi.src; @@ -6371,8 +5278,6 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state, crtc_state->update_wm_post = true; if (mode_changed && crtc_state->hw.enable && - dev_priv->dpll_funcs && - !crtc_state->bigjoiner_slave && !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) { ret = dev_priv->dpll_funcs->crtc_compute_clock(crtc_state); if (ret) @@ -6928,18 +5833,15 @@ static void intel_crtc_copy_uapi_to_hw_state_nomodeset(struct intel_atomic_state *state, struct intel_crtc_state *crtc_state) { - const struct intel_crtc_state *from_crtc_state = crtc_state; + const struct intel_crtc_state *master_crtc_state; + struct intel_crtc *master_crtc; - if (crtc_state->bigjoiner_slave) { - from_crtc_state = intel_atomic_get_new_crtc_state(state, - crtc_state->bigjoiner_linked_crtc); + master_crtc = intel_master_crtc(crtc_state); + master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc); - /* No need to copy state if the master state is unchanged */ - if (!from_crtc_state) - return; - } - - intel_crtc_copy_color_blobs(crtc_state, from_crtc_state); + /* No need to copy state if the master state is unchanged */ + if (master_crtc_state) + intel_crtc_copy_color_blobs(crtc_state, master_crtc_state); } static void @@ -6982,7 +5884,6 @@ copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state, const struct intel_crtc_state *from_crtc_state) { struct intel_crtc_state *saved_state; - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); saved_state = kmemdup(from_crtc_state, sizeof(*saved_state), GFP_KERNEL); if (!saved_state) @@ -7012,8 +5913,8 @@ copy_bigjoiner_crtc_state(struct intel_crtc_state *crtc_state, crtc_state->nv12_planes = crtc_state->c8_planes = crtc_state->update_planes = 0; crtc_state->bigjoiner_linked_crtc = to_intel_crtc(from_crtc_state->uapi.crtc); crtc_state->bigjoiner_slave = true; - crtc_state->cpu_transcoder = (enum transcoder)crtc->pipe; - crtc_state->has_audio = false; + crtc_state->cpu_transcoder = from_crtc_state->cpu_transcoder; + 
crtc_state->has_audio = from_crtc_state->has_audio; return 0; } @@ -7609,51 +6510,48 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_X(output_types); - /* FIXME do the readout properly and get rid of this quirk */ - if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) { - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay); - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal); - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start); - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end); - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start); - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end); - - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay); - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal); - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start); - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end); - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start); - PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end); - - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end); - - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start); - PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end); - - PIPE_CONF_CHECK_I(pixel_multiplier); - + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hdisplay); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_htotal); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_start); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hblank_end); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_start); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_hsync_end); + + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vdisplay); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vtotal); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_start); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vblank_end); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_start); + PIPE_CONF_CHECK_I(hw.pipe_mode.crtc_vsync_end); + + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hdisplay); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_htotal); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_start); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hblank_end); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_start); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_hsync_end); + + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vdisplay); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vtotal); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_start); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vblank_end); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_start); + PIPE_CONF_CHECK_I(hw.adjusted_mode.crtc_vsync_end); + + PIPE_CONF_CHECK_I(pixel_multiplier); + + PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, + DRM_MODE_FLAG_INTERLACE); + + if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, - DRM_MODE_FLAG_INTERLACE); - - if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { - PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, - DRM_MODE_FLAG_PHSYNC); - PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, - DRM_MODE_FLAG_NHSYNC); - PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, - DRM_MODE_FLAG_PVSYNC); - PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, - DRM_MODE_FLAG_NVSYNC); - } + DRM_MODE_FLAG_PHSYNC); + 
PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, + DRM_MODE_FLAG_NHSYNC); + PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, + DRM_MODE_FLAG_PVSYNC); + PIPE_CONF_CHECK_FLAGS(hw.adjusted_mode.flags, + DRM_MODE_FLAG_NVSYNC); } PIPE_CONF_CHECK_I(output_format); @@ -7665,9 +6563,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_BOOL(hdmi_scrambling); PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio); PIPE_CONF_CHECK_BOOL(has_infoframe); - /* FIXME do the readout properly and get rid of this quirk */ - if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) - PIPE_CONF_CHECK_BOOL(fec_enable); + PIPE_CONF_CHECK_BOOL(fec_enable); PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio); @@ -7696,9 +6592,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, } PIPE_CONF_CHECK_I(scaler_state.scaler_id); - /* FIXME do the readout properly and get rid of this quirk */ - if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) - PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate); + PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate); PIPE_CONF_CHECK_X(gamma_mode); if (IS_CHERRYVIEW(dev_priv)) @@ -7725,11 +6619,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_BOOL(double_wide); - if (dev_priv->dpll.mgr) + if (dev_priv->dpll.mgr) { PIPE_CONF_CHECK_P(shared_dpll); - /* FIXME do the readout properly and get rid of this quirk */ - if (dev_priv->dpll.mgr && !PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) { PIPE_CONF_CHECK_X(dpll_hw_state.dpll); PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); PIPE_CONF_CHECK_X(dpll_hw_state.fp0); @@ -7763,19 +6655,17 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias); } - if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) { - PIPE_CONF_CHECK_X(dsi_pll.ctrl); - PIPE_CONF_CHECK_X(dsi_pll.div); + PIPE_CONF_CHECK_X(dsi_pll.ctrl); + PIPE_CONF_CHECK_X(dsi_pll.div); - if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5) - PIPE_CONF_CHECK_I(pipe_bpp); + if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5) + PIPE_CONF_CHECK_I(pipe_bpp); - PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock); - PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock); - PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); + PIPE_CONF_CHECK_CLOCK_FUZZY(hw.pipe_mode.crtc_clock); + PIPE_CONF_CHECK_CLOCK_FUZZY(hw.adjusted_mode.crtc_clock); + PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); - PIPE_CONF_CHECK_I(min_voltage_level); - } + PIPE_CONF_CHECK_I(min_voltage_level); if (current_config->has_psr || pipe_config->has_psr) PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable, @@ -8049,7 +6939,7 @@ verify_crtc_state(struct intel_crtc *crtc, struct intel_encoder *encoder; struct intel_crtc_state *pipe_config = old_crtc_state; struct drm_atomic_state *state = old_crtc_state->uapi.state; - struct intel_crtc *master = crtc; + struct intel_crtc *master_crtc; __drm_atomic_helper_crtc_destroy_state(&old_crtc_state->uapi); intel_crtc_free_hw_state(old_crtc_state); @@ -8077,10 +6967,9 @@ verify_crtc_state(struct intel_crtc *crtc, "(expected %i, found %i)\n", new_crtc_state->hw.active, crtc->active); - if (new_crtc_state->bigjoiner_slave) - master = new_crtc_state->bigjoiner_linked_crtc; + master_crtc = intel_master_crtc(new_crtc_state); - for_each_encoder_on_crtc(dev, &master->base, encoder) { + for_each_encoder_on_crtc(dev, &master_crtc->base, encoder) { enum pipe pipe; bool active; @@ -8090,7 +6979,7 @@ verify_crtc_state(struct intel_crtc *crtc, encoder->base.base.id, active, 
new_crtc_state->hw.active); - I915_STATE_WARN(active && master->pipe != pipe, + I915_STATE_WARN(active && master_crtc->pipe != pipe, "Encoder connected to wrong pipe %c\n", pipe_name(pipe)); @@ -8101,10 +6990,6 @@ verify_crtc_state(struct intel_crtc *crtc, if (!new_crtc_state->hw.active) return; - if (new_crtc_state->bigjoiner_slave) - /* No PLLs set for slave */ - pipe_config->shared_dpll = NULL; - intel_pipe_config_sanity_check(dev_priv, pipe_config); if (!intel_pipe_config_compare(new_crtc_state, @@ -8223,9 +7108,6 @@ verify_mpllb_state(struct intel_atomic_state *state, if (!new_crtc_state->hw.active) return; - if (new_crtc_state->bigjoiner_slave) - return; - encoder = intel_get_crtc_new_encoder(state, new_crtc_state); intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state); @@ -8607,28 +7489,13 @@ static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state) return 0; } -static bool bo_has_valid_encryption(struct drm_i915_gem_object *obj) -{ - struct drm_i915_private *i915 = to_i915(obj->base.dev); - - return intel_pxp_key_check(&i915->gt.pxp, obj, false) == 0; -} - -static bool pxp_is_borked(struct drm_i915_gem_object *obj) -{ - return i915_gem_object_is_protected(obj) && !bo_has_valid_encryption(obj); -} - static int intel_atomic_check_planes(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *old_crtc_state, *new_crtc_state; struct intel_plane_state *plane_state; struct intel_plane *plane; - struct intel_plane_state *new_plane_state; - struct intel_plane_state *old_plane_state; struct intel_crtc *crtc; - const struct drm_framebuffer *fb; int i, ret; ret = icl_add_linked_planes(state); @@ -8676,19 +7543,6 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state) return ret; } - for_each_new_intel_plane_in_state(state, plane, plane_state, i) { - new_plane_state = intel_atomic_get_new_plane_state(state, plane); - old_plane_state = intel_atomic_get_old_plane_state(state, plane); - fb = new_plane_state->hw.fb; - if (fb) { - new_plane_state->decrypt = bo_has_valid_encryption(intel_fb_obj(fb)); - new_plane_state->force_black = pxp_is_borked(intel_fb_obj(fb)); - } else { - new_plane_state->decrypt = old_plane_state->decrypt; - new_plane_state->force_black = old_plane_state->force_black; - } - } - return 0; } @@ -8790,13 +7644,13 @@ static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state, struct intel_crtc_state *new_crtc_state) { struct intel_crtc_state *slave_crtc_state, *master_crtc_state; - struct intel_crtc *slave, *master; + struct intel_crtc *slave_crtc, *master_crtc; /* slave being enabled, is master is still claiming this crtc? 
*/ if (old_crtc_state->bigjoiner_slave) { - slave = crtc; - master = old_crtc_state->bigjoiner_linked_crtc; - master_crtc_state = intel_atomic_get_new_crtc_state(state, master); + slave_crtc = crtc; + master_crtc = old_crtc_state->bigjoiner_linked_crtc; + master_crtc_state = intel_atomic_get_new_crtc_state(state, master_crtc); if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state)) goto claimed; } @@ -8804,17 +7658,17 @@ static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state, if (!new_crtc_state->bigjoiner) return 0; - slave = intel_dsc_get_bigjoiner_secondary(crtc); - if (!slave) { + slave_crtc = intel_dsc_get_bigjoiner_secondary(crtc); + if (!slave_crtc) { DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires " "CRTC + 1 to be used, doesn't exist\n", crtc->base.base.id, crtc->base.name); return -EINVAL; } - new_crtc_state->bigjoiner_linked_crtc = slave; - slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave); - master = crtc; + new_crtc_state->bigjoiner_linked_crtc = slave_crtc; + slave_crtc_state = intel_atomic_get_crtc_state(&state->base, slave_crtc); + master_crtc = crtc; if (IS_ERR(slave_crtc_state)) return PTR_ERR(slave_crtc_state); @@ -8823,15 +7677,15 @@ static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state, goto claimed; DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n", - slave->base.base.id, slave->base.name); + slave_crtc->base.base.id, slave_crtc->base.name); return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state); claimed: DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but " "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n", - slave->base.base.id, slave->base.name, - master->base.base.id, master->base.name); + slave_crtc->base.base.id, slave_crtc->base.name, + master_crtc->base.base.id, master_crtc->base.name); return -EINVAL; } @@ -8865,35 +7719,37 @@ static void kill_bigjoiner_slave(struct intel_atomic_state *state, * correspond to the last vblank and have no relation to the actual time when * the flip done event was sent. */ -static int intel_atomic_check_async(struct intel_atomic_state *state) +static int intel_atomic_check_async(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct drm_i915_private *i915 = to_i915(state->base.dev); const struct intel_crtc_state *old_crtc_state, *new_crtc_state; const struct intel_plane_state *new_plane_state, *old_plane_state; - struct intel_crtc *crtc; struct intel_plane *plane; int i; - for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, - new_crtc_state, i) { - if (intel_crtc_needs_modeset(new_crtc_state)) { - drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n"); - return -EINVAL; - } + old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); + new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - if (!new_crtc_state->hw.active) { - drm_dbg_kms(&i915->drm, "CRTC inactive\n"); - return -EINVAL; - } - if (old_crtc_state->active_planes != new_crtc_state->active_planes) { - drm_dbg_kms(&i915->drm, - "Active planes cannot be changed during async flip\n"); - return -EINVAL; - } + if (intel_crtc_needs_modeset(new_crtc_state)) { + drm_dbg_kms(&i915->drm, "Modeset Required. 
Async flip not supported\n"); + return -EINVAL; + } + + if (!new_crtc_state->hw.active) { + drm_dbg_kms(&i915->drm, "CRTC inactive\n"); + return -EINVAL; + } + if (old_crtc_state->active_planes != new_crtc_state->active_planes) { + drm_dbg_kms(&i915->drm, + "Active planes cannot be changed during async flip\n"); + return -EINVAL; } for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { + if (plane->pipe != crtc->pipe) + continue; + /* * TODO: Async flip is only supported through the page flip IOCTL * as of now. So support currently added for primary plane only. @@ -8920,8 +7776,14 @@ static int intel_atomic_check_async(struct intel_atomic_state *state) return -EINVAL; } - if (old_plane_state->view.color_plane[0].stride != - new_plane_state->view.color_plane[0].stride) { + if (new_plane_state->hw.fb->format->num_planes > 1) { + drm_dbg_kms(&i915->drm, + "Planar formats not supported with async flips\n"); + return -EINVAL; + } + + if (old_plane_state->view.color_plane[0].mapping_stride != + new_plane_state->view.color_plane[0].mapping_stride) { drm_dbg_kms(&i915->drm, "Stride cannot be changed in async flip\n"); return -EINVAL; } @@ -9212,7 +8074,7 @@ static int intel_atomic_check(struct drm_device *dev, for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (new_crtc_state->uapi.async_flip) { - ret = intel_atomic_check_async(state); + ret = intel_atomic_check_async(state, crtc); if (ret) goto fail; } @@ -9417,15 +8279,17 @@ static void intel_update_crtc(struct intel_atomic_state *state, intel_fbc_update(state, crtc); + intel_update_planes_on_crtc(state, crtc); + /* Perform vblank evasion around commit operation */ intel_pipe_update_start(new_crtc_state); commit_pipe_pre_planes(state, crtc); if (DISPLAY_VER(dev_priv) >= 9) - skl_update_planes_on_crtc(state, crtc); + skl_arm_planes_on_crtc(state, crtc); else - i9xx_update_planes_on_crtc(state, crtc); + i9xx_arm_planes_on_crtc(state, crtc); commit_pipe_post_planes(state, crtc); @@ -9449,23 +8313,6 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state, { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - drm_WARN_ON(&dev_priv->drm, old_crtc_state->bigjoiner_slave); - - intel_encoders_pre_disable(state, crtc); - - intel_crtc_disable_planes(state, crtc); - - /* - * We still need special handling for disabling bigjoiner master - * and slaves since for slave we do not have encoder or plls - * so we dont need to disable those. - */ - if (old_crtc_state->bigjoiner) { - intel_crtc_disable_planes(state, - old_crtc_state->bigjoiner_linked_crtc); - old_crtc_state->bigjoiner_linked_crtc->active = false; - } - /* * We need to disable pipe CRC before disabling the pipe, * or we race against vblank off. 
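The per-CRTC async-flip validation above reduces to a handful of reject conditions. A simplified standalone rendering of those checks, assuming the struct and helper below as hypothetical stand-ins rather than the driver's own types:

#include <errno.h>
#include <stdbool.h>

struct async_flip_check {
        bool needs_modeset;                   /* full modeset requested */
        bool crtc_active;                     /* CRTC is active */
        unsigned int old_active_planes;
        unsigned int new_active_planes;
        unsigned int fb_num_planes;           /* planes in the new fb format */
        unsigned int old_stride, new_stride;  /* mapping stride of color plane 0 */
};

/* Returns 0 when the async flip can proceed, -EINVAL otherwise. */
static int async_flip_allowed(const struct async_flip_check *c)
{
        if (c->needs_modeset)                 /* "Modeset Required" */
                return -EINVAL;
        if (!c->crtc_active)                  /* "CRTC inactive" */
                return -EINVAL;
        if (c->old_active_planes != c->new_active_planes)
                return -EINVAL;               /* active planes must not change */
        if (c->fb_num_planes > 1)             /* planar formats rejected */
                return -EINVAL;
        if (c->old_stride != c->new_stride)   /* stride must not change */
                return -EINVAL;
        return 0;
}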
@@ -9490,10 +8337,22 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state) u32 handled = 0; int i; + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + if (!intel_crtc_needs_modeset(new_crtc_state)) + continue; + + if (!old_crtc_state->hw.active) + continue; + + intel_pre_plane_update(state, crtc); + intel_crtc_disable_planes(state, crtc); + } + /* Only disable port sync and MST slaves */ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner) + if (!intel_crtc_needs_modeset(new_crtc_state)) continue; if (!old_crtc_state->hw.active) @@ -9505,10 +8364,10 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state) * Slave vblanks are masked till Master Vblanks. */ if (!is_trans_port_sync_slave(old_crtc_state) && - !intel_dp_mst_is_slave_trans(old_crtc_state)) + !intel_dp_mst_is_slave_trans(old_crtc_state) && + !old_crtc_state->bigjoiner_slave) continue; - intel_pre_plane_update(state, crtc); intel_old_crtc_state_disables(state, old_crtc_state, new_crtc_state, crtc); handled |= BIT(crtc->pipe); @@ -9518,21 +8377,14 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state) for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (!intel_crtc_needs_modeset(new_crtc_state) || - (handled & BIT(crtc->pipe)) || - old_crtc_state->bigjoiner_slave) + (handled & BIT(crtc->pipe))) continue; - intel_pre_plane_update(state, crtc); - if (old_crtc_state->bigjoiner) { - struct intel_crtc *slave = - old_crtc_state->bigjoiner_linked_crtc; - - intel_pre_plane_update(state, slave); - } + if (!old_crtc_state->hw.active) + continue; - if (old_crtc_state->hw.active) - intel_old_crtc_state_disables(state, old_crtc_state, - new_crtc_state, crtc); + intel_old_crtc_state_disables(state, old_crtc_state, + new_crtc_state, crtc); } } @@ -9752,10 +8604,14 @@ static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *s for_each_new_intel_plane_in_state(state, plane, plane_state, i) { struct drm_framebuffer *fb = plane_state->hw.fb; + int cc_plane; int ret; - if (!fb || - fb->modifier != I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC) + if (!fb) + continue; + + cc_plane = intel_fb_rc_ccs_cc_plane(fb); + if (cc_plane < 0) continue; /* @@ -9772,7 +8628,7 @@ static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *s * GPU write on it. */ ret = i915_gem_object_read_from_page(intel_fb_obj(fb), - fb->offsets[2] + 16, + fb->offsets[cc_plane] + 16, &plane_state->ccval, sizeof(plane_state->ccval)); /* The above could only fail if the FB obj has an unexpected backing store type. */ @@ -9840,11 +8696,9 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) } } - if (state->modeset) - intel_encoders_update_prepare(state); + intel_encoders_update_prepare(state); intel_dbuf_pre_plane_update(state); - intel_psr_pre_plane_update(state); for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { if (new_crtc_state->uapi.async_flip) @@ -9854,11 +8708,12 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) /* Now enable the clocks, plane, pipe, and connectors that we set up. 
*/ dev_priv->display->commit_modeset_enables(state); - if (state->modeset) { - intel_encoders_update_complete(state); + intel_encoders_update_complete(state); + if (state->modeset) intel_set_cdclk_post_plane_update(state); - } + + intel_wait_for_vblank_workers(state); /* FIXME: We should call drm_atomic_helper_commit_hw_done() here * already, but still need the state for the delayed optimization. To @@ -9874,13 +8729,6 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { if (new_crtc_state->uapi.async_flip) intel_crtc_disable_flip_done(state, crtc); - - if (new_crtc_state->hw.active && - !intel_crtc_needs_modeset(new_crtc_state) && - !new_crtc_state->preload_luts && - (new_crtc_state->uapi.color_mgmt_changed || - new_crtc_state->update_pipe)) - intel_color_load_luts(new_crtc_state); } /* @@ -9967,7 +8815,7 @@ static void intel_atomic_commit_work(struct work_struct *work) intel_atomic_commit_tail(state); } -static int __i915_sw_fence_call +static int intel_atomic_commit_ready(struct i915_sw_fence *fence, enum i915_sw_fence_notify notify) { @@ -10580,7 +9428,7 @@ intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv, static const struct drm_mode_config_funcs intel_mode_funcs = { .fb_create = intel_user_framebuffer_create, - .get_format_info = intel_get_format_info, + .get_format_info = intel_fb_get_format_info, .output_poll_changed = intel_fbdev_output_poll_changed, .mode_valid = intel_mode_valid, .atomic_check = intel_atomic_check, @@ -10640,7 +9488,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) return; intel_init_cdclk_hooks(dev_priv); - intel_init_audio_hooks(dev_priv); + intel_audio_hooks_init(dev_priv); intel_dpll_init_clock_hook(dev_priv); @@ -11628,9 +10476,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) struct intel_plane *plane; int min_cdclk = 0; - if (crtc_state->bigjoiner_slave) - continue; - if (crtc_state->hw.active) { /* * The initial mode needs to be set in order to keep @@ -11690,39 +10535,6 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) intel_bw_crtc_update(bw_state, crtc_state); intel_pipe_config_sanity_check(dev_priv, crtc_state); - - /* discard our incomplete slave state, copy it from master */ - if (crtc_state->bigjoiner && crtc_state->hw.active) { - struct intel_crtc *slave = crtc_state->bigjoiner_linked_crtc; - struct intel_crtc_state *slave_crtc_state = - to_intel_crtc_state(slave->base.state); - - copy_bigjoiner_crtc_state(slave_crtc_state, crtc_state); - slave->base.mode = crtc->base.mode; - - cdclk_state->min_cdclk[slave->pipe] = min_cdclk; - cdclk_state->min_voltage_level[slave->pipe] = - crtc_state->min_voltage_level; - - for_each_intel_plane_on_crtc(&dev_priv->drm, slave, plane) { - const struct intel_plane_state *plane_state = - to_intel_plane_state(plane->base.state); - - /* - * FIXME don't have the fb yet, so can't - * use intel_plane_data_rate() :( - */ - if (plane_state->uapi.visible) - crtc_state->data_rate[plane->id] = - 4 * crtc_state->pixel_rate; - else - crtc_state->data_rate[plane->id] = 0; - } - - intel_bw_crtc_update(bw_state, slave_crtc_state); - drm_calc_timestamping_constants(&slave->base, - &slave_crtc_state->hw.adjusted_mode); - } } } @@ -12027,7 +10839,7 @@ void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915) destroy_workqueue(i915->flip_wq); destroy_workqueue(i915->modeset_wq); - intel_fbc_cleanup_cfb(i915); + intel_fbc_cleanup(i915); } /* part #3: call 
after gem init */ diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 0c76bf57f86b..38c15ec30ee7 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -521,7 +521,6 @@ void intel_link_compute_m_n(u16 bpp, int nlanes, int pixel_clock, int link_clock, struct intel_link_m_n *m_n, bool constant_n, bool fec_enable); -void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv); u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, u32 pixel_format, u64 modifier); enum drm_mode_status @@ -542,9 +541,6 @@ int vlv_get_cck_clock(struct drm_i915_private *dev_priv, const char *name, u32 reg, int ref_freq); int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv, const char *name, u32 reg); -void lpt_pch_enable(const struct intel_crtc_state *crtc_state); -void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv); -void lpt_disable_iclkip(struct drm_i915_private *dev_priv); void intel_init_display_hooks(struct drm_i915_private *dev_priv); unsigned int intel_fb_xy_to_linear(int x, int y, const struct intel_plane_state *state, @@ -580,10 +576,6 @@ struct drm_framebuffer * intel_framebuffer_create(struct drm_i915_gem_object *obj, struct drm_mode_fb_cmd2 *mode_cmd); -void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, - enum pipe pipe); - -int lpt_get_iclkip(struct drm_i915_private *dev_priv); bool intel_fuzzy_clock_check(int clock1, int clock2); void intel_display_prepare_reset(struct drm_i915_private *dev_priv); @@ -592,8 +584,11 @@ void intel_dp_get_m_n(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config); void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n); +void ilk_get_fdi_m_n_config(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config); +void i9xx_crtc_clock_get(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config); int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); - bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state); void hsw_enable_ips(const struct intel_crtc_state *crtc_state); void hsw_disable_ips(const struct intel_crtc_state *crtc_state); @@ -610,9 +605,6 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc); unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state); bool intel_plane_uses_fence(const struct intel_plane_state *plane_state); -bool -intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, - u64 modifier); struct intel_encoder * intel_get_crtc_new_encoder(const struct intel_atomic_state *state, @@ -632,7 +624,6 @@ void intel_modeset_driver_remove(struct drm_i915_private *i915); void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915); void intel_modeset_driver_remove_nogem(struct drm_i915_private *i915); void intel_display_resume(struct drm_device *dev); -void intel_init_pch_refclk(struct drm_i915_private *dev_priv); int intel_modeset_all_pipes(struct intel_atomic_state *state); /* modesetting asserts */ diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index e04767695530..acf70ae66a29 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -52,27 +52,12 @@ static int i915_fbc_status(struct seq_file *m, void *unused) wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); mutex_lock(&fbc->lock); - if 
(intel_fbc_is_active(dev_priv)) + if (intel_fbc_is_active(fbc)) { seq_puts(m, "FBC enabled\n"); - else + seq_printf(m, "Compressing: %s\n", + yesno(intel_fbc_is_compressing(fbc))); + } else { seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason); - - if (intel_fbc_is_active(dev_priv)) { - u32 mask; - - if (DISPLAY_VER(dev_priv) >= 8) - mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK; - else if (DISPLAY_VER(dev_priv) >= 7) - mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK; - else if (DISPLAY_VER(dev_priv) >= 5) - mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK; - else if (IS_G4X(dev_priv)) - mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK; - else - mask = intel_de_read(dev_priv, FBC_STATUS) & - (FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED); - - seq_printf(m, "Compressing: %s\n", yesno(mask)); } mutex_unlock(&fbc->lock); @@ -85,9 +70,6 @@ static int i915_fbc_false_color_get(void *data, u64 *val) { struct drm_i915_private *dev_priv = data; - if (DISPLAY_VER(dev_priv) < 7 || !HAS_FBC(dev_priv)) - return -ENODEV; - *val = dev_priv->fbc.false_color; return 0; @@ -96,21 +78,8 @@ static int i915_fbc_false_color_get(void *data, u64 *val) static int i915_fbc_false_color_set(void *data, u64 val) { struct drm_i915_private *dev_priv = data; - u32 reg; - - if (DISPLAY_VER(dev_priv) < 7 || !HAS_FBC(dev_priv)) - return -ENODEV; - - mutex_lock(&dev_priv->fbc.lock); - - reg = intel_de_read(dev_priv, ILK_DPFC_CONTROL); - dev_priv->fbc.false_color = val; - intel_de_write(dev_priv, ILK_DPFC_CONTROL, - val ? (reg | FBC_CTL_FALSE_COLOR) : (reg & ~FBC_CTL_FALSE_COLOR)); - - mutex_unlock(&dev_priv->fbc.lock); - return 0; + return intel_fbc_set_false_color(&dev_priv->fbc, val); } DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops, @@ -303,8 +272,7 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m) }; val = intel_de_read(dev_priv, EDP_PSR2_STATUS(intel_dp->psr.transcoder)); - status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >> - EDP_PSR2_STATUS_STATE_SHIFT; + status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val); if (status_val < ARRAY_SIZE(live_status)) status = live_status[status_val]; } else { @@ -503,28 +471,9 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops, static int i915_power_domain_info(struct seq_file *m, void *unused) { - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct i915_power_domains *power_domains = &dev_priv->power_domains; - int i; - - mutex_lock(&power_domains->lock); - - seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); - for (i = 0; i < power_domains->power_well_count; i++) { - struct i915_power_well *power_well; - enum intel_display_power_domain power_domain; - - power_well = &power_domains->power_wells[i]; - seq_printf(m, "%-25s %d\n", power_well->desc->name, - power_well->count); - - for_each_power_domain(power_domain, power_well->desc->domains) - seq_printf(m, " %-23s %d\n", - intel_display_power_domain_str(power_domain), - power_domains->domain_use_count[power_domain]); - } + struct drm_i915_private *i915 = node_to_i915(m->private); - mutex_unlock(&power_domains->lock); + intel_display_power_debug(i915, m); return 0; } @@ -2095,7 +2044,7 @@ i915_fifo_underrun_reset_write(struct file *filp, return ret; } - ret = intel_fbc_reset_underrun(dev_priv); + ret = intel_fbc_reset_underrun(&dev_priv->fbc); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 
1672604f9ef7..229b4c127c6c 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -15,6 +15,7 @@ #include "intel_dpio_phy.h" #include "intel_dpll.h" #include "intel_hotplug.h" +#include "intel_pch_refclk.h" #include "intel_pcode.h" #include "intel_pm.h" #include "intel_pps.h" @@ -23,6 +24,98 @@ #include "intel_vga.h" #include "vlv_sideband.h" +struct i915_power_well_ops { + /* + * Synchronize the well's hw state to match the current sw state, for + * example enable/disable it based on the current refcount. Called + * during driver init and resume time, possibly after first calling + * the enable/disable handlers. + */ + void (*sync_hw)(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well); + /* + * Enable the well and resources that depend on it (for example + * interrupts located on the well). Called after the 0->1 refcount + * transition. + */ + void (*enable)(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well); + /* + * Disable the well and resources that depend on it. Called after + * the 1->0 refcount transition. + */ + void (*disable)(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well); + /* Returns the hw enabled state. */ + bool (*is_enabled)(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well); +}; + +struct i915_power_well_regs { + i915_reg_t bios; + i915_reg_t driver; + i915_reg_t kvmr; + i915_reg_t debug; +}; + +/* Power well structure for haswell */ +struct i915_power_well_desc { + const char *name; + bool always_on; + u64 domains; + /* unique identifier for this power well */ + enum i915_power_well_id id; + /* + * Arbitraty data associated with this power well. Platform and power + * well specific. + */ + union { + struct { + /* + * request/status flag index in the PUNIT power well + * control/status registers. + */ + u8 idx; + } vlv; + struct { + enum dpio_phy phy; + } bxt; + struct { + const struct i915_power_well_regs *regs; + /* + * request/status flag index in the power well + * constrol/status registers. + */ + u8 idx; + /* Mask of pipes whose IRQ logic is backed by the pw */ + u8 irq_pipe_mask; + /* + * Instead of waiting for the status bit to ack enables, + * just wait a specific amount of time and then consider + * the well enabled. + */ + u16 fixed_enable_delay; + /* The pw is backing the VGA functionality */ + bool has_vga:1; + bool has_fuses:1; + /* + * The pw is for an ICL+ TypeC PHY port in + * Thunderbolt mode. + */ + bool is_tc_tbt:1; + } hsw; + }; + const struct i915_power_well_ops *ops; +}; + +struct i915_power_well { + const struct i915_power_well_desc *desc; + /* power well enable/disable usage count */ + int count; + /* cached hw enabled state */ + bool hw_enabled; +}; + bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, enum i915_power_well_id power_well_id); @@ -154,8 +247,8 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) return "MODESET"; case POWER_DOMAIN_GT_IRQ: return "GT_IRQ"; - case POWER_DOMAIN_DPLL_DC_OFF: - return "DPLL_DC_OFF"; + case POWER_DOMAIN_DC_OFF: + return "DC_OFF"; case POWER_DOMAIN_TC_COLD_OFF: return "TC_COLD_OFF"; default: @@ -434,6 +527,11 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv, pg = DISPLAY_VER(dev_priv) >= 11 ? 
ICL_PW_CTL_IDX_TO_PG(pw_idx) : SKL_PW_CTL_IDX_TO_PG(pw_idx); + + /* Wa_16013190616:adlp */ + if (IS_ALDERLAKE_P(dev_priv) && pg == SKL_PG1) + intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1, 0, DISABLE_FLR_SRC); + /* * For PW1 we have to wait both for the PW0/PG0 fuse state * before enabling the power well and PW1/PG1's own fuse @@ -894,7 +992,7 @@ static u32 sanitize_target_dc_state(struct drm_i915_private *dev_priv, u32 target_dc_state) { - u32 states[] = { + static const u32 states[] = { DC_STATE_EN_UPTO_DC6, DC_STATE_EN_UPTO_DC5, DC_STATE_EN_DC3CO, @@ -2802,7 +2900,7 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915, ICL_PW_2_POWER_DOMAINS | \ BIT_ULL(POWER_DOMAIN_MODESET) | \ BIT_ULL(POWER_DOMAIN_AUX_A) | \ - BIT_ULL(POWER_DOMAIN_DPLL_DC_OFF) | \ + BIT_ULL(POWER_DOMAIN_DC_OFF) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define ICL_DDI_IO_A_POWER_DOMAINS ( \ @@ -3105,6 +3203,7 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915, BIT_ULL(POWER_DOMAIN_MODESET) | \ BIT_ULL(POWER_DOMAIN_AUX_A) | \ BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_PORT_DSI) | \ BIT_ULL(POWER_DOMAIN_INIT)) #define XELPD_AUX_IO_D_XELPD_POWER_DOMAINS BIT_ULL(POWER_DOMAIN_AUX_D_XELPD) @@ -6390,3 +6489,28 @@ void intel_display_power_resume(struct drm_i915_private *i915) hsw_disable_pc8(i915); } } + +void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m) +{ + struct i915_power_domains *power_domains = &i915->power_domains; + int i; + + mutex_lock(&power_domains->lock); + + seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count"); + for (i = 0; i < power_domains->power_well_count; i++) { + struct i915_power_well *power_well; + enum intel_display_power_domain power_domain; + + power_well = &power_domains->power_wells[i]; + seq_printf(m, "%-25s %d\n", power_well->desc->name, + power_well->count); + + for_each_power_domain(power_domain, power_well->desc->domains) + seq_printf(m, " %-23s %d\n", + intel_display_power_domain_str(power_domain), + power_domains->domain_use_count[power_domain]); + } + + mutex_unlock(&power_domains->lock); +} diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index 0612e4b6e3c8..686d18eaa24c 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -6,11 +6,13 @@ #ifndef __INTEL_DISPLAY_POWER_H__ #define __INTEL_DISPLAY_POWER_H__ -#include "intel_display.h" #include "intel_runtime_pm.h" #include "i915_reg.h" +enum dpio_channel; +enum dpio_phy; struct drm_i915_private; +struct i915_power_well; struct intel_encoder; enum intel_display_power_domain { @@ -117,7 +119,7 @@ enum intel_display_power_domain { POWER_DOMAIN_GMBUS, POWER_DOMAIN_MODESET, POWER_DOMAIN_GT_IRQ, - POWER_DOMAIN_DPLL_DC_OFF, + POWER_DOMAIN_DC_OFF, POWER_DOMAIN_TC_COLD_OFF, POWER_DOMAIN_INIT, @@ -155,100 +157,6 @@ enum i915_power_well_id { ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \ (tran) + POWER_DOMAIN_TRANSCODER_A) -struct i915_power_well; - -struct i915_power_well_ops { - /* - * Synchronize the well's hw state to match the current sw state, for - * example enable/disable it based on the current refcount. Called - * during driver init and resume time, possibly after first calling - * the enable/disable handlers. - */ - void (*sync_hw)(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well); - /* - * Enable the well and resources that depend on it (for example - * interrupts located on the well). 
Called after the 0->1 refcount - * transition. - */ - void (*enable)(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well); - /* - * Disable the well and resources that depend on it. Called after - * the 1->0 refcount transition. - */ - void (*disable)(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well); - /* Returns the hw enabled state. */ - bool (*is_enabled)(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well); -}; - -struct i915_power_well_regs { - i915_reg_t bios; - i915_reg_t driver; - i915_reg_t kvmr; - i915_reg_t debug; -}; - -/* Power well structure for haswell */ -struct i915_power_well_desc { - const char *name; - bool always_on; - u64 domains; - /* unique identifier for this power well */ - enum i915_power_well_id id; - /* - * Arbitraty data associated with this power well. Platform and power - * well specific. - */ - union { - struct { - /* - * request/status flag index in the PUNIT power well - * control/status registers. - */ - u8 idx; - } vlv; - struct { - enum dpio_phy phy; - } bxt; - struct { - const struct i915_power_well_regs *regs; - /* - * request/status flag index in the power well - * constrol/status registers. - */ - u8 idx; - /* Mask of pipes whose IRQ logic is backed by the pw */ - u8 irq_pipe_mask; - /* - * Instead of waiting for the status bit to ack enables, - * just wait a specific amount of time and then consider - * the well enabled. - */ - u16 fixed_enable_delay; - /* The pw is backing the VGA functionality */ - bool has_vga:1; - bool has_fuses:1; - /* - * The pw is for an ICL+ TypeC PHY port in - * Thunderbolt mode. - */ - bool is_tc_tbt:1; - } hsw; - }; - const struct i915_power_well_ops *ops; -}; - -struct i915_power_well { - const struct i915_power_well_desc *desc; - /* power well enable/disable usage count */ - int count; - /* cached hw enabled state */ - bool hw_enabled; -}; - struct i915_power_domains { /* * Power wells needed for initialization at driver init and suspend @@ -391,6 +299,8 @@ intel_display_power_put_all_in_set(struct drm_i915_private *i915, intel_display_power_put_mask_in_set(i915, power_domain_set, power_domain_set->mask); } +void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m); + /* * FIXME: We should probably switch this to a 0-based scheme to be consistent * with how we now name/number DBUF_CTL instances. 
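
Note on the power-well structures moved into intel_display_power.c above: the i915_power_well_ops comments describe a refcount-driven contract — enable() runs on the 0->1 use-count transition, disable() on 1->0, and sync_hw() reconciles hardware state with the current count at init/resume, while intel_display_power_debug() dumps the per-well use counts. As a rough, self-contained illustration of that contract only (not the driver's implementation — the demo_* names and the printf backend are invented for this sketch), a stand-alone C program modelling the transitions might look like this:

/*
 * Stand-alone sketch of the refcounted power-well contract described in the
 * i915_power_well_ops comments above. All demo_* names are invented for this
 * example; this is not the i915 code.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_well;

struct demo_well_ops {
	void (*sync_hw)(struct demo_well *well);   /* reconcile hw with sw count */
	void (*enable)(struct demo_well *well);    /* called on 0 -> 1 */
	void (*disable)(struct demo_well *well);   /* called on 1 -> 0 */
	bool (*is_enabled)(struct demo_well *well);
};

struct demo_well {
	const char *name;
	const struct demo_well_ops *ops;
	int count;          /* usage count, like i915_power_well.count */
	bool hw_enabled;    /* cached hw state */
};

static void demo_enable(struct demo_well *w)
{
	printf("%s: enabling hardware\n", w->name);
	w->hw_enabled = true;
}

static void demo_disable(struct demo_well *w)
{
	printf("%s: disabling hardware\n", w->name);
	w->hw_enabled = false;
}

static bool demo_is_enabled(struct demo_well *w)
{
	return w->hw_enabled;
}

static void demo_sync_hw(struct demo_well *w)
{
	/* Bring hw in line with the current refcount, as sync_hw() is meant to. */
	if (w->count > 0 && !w->ops->is_enabled(w))
		w->ops->enable(w);
	else if (w->count == 0 && w->ops->is_enabled(w))
		w->ops->disable(w);
}

static const struct demo_well_ops demo_ops = {
	.sync_hw = demo_sync_hw,
	.enable = demo_enable,
	.disable = demo_disable,
	.is_enabled = demo_is_enabled,
};

/* Analogous to a power "get": touch hardware only on the 0 -> 1 transition. */
static void demo_get(struct demo_well *w)
{
	if (w->count++ == 0)
		w->ops->enable(w);
}

/* Analogous to a power "put": touch hardware only on the 1 -> 0 transition. */
static void demo_put(struct demo_well *w)
{
	assert(w->count > 0);
	if (--w->count == 0)
		w->ops->disable(w);
}

int main(void)
{
	struct demo_well pw2 = { .name = "PW_2", .ops = &demo_ops };

	demo_get(&pw2);   /* 0 -> 1: enables            */
	demo_get(&pw2);   /* 1 -> 2: no hardware access */
	demo_put(&pw2);   /* 2 -> 1: no hardware access */
	demo_put(&pw2);   /* 1 -> 0: disables           */

	/* A dump similar in spirit to intel_display_power_debug(). */
	printf("%-10s use count: %d, hw enabled: %s\n",
	       pw2.name, pw2.count, pw2.hw_enabled ? "yes" : "no");
	return 0;
}

The indirection through an ops table is what lets one get/put bookkeeping path serve wells with very different backends (the vlv/bxt/hsw union in i915_power_well_desc hints at this); only the callbacks differ per platform, the transition logic stays common.
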
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 39e11eaec1a3..b9c967837872 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -28,6 +28,7 @@ #include <linux/async.h> #include <linux/i2c.h> +#include <linux/pm_qos.h> #include <linux/pwm.h> #include <linux/sched/clock.h> @@ -41,6 +42,7 @@ #include <drm/drm_probe_helper.h> #include <drm/drm_rect.h> #include <drm/drm_vblank.h> +#include <drm/drm_vblank_work.h> #include <drm/i915_mei_hdcp_interface.h> #include <media/cec-notifier.h> @@ -49,6 +51,7 @@ struct drm_printer; struct __intel_global_objs_state; struct intel_ddi_buf_trans; +struct intel_fbc; /* * Display related stuff @@ -115,7 +118,8 @@ struct intel_fb_view { * bytes for 0/180 degree rotation * pixels for 90/270 degree rotation */ - unsigned int stride; + unsigned int mapping_stride; + unsigned int scanout_stride; } color_plane[4]; }; @@ -194,10 +198,6 @@ struct intel_encoder { void (*update_complete)(struct intel_atomic_state *, struct intel_encoder *, struct intel_crtc *); - void (*pre_disable)(struct intel_atomic_state *, - struct intel_encoder *, - const struct intel_crtc_state *, - const struct drm_connector_state *); void (*disable)(struct intel_atomic_state *, struct intel_encoder *, const struct intel_crtc_state *, @@ -949,7 +949,6 @@ struct intel_crtc_state { * accordingly. */ #define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ -#define PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE (1<<1) /* bigjoiner slave, partial readout */ unsigned long quirks; unsigned fb_bits; /* framebuffers to flip */ @@ -1241,6 +1240,9 @@ struct intel_crtc_state { u8 link_count; u8 pixel_overlap; } splitter; + + /* for loading single buffered registers during vblank */ + struct drm_vblank_work vblank_work; }; enum intel_pipe_crc_source { @@ -1325,6 +1327,9 @@ struct intel_crtc { /* scalers available on this crtc */ int num_scalers; + /* for loading single buffered registers during vblank */ + struct pm_qos_request vblank_pm_qos; + #ifdef CONFIG_DEBUG_FS struct intel_pipe_crc pipe_crc; #endif @@ -1335,8 +1340,6 @@ struct intel_plane { enum i9xx_plane_id i9xx_plane; enum plane_id id; enum pipe pipe; - bool has_fbc; - bool has_ccs; bool need_async_flip_disable_wa; u32 frontbuffer_bit; @@ -1344,6 +1347,8 @@ struct intel_plane { u32 base, cntl, size; } cursor; + struct intel_fbc *fbc; + /* * NOTE: Do not place new plane state fields here (e.g., when adding * new plane properties). 
New runtime state should now be placed in @@ -1362,11 +1367,17 @@ struct intel_plane { unsigned int (*max_stride)(struct intel_plane *plane, u32 pixel_format, u64 modifier, unsigned int rotation); - void (*update_plane)(struct intel_plane *plane, + /* Write all non-self arming plane registers */ + void (*update_noarm)(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); - void (*disable_plane)(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state); + /* Write all self-arming plane registers */ + void (*update_arm)(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); + /* Disable the plane, must arm */ + void (*disable_arm)(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state); bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe); int (*check_plane)(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state); @@ -1563,6 +1574,8 @@ struct intel_dp { int num_sink_rates; int sink_rates[DP_MAX_SUPPORTED_RATES]; bool use_rate_select; + /* Max sink lane count as reported by DP_MAX_LANE_COUNT */ + int max_sink_lane_count; /* intersection of source and sink rates */ int num_common_rates; int common_rates[DP_MAX_SUPPORTED_RATES]; @@ -1640,6 +1653,9 @@ struct intel_dp { struct intel_dp_pcon_frl frl; struct intel_psr psr; + + /* When we last wrote the OUI for eDP */ + unsigned long last_oui_write; }; enum lspcon_vendor { @@ -2041,20 +2057,4 @@ to_intel_frontbuffer(struct drm_framebuffer *fb) return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL; } -static inline bool is_ccs_modifier(u64 modifier) -{ - return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || - modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC || - modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS || - modifier == I915_FORMAT_MOD_Y_TILED_CCS || - modifier == I915_FORMAT_MOD_Yf_TILED_CCS; -} - -static inline bool is_gen12_ccs_modifier(u64 modifier) -{ - return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || - modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC || - modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS; -} - #endif /* __INTEL_DISPLAY_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h index c3c00ff03869..b20f3441ca60 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.h +++ b/drivers/gpu/drm/i915/display/intel_dmc.h @@ -20,6 +20,8 @@ enum { DMC_FW_MAIN = 0, DMC_FW_PIPEA, DMC_FW_PIPEB, + DMC_FW_PIPEC, + DMC_FW_PIPED, DMC_FW_MAX }; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 8195452b2d4c..5a8206298691 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -29,6 +29,7 @@ #include <linux/i2c.h> #include <linux/notifier.h> #include <linux/slab.h> +#include <linux/timekeeping.h> #include <linux/types.h> #include <asm/byteorder.h> @@ -127,7 +128,7 @@ static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp) } /* update sink rates from dpcd */ -static void intel_dp_set_sink_rates(struct intel_dp *intel_dp) +static void intel_dp_set_dpcd_sink_rates(struct intel_dp *intel_dp) { static const int dp_rates[] = { 162000, 270000, 540000, 810000 @@ -197,6 +198,54 @@ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp) intel_dp->num_sink_rates = i; } +static void intel_dp_set_sink_rates(struct intel_dp *intel_dp) +{ + struct intel_connector *connector = 
intel_dp->attached_connector; + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct intel_encoder *encoder = &intel_dig_port->base; + + intel_dp_set_dpcd_sink_rates(intel_dp); + + if (intel_dp->num_sink_rates) + return; + + drm_err(&dp_to_i915(intel_dp)->drm, + "[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD with no link rates, using defaults\n", + connector->base.base.id, connector->base.name, + encoder->base.base.id, encoder->base.name); + + intel_dp_set_default_sink_rates(intel_dp); +} + +static void intel_dp_set_default_max_sink_lane_count(struct intel_dp *intel_dp) +{ + intel_dp->max_sink_lane_count = 1; +} + +static void intel_dp_set_max_sink_lane_count(struct intel_dp *intel_dp) +{ + struct intel_connector *connector = intel_dp->attached_connector; + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct intel_encoder *encoder = &intel_dig_port->base; + + intel_dp->max_sink_lane_count = drm_dp_max_lane_count(intel_dp->dpcd); + + switch (intel_dp->max_sink_lane_count) { + case 1: + case 2: + case 4: + return; + } + + drm_err(&dp_to_i915(intel_dp)->drm, + "[CONNECTOR:%d:%s][ENCODER:%d:%s] Invalid DPCD max lane count (%d), using default\n", + connector->base.base.id, connector->base.name, + encoder->base.base.id, encoder->base.name, + intel_dp->max_sink_lane_count); + + intel_dp_set_default_max_sink_lane_count(intel_dp); +} + /* Get length of rates array potentially limited by max_rate. */ static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate) { @@ -219,10 +268,19 @@ static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp, intel_dp->num_common_rates, max_rate); } +static int intel_dp_common_rate(struct intel_dp *intel_dp, int index) +{ + if (drm_WARN_ON(&dp_to_i915(intel_dp)->drm, + index < 0 || index >= intel_dp->num_common_rates)) + return 162000; + + return intel_dp->common_rates[index]; +} + /* Theoretical max between source and sink */ static int intel_dp_max_common_rate(struct intel_dp *intel_dp) { - return intel_dp->common_rates[intel_dp->num_common_rates - 1]; + return intel_dp_common_rate(intel_dp, intel_dp->num_common_rates - 1); } /* Theoretical max between source and sink */ @@ -230,7 +288,7 @@ static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); int source_max = dig_port->max_lanes; - int sink_max = drm_dp_max_lane_count(intel_dp->dpcd); + int sink_max = intel_dp->max_sink_lane_count; int fia_max = intel_tc_port_fia_max_lane_count(dig_port); int lttpr_max = drm_dp_lttpr_max_lane_count(intel_dp->lttpr_common_caps); @@ -242,7 +300,15 @@ static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp) int intel_dp_max_lane_count(struct intel_dp *intel_dp) { - return intel_dp->max_link_lane_count; + switch (intel_dp->max_link_lane_count) { + case 1: + case 2: + case 4: + return intel_dp->max_link_lane_count; + default: + MISSING_CASE(intel_dp->max_link_lane_count); + return 1; + } } /* @@ -554,13 +620,13 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, if (index > 0) { if (intel_dp_is_edp(intel_dp) && !intel_dp_can_link_train_fallback_for_edp(intel_dp, - intel_dp->common_rates[index - 1], + intel_dp_common_rate(intel_dp, index - 1), lane_count)) { drm_dbg_kms(&i915->drm, "Retrying Link training for eDP with same parameters\n"); return 0; } - intel_dp->max_link_rate = intel_dp->common_rates[index - 1]; + intel_dp->max_link_rate = intel_dp_common_rate(intel_dp, index - 1); 
intel_dp->max_link_lane_count = lane_count; } else if (lane_count > 1) { if (intel_dp_is_edp(intel_dp) && @@ -1000,14 +1066,11 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp) int intel_dp_max_link_rate(struct intel_dp *intel_dp) { - struct drm_i915_private *i915 = dp_to_i915(intel_dp); int len; len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate); - if (drm_WARN_ON(&i915->drm, len <= 0)) - return 162000; - return intel_dp->common_rates[len - 1]; + return intel_dp_common_rate(intel_dp, len - 1); } int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) @@ -1204,7 +1267,7 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, output_bpp); for (i = 0; i < intel_dp->num_common_rates; i++) { - link_rate = intel_dp->common_rates[i]; + link_rate = intel_dp_common_rate(intel_dp, i); if (link_rate < limits->min_rate || link_rate > limits->max_rate) continue; @@ -1283,7 +1346,7 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, else vdsc_cfg->slice_height = 2; - ret = intel_dsc_compute_params(encoder, crtc_state); + ret = intel_dsc_compute_params(crtc_state); if (ret) return ret; @@ -1452,17 +1515,10 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, &pipe_config->hw.adjusted_mode; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct link_config_limits limits; - int common_len; int ret; - common_len = intel_dp_common_len_rate_limit(intel_dp, - intel_dp->max_link_rate); - - /* No common link rates between source and sink */ - drm_WARN_ON(encoder->base.dev, common_len <= 0); - - limits.min_rate = intel_dp->common_rates[0]; - limits.max_rate = intel_dp->common_rates[common_len - 1]; + limits.min_rate = intel_dp_common_rate(intel_dp, 0); + limits.max_rate = intel_dp_max_link_rate(intel_dp); limits.min_lane_count = 1; limits.max_lane_count = intel_dp_max_lane_count(intel_dp); @@ -1955,6 +2011,16 @@ intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful) if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0) drm_err(&i915->drm, "Failed to write source OUI\n"); + + intel_dp->last_oui_write = jiffies; +} + +void intel_dp_wait_source_oui(struct intel_dp *intel_dp) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + drm_dbg_kms(&i915->drm, "Performing OUI wait\n"); + wait_remaining_ms_from_jiffies(intel_dp->last_oui_write, 30); } /* If the device supports it, try to set the power state appropriately */ @@ -2143,6 +2209,18 @@ static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp) return max_frl_rate; } +static bool +intel_dp_pcon_is_frl_trained(struct intel_dp *intel_dp, + u8 max_frl_bw_mask, u8 *frl_trained_mask) +{ + if (drm_dp_pcon_hdmi_link_active(&intel_dp->aux) && + drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, frl_trained_mask) == DP_PCON_HDMI_MODE_FRL && + *frl_trained_mask >= max_frl_bw_mask) + return true; + + return false; +} + static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp) { #define TIMEOUT_FRL_READY_MS 500 @@ -2153,10 +2231,6 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp) u8 max_frl_bw_mask = 0, frl_trained_mask; bool is_active; - ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux); - if (ret < 0) - return ret; - max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw; drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw); @@ -2168,6 +2242,12 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp) if (max_frl_bw <= 0) return -EINVAL; + max_frl_bw_mask = 
intel_dp_pcon_set_frl_mask(max_frl_bw); + drm_dbg(&i915->drm, "MAX_FRL_BW_MASK = %u\n", max_frl_bw_mask); + + if (intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask)) + goto frl_trained; + ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false); if (ret < 0) return ret; @@ -2177,7 +2257,6 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp) if (!is_active) return -ETIMEDOUT; - max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw); ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw, DP_PCON_ENABLE_SEQUENTIAL_LINK); if (ret < 0) @@ -2193,19 +2272,15 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp) * Wait for FRL to be completed * Check if the HDMI Link is up and active. */ - wait_for(is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux) == true, TIMEOUT_HDMI_LINK_ACTIVE_MS); + wait_for(is_active = + intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask), + TIMEOUT_HDMI_LINK_ACTIVE_MS); if (!is_active) return -ETIMEDOUT; - /* Verify HDMI Link configuration shows FRL Mode */ - if (drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, &frl_trained_mask) != - DP_PCON_HDMI_MODE_FRL) { - drm_dbg(&i915->drm, "HDMI couldn't be trained in FRL Mode\n"); - return -EINVAL; - } - drm_dbg(&i915->drm, "MAX_FRL_MASK = %u, FRL_TRAINED_MASK = %u\n", max_frl_bw_mask, frl_trained_mask); - +frl_trained: + drm_dbg(&i915->drm, "FRL_TRAINED_MASK = %u\n", frl_trained_mask); intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask); intel_dp->frl.is_trained = true; drm_dbg(&i915->drm, "FRL trained with : %d Gbps\n", intel_dp->frl.trained_rate_gbps); @@ -2223,6 +2298,28 @@ static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp) return false; } +static +int intel_dp_pcon_set_tmds_mode(struct intel_dp *intel_dp) +{ + int ret; + u8 buf = 0; + + /* Set PCON source control mode */ + buf |= DP_PCON_ENABLE_SOURCE_CTL_MODE; + + ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf); + if (ret < 0) + return ret; + + /* Set HDMI LINK ENABLE */ + buf |= DP_PCON_ENABLE_HDMI_LINK; + ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf); + if (ret < 0) + return ret; + + return 0; +} + void intel_dp_check_frl_training(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); @@ -2241,7 +2338,7 @@ void intel_dp_check_frl_training(struct intel_dp *intel_dp) int ret, mode; drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n"); - ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux); + ret = intel_dp_pcon_set_tmds_mode(intel_dp); mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL); if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS) @@ -2603,6 +2700,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp) intel_dp->use_rate_select = true; else intel_dp_set_sink_rates(intel_dp); + intel_dp_set_max_sink_lane_count(intel_dp); intel_dp_set_common_rates(intel_dp); intel_dp_reset_max_link_params(intel_dp); @@ -2648,6 +2746,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) drm_dp_is_branch(intel_dp->dpcd)); intel_dp_set_sink_rates(intel_dp); + intel_dp_set_max_sink_lane_count(intel_dp); intel_dp_set_common_rates(intel_dp); } @@ -5014,6 +5113,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, intel_dp_set_source_rates(intel_dp); intel_dp_set_default_sink_rates(intel_dp); + intel_dp_set_default_max_sink_lane_count(intel_dp); intel_dp_set_common_rates(intel_dp); intel_dp_reset_max_link_params(intel_dp); diff --git 
a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index ce229026dc91..b64145a3869a 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -119,4 +119,6 @@ void intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state); void intel_dp_phy_test(struct intel_encoder *encoder); +void intel_dp_wait_source_oui(struct intel_dp *intel_dp); + #endif /* __INTEL_DP_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c index 8b9c925c4c16..62c112daacf2 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c @@ -36,6 +36,7 @@ #include "intel_backlight.h" #include "intel_display_types.h" +#include "intel_dp.h" #include "intel_dp_aux_backlight.h" /* TODO: @@ -106,6 +107,8 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector) int ret; u8 tcon_cap[4]; + intel_dp_wait_source_oui(intel_dp); + ret = drm_dp_dpcd_read(aux, INTEL_EDP_HDR_TCON_CAP0, tcon_cap, sizeof(tcon_cap)); if (ret != sizeof(tcon_cap)) return false; @@ -204,6 +207,8 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state, int ret; u8 old_ctrl, ctrl; + intel_dp_wait_source_oui(intel_dp); + ret = drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &old_ctrl); if (ret != 1) { drm_err(&i915->drm, "Failed to read current backlight control mode: %d\n", ret); diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 85676c953e0a..e264467de8ed 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -301,7 +301,10 @@ static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp, static bool has_per_lane_signal_levels(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy) { - return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy) || + DISPLAY_VER(i915) >= 11; } /* 128b/132b */ @@ -683,15 +686,6 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp, return true; } -static void intel_dp_link_training_clock_recovery_delay(struct intel_dp *intel_dp, - enum drm_dp_phy dp_phy) -{ - if (dp_phy == DP_PHY_DPRX) - drm_dp_link_train_clock_recovery_delay(&intel_dp->aux, intel_dp->dpcd); - else - drm_dp_lttpr_link_train_clock_recovery_delay(); -} - static bool intel_dp_adjust_request_changed(const struct intel_crtc_state *crtc_state, const u8 old_link_status[DP_LINK_STATUS_SIZE], const u8 new_link_status[DP_LINK_STATUS_SIZE]) @@ -750,6 +744,11 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE]; bool max_vswing_reached = false; char phy_name[10]; + int delay_us; + + delay_us = drm_dp_read_clock_recovery_delay(&intel_dp->aux, + intel_dp->dpcd, dp_phy, + intel_dp_is_uhbr(crtc_state)); intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)); @@ -777,7 +776,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp, voltage_tries = 1; for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) { - intel_dp_link_training_clock_recovery_delay(intel_dp, dp_phy); + usleep_range(delay_us, 2 * delay_us); if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy, link_status) < 0) { @@ -895,19 +894,6 @@ static u32 
intel_dp_training_pattern(struct intel_dp *intel_dp, return DP_TRAINING_PATTERN_2; } -static void -intel_dp_link_training_channel_equalization_delay(struct intel_dp *intel_dp, - enum drm_dp_phy dp_phy) -{ - if (dp_phy == DP_PHY_DPRX) { - drm_dp_link_train_channel_eq_delay(&intel_dp->aux, intel_dp->dpcd); - } else { - const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy); - - drm_dp_lttpr_link_train_channel_eq_delay(&intel_dp->aux, phy_caps); - } -} - /* * Perform the link training channel equalization phase on the given DP PHY * using one of training pattern 2, 3 or 4 depending on the source and @@ -925,6 +911,11 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE]; bool channel_eq = false; char phy_name[10]; + int delay_us; + + delay_us = drm_dp_read_channel_eq_delay(&intel_dp->aux, + intel_dp->dpcd, dp_phy, + intel_dp_is_uhbr(crtc_state)); intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)); @@ -944,8 +935,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp, } for (tries = 0; tries < 5; tries++) { - intel_dp_link_training_channel_equalization_delay(intel_dp, - dp_phy); + usleep_range(delay_us, 2 * delay_us); + if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy, link_status) < 0) { drm_err(&i915->drm, diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 89d701e8ae9d..b8bc7d397c81 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -231,6 +231,7 @@ intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector, struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct drm_connector_list_iter connector_list_iter; struct intel_connector *connector_iter; + int ret = 0; if (DISPLAY_VER(dev_priv) < 12) return 0; @@ -243,7 +244,6 @@ intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector, struct intel_digital_connector_state *conn_iter_state; struct intel_crtc_state *crtc_state; struct intel_crtc *crtc; - int ret; if (connector_iter->mst_port != connector->mst_port || connector_iter == connector) @@ -252,8 +252,8 @@ intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector, conn_iter_state = intel_atomic_get_digital_connector_state(state, connector_iter); if (IS_ERR(conn_iter_state)) { - drm_connector_list_iter_end(&connector_list_iter); - return PTR_ERR(conn_iter_state); + ret = PTR_ERR(conn_iter_state); + break; } if (!conn_iter_state->base.crtc) @@ -262,20 +262,18 @@ intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector, crtc = to_intel_crtc(conn_iter_state->base.crtc); crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); if (IS_ERR(crtc_state)) { - drm_connector_list_iter_end(&connector_list_iter); - return PTR_ERR(crtc_state); + ret = PTR_ERR(crtc_state); + break; } ret = drm_atomic_add_affected_planes(&state->base, &crtc->base); - if (ret) { - drm_connector_list_iter_end(&connector_list_iter); - return ret; - } + if (ret) + break; crtc_state->uapi.mode_changed = true; } drm_connector_list_iter_end(&connector_list_iter); - return 0; + return ret; } static int @@ -348,16 +346,6 @@ static void wait_for_act_sent(struct intel_encoder *encoder, drm_dp_check_act_status(&intel_dp->mst_mgr); } -static void intel_mst_pre_disable_dp(struct intel_atomic_state *state, - struct intel_encoder *encoder, - const struct intel_crtc_state *old_crtc_state, - const struct drm_connector_state *old_conn_state) -{ - if 
(old_crtc_state->has_audio) - intel_audio_codec_disable(encoder, old_crtc_state, - old_conn_state); -} - static void intel_mst_disable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *old_crtc_state, @@ -382,6 +370,9 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state, if (ret) { drm_dbg_kms(&i915->drm, "failed to update payload %d\n", ret); } + if (old_crtc_state->has_audio) + intel_audio_codec_disable(encoder, + old_crtc_state, old_conn_state); } static void intel_mst_post_disable_dp(struct intel_atomic_state *state, @@ -916,7 +907,6 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe intel_encoder->compute_config = intel_dp_mst_compute_config; intel_encoder->compute_config_late = intel_dp_mst_compute_config_late; - intel_encoder->pre_disable = intel_mst_pre_disable_dp; intel_encoder->disable = intel_mst_disable_dp; intel_encoder->post_disable = intel_mst_post_disable_dp; intel_encoder->update_pipe = intel_ddi_update_pipe; diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 0a7e04db04be..fc8fda77483a 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -26,6 +26,7 @@ #include "intel_dpio_phy.h" #include "intel_dpll.h" #include "intel_dpll_mgr.h" +#include "intel_pch_refclk.h" #include "intel_tc.h" /** @@ -3740,7 +3741,7 @@ static void combo_pll_enable(struct drm_i915_private *dev_priv, * domain. */ pll->wakeref = intel_display_power_get(dev_priv, - POWER_DOMAIN_DPLL_DC_OFF); + POWER_DOMAIN_DC_OFF); } icl_pll_power_enable(dev_priv, pll, enable_reg); @@ -3847,7 +3848,7 @@ static void combo_pll_disable(struct drm_i915_private *dev_priv, if (IS_JSL_EHL(dev_priv) && pll->info->id == DPLL_ID_EHL_DPLL4) - intel_display_power_put(dev_priv, POWER_DOMAIN_DPLL_DC_OFF, + intel_display_power_put(dev_priv, POWER_DOMAIN_DC_OFF, pll->wakeref); } @@ -4231,7 +4232,7 @@ static void readout_dpll_hw_state(struct drm_i915_private *i915, if (IS_JSL_EHL(i915) && pll->on && pll->info->id == DPLL_ID_EHL_DPLL4) { pll->wakeref = intel_display_power_get(i915, - POWER_DOMAIN_DPLL_DC_OFF); + POWER_DOMAIN_DC_OFF); } pll->state.pipe_mask = 0; diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h index 2f59d863be4c..ef2889753807 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h @@ -27,7 +27,6 @@ #include <linux/types.h> -#include "intel_display.h" #include "intel_wakeref.h" /*FIXME: Move this to a more appropriate place. */ @@ -37,6 +36,7 @@ (void) (&__a == &__b); \ __a > __b ? (__a - __b) : (__b - __a); }) +enum tc_port; struct drm_device; struct drm_i915_private; struct intel_atomic_state; diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c index 8f7b1f7534a4..963ca7155b06 100644 --- a/drivers/gpu/drm/i915/display/intel_dpt.c +++ b/drivers/gpu/drm/i915/display/intel_dpt.c @@ -167,6 +167,64 @@ void intel_dpt_unpin(struct i915_address_space *vm) i915_vma_put(dpt->vma); } +/** + * intel_dpt_resume - restore the memory mapping for all DPT FBs during system resume + * @i915: device instance + * + * Restore the memory mapping during system resume for all framebuffers which + * are mapped to HW via a GGTT->DPT page table. 
The content of these page + * tables are not stored in the hibernation image during S4 and S3RST->S4 + * transitions, so here we reprogram the PTE entries in those tables. + * + * This function must be called after the mappings in GGTT have been restored calling + * i915_ggtt_resume(). + */ +void intel_dpt_resume(struct drm_i915_private *i915) +{ + struct drm_framebuffer *drm_fb; + + if (!HAS_DISPLAY(i915)) + return; + + mutex_lock(&i915->drm.mode_config.fb_lock); + drm_for_each_fb(drm_fb, &i915->drm) { + struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb); + + if (fb->dpt_vm) + i915_ggtt_resume_vm(fb->dpt_vm); + } + mutex_unlock(&i915->drm.mode_config.fb_lock); +} + +/** + * intel_dpt_suspend - suspend the memory mapping for all DPT FBs during system suspend + * @i915: device instance + * + * Suspend the memory mapping during system suspend for all framebuffers which + * are mapped to HW via a GGTT->DPT page table. + * + * This function must be called before the mappings in GGTT are suspended calling + * i915_ggtt_suspend(). + */ +void intel_dpt_suspend(struct drm_i915_private *i915) +{ + struct drm_framebuffer *drm_fb; + + if (!HAS_DISPLAY(i915)) + return; + + mutex_lock(&i915->drm.mode_config.fb_lock); + + drm_for_each_fb(drm_fb, &i915->drm) { + struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb); + + if (fb->dpt_vm) + i915_ggtt_suspend_vm(fb->dpt_vm); + } + + mutex_unlock(&i915->drm.mode_config.fb_lock); +} + struct i915_address_space * intel_dpt_create(struct intel_framebuffer *fb) { diff --git a/drivers/gpu/drm/i915/display/intel_dpt.h b/drivers/gpu/drm/i915/display/intel_dpt.h index 45142b8f849f..e18a9f767b11 100644 --- a/drivers/gpu/drm/i915/display/intel_dpt.h +++ b/drivers/gpu/drm/i915/display/intel_dpt.h @@ -6,6 +6,8 @@ #ifndef __INTEL_DPT_H__ #define __INTEL_DPT_H__ +struct drm_i915_private; + struct i915_address_space; struct i915_vma; struct intel_framebuffer; @@ -13,6 +15,8 @@ struct intel_framebuffer; void intel_dpt_destroy(struct i915_address_space *vm); struct i915_vma *intel_dpt_pin(struct i915_address_space *vm); void intel_dpt_unpin(struct i915_address_space *vm); +void intel_dpt_suspend(struct drm_i915_private *i915); +void intel_dpt_resume(struct drm_i915_private *i915); struct i915_address_space * intel_dpt_create(struct intel_framebuffer *fb); diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c index 62a8a69f9f5d..83a69a4a4fea 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.c +++ b/drivers/gpu/drm/i915/display/intel_dsb.c @@ -100,7 +100,7 @@ void intel_dsb_indexed_reg_write(const struct intel_crtc_state *crtc_state, u32 reg_val; if (!dsb) { - intel_de_write(dev_priv, reg, val); + intel_de_write_fw(dev_priv, reg, val); return; } buf = dsb->cmd_buf; @@ -177,7 +177,7 @@ void intel_dsb_reg_write(const struct intel_crtc_state *crtc_state, dsb = crtc_state->dsb; if (!dsb) { - intel_de_write(dev_priv, reg, val); + intel_de_write_fw(dev_priv, reg, val); return; } diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h index fbc40ffdc02e..a3a906cb097e 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi.h +++ b/drivers/gpu/drm/i915/display/intel_dsi.h @@ -166,57 +166,15 @@ static inline u16 intel_dsi_encoder_ports(struct intel_encoder *encoder) return enc_to_intel_dsi(encoder)->ports; } -/* icl_dsi.c */ -void icl_dsi_init(struct drm_i915_private *dev_priv); -void icl_dsi_frame_update(struct intel_crtc_state *crtc_state); - -/* intel_dsi.c */ int intel_dsi_bitrate(const 
struct intel_dsi *intel_dsi); int intel_dsi_tlpx_ns(const struct intel_dsi *intel_dsi); enum drm_panel_orientation intel_dsi_get_panel_orientation(struct intel_connector *connector); - -/* vlv_dsi.c */ -void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port); -enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt); int intel_dsi_get_modes(struct drm_connector *connector); enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode); struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi, const struct mipi_dsi_host_ops *funcs, enum port port); -void vlv_dsi_init(struct drm_i915_private *dev_priv); - -/* vlv_dsi_pll.c */ -int vlv_dsi_pll_compute(struct intel_encoder *encoder, - struct intel_crtc_state *config); -void vlv_dsi_pll_enable(struct intel_encoder *encoder, - const struct intel_crtc_state *config); -void vlv_dsi_pll_disable(struct intel_encoder *encoder); -u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, - struct intel_crtc_state *config); -void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); - -bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv); -int bxt_dsi_pll_compute(struct intel_encoder *encoder, - struct intel_crtc_state *config); -void bxt_dsi_pll_enable(struct intel_encoder *encoder, - const struct intel_crtc_state *config); -void bxt_dsi_pll_disable(struct intel_encoder *encoder); -u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, - struct intel_crtc_state *config); -void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); - -void assert_dsi_pll_enabled(struct drm_i915_private *i915); -void assert_dsi_pll_disabled(struct drm_i915_private *i915); - -/* intel_dsi_vbt.c */ -bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id); -void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on); -void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi); -void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi, - enum mipi_seq seq_id); -void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec); -void intel_dsi_log_params(struct intel_dsi *intel_dsi); #endif /* _INTEL_DSI_H */ diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c index f61ed82e8867..7d234429e71e 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c @@ -71,6 +71,7 @@ static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32 u8 data[2] = {}; enum port port; size_t len = panel->backlight.max > U8_MAX ? 
2 : 1; + unsigned long mode_flags; if (len == 1) { data[0] = level; @@ -81,8 +82,11 @@ static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32 for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) { dsi_device = intel_dsi->dsi_hosts[port]->device; + mode_flags = dsi_device->mode_flags; + dsi_device->mode_flags &= ~MIPI_DSI_MODE_LPM; mipi_dsi_dcs_write(dsi_device, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, &data, len); + dsi_device->mode_flags = mode_flags; } } diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c index f241bedb8597..0da91849efde 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c @@ -41,6 +41,8 @@ #include "i915_drv.h" #include "intel_display_types.h" #include "intel_dsi.h" +#include "intel_dsi_vbt.h" +#include "vlv_dsi.h" #include "vlv_sideband.h" #define MIPI_TRANSFER_MODE_SHIFT 0 diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.h b/drivers/gpu/drm/i915/display/intel_dsi_vbt.h new file mode 100644 index 000000000000..dc642c1fe7ef --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __INTEL_DSI_VBT_H__ +#define __INTEL_DSI_VBT_H__ + +#include <linux/types.h> + +enum mipi_seq; +struct intel_dsi; + +bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id); +void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on); +void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi); +void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi, + enum mipi_seq seq_id); +void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec); +void intel_dsi_log_params(struct intel_dsi *intel_dsi); + +#endif /* __INTEL_DSI_VBT_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c index cb511b2b7069..c4a743d0913f 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fb.c @@ -13,26 +13,465 @@ #define check_array_bounds(i915, a, i) drm_WARN_ON(&(i915)->drm, (i) >= ARRAY_SIZE(a)) -bool is_ccs_plane(const struct drm_framebuffer *fb, int plane) +/* + * From the Sky Lake PRM: + * "The Color Control Surface (CCS) contains the compression status of + * the cache-line pairs. The compression state of the cache-line pair + * is specified by 2 bits in the CCS. Each CCS cache-line represents + * an area on the main surface of 16 x16 sets of 128 byte Y-tiled + * cache-line-pairs. CCS is always Y tiled." + * + * Since cache line pairs refers to horizontally adjacent cache lines, + * each cache line in the CCS corresponds to an area of 32x16 cache + * lines on the main surface. Since each pixel is 4 bytes, this gives + * us a ratio of one byte in the CCS for each 8x16 pixels in the + * main surface. + */ +static const struct drm_format_info skl_ccs_formats[] = { + { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, + .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, + { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, + .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, + { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, + .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, + { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, + .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, }, +}; + +/* + * Gen-12 compression uses 4 bits of CCS data for each cache line pair in the + * main surface. 
And each 64B CCS cache line represents an area of 4x1 Y-tiles + * in the main surface. With 4 byte pixels and each Y-tile having dimensions of + * 32x32 pixels, the ratio turns out to 1B in the CCS for every 2x32 pixels in + * the main surface. + */ +static const struct drm_format_info gen12_ccs_formats[] = { + { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_YUYV, .num_planes = 2, + .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_YVYU, .num_planes = 2, + .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_UYVY, .num_planes = 2, + .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_VYUY, .num_planes = 2, + .char_per_block = { 2, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 2, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_XYUV8888, .num_planes = 2, + .char_per_block = { 4, 1 }, .block_w = { 1, 2 }, .block_h = { 1, 1 }, + .hsub = 1, .vsub = 1, .is_yuv = true }, + { .format = DRM_FORMAT_NV12, .num_planes = 4, + .char_per_block = { 1, 2, 1, 1 }, .block_w = { 1, 1, 4, 4 }, .block_h = { 1, 1, 1, 1 }, + .hsub = 2, .vsub = 2, .is_yuv = true }, + { .format = DRM_FORMAT_P010, .num_planes = 4, + .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, + .hsub = 2, .vsub = 2, .is_yuv = true }, + { .format = DRM_FORMAT_P012, .num_planes = 4, + .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, + .hsub = 2, .vsub = 2, .is_yuv = true }, + { .format = DRM_FORMAT_P016, .num_planes = 4, + .char_per_block = { 2, 4, 1, 1 }, .block_w = { 1, 1, 2, 2 }, .block_h = { 1, 1, 1, 1 }, + .hsub = 2, .vsub = 2, .is_yuv = true }, +}; + +/* + * Same as gen12_ccs_formats[] above, but with additional surface used + * to pass Clear Color information in plane 2 with 64 bits of data. 
+ */ +static const struct drm_format_info gen12_ccs_cc_formats[] = { + { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 3, + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 3, + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, + .hsub = 1, .vsub = 1, }, + { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 3, + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 3, + .char_per_block = { 4, 1, 0 }, .block_w = { 1, 2, 2 }, .block_h = { 1, 1, 1 }, + .hsub = 1, .vsub = 1, .has_alpha = true }, +}; + +struct intel_modifier_desc { + u64 modifier; + struct { + u8 from; + u8 until; + } display_ver; +#define DISPLAY_VER_ALL { 0, -1 } + + const struct drm_format_info *formats; + int format_count; +#define FORMAT_OVERRIDE(format_list) \ + .formats = format_list, \ + .format_count = ARRAY_SIZE(format_list) + + u8 plane_caps; + + struct { + u8 cc_planes:3; + u8 packed_aux_planes:4; + u8 planar_aux_planes:4; + } ccs; +}; + +#define INTEL_PLANE_CAP_CCS_MASK (INTEL_PLANE_CAP_CCS_RC | \ + INTEL_PLANE_CAP_CCS_RC_CC | \ + INTEL_PLANE_CAP_CCS_MC) +#define INTEL_PLANE_CAP_TILING_MASK (INTEL_PLANE_CAP_TILING_X | \ + INTEL_PLANE_CAP_TILING_Y | \ + INTEL_PLANE_CAP_TILING_Yf) +#define INTEL_PLANE_CAP_TILING_NONE 0 + +static const struct intel_modifier_desc intel_modifiers[] = { + { + .modifier = I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS, + .display_ver = { 12, 13 }, + .plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_MC, + + .ccs.packed_aux_planes = BIT(1), + .ccs.planar_aux_planes = BIT(2) | BIT(3), + + FORMAT_OVERRIDE(gen12_ccs_formats), + }, { + .modifier = I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS, + .display_ver = { 12, 13 }, + .plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_RC, + + .ccs.packed_aux_planes = BIT(1), + + FORMAT_OVERRIDE(gen12_ccs_formats), + }, { + .modifier = I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC, + .display_ver = { 12, 13 }, + .plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_RC_CC, + + .ccs.cc_planes = BIT(2), + .ccs.packed_aux_planes = BIT(1), + + FORMAT_OVERRIDE(gen12_ccs_cc_formats), + }, { + .modifier = I915_FORMAT_MOD_Yf_TILED_CCS, + .display_ver = { 9, 11 }, + .plane_caps = INTEL_PLANE_CAP_TILING_Yf | INTEL_PLANE_CAP_CCS_RC, + + .ccs.packed_aux_planes = BIT(1), + + FORMAT_OVERRIDE(skl_ccs_formats), + }, { + .modifier = I915_FORMAT_MOD_Y_TILED_CCS, + .display_ver = { 9, 11 }, + .plane_caps = INTEL_PLANE_CAP_TILING_Y | INTEL_PLANE_CAP_CCS_RC, + + .ccs.packed_aux_planes = BIT(1), + + FORMAT_OVERRIDE(skl_ccs_formats), + }, { + .modifier = I915_FORMAT_MOD_Yf_TILED, + .display_ver = { 9, 11 }, + .plane_caps = INTEL_PLANE_CAP_TILING_Yf, + }, { + .modifier = I915_FORMAT_MOD_Y_TILED, + .display_ver = { 9, 13 }, + .plane_caps = INTEL_PLANE_CAP_TILING_Y, + }, { + .modifier = I915_FORMAT_MOD_X_TILED, + .display_ver = DISPLAY_VER_ALL, + .plane_caps = INTEL_PLANE_CAP_TILING_X, + }, { + .modifier = DRM_FORMAT_MOD_LINEAR, + .display_ver = DISPLAY_VER_ALL, + }, +}; + +static const struct intel_modifier_desc *lookup_modifier_or_null(u64 modifier) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(intel_modifiers); i++) + if (intel_modifiers[i].modifier == modifier) + return &intel_modifiers[i]; + + return NULL; +} + +static const struct intel_modifier_desc *lookup_modifier(u64 modifier) 
+{ + const struct intel_modifier_desc *md = lookup_modifier_or_null(modifier); + + if (WARN_ON(!md)) + return &intel_modifiers[0]; + + return md; +} + +static const struct drm_format_info * +lookup_format_info(const struct drm_format_info formats[], + int num_formats, u32 format) +{ + int i; + + for (i = 0; i < num_formats; i++) { + if (formats[i].format == format) + return &formats[i]; + } + + return NULL; +} + +/** + * intel_fb_get_format_info: Get a modifier specific format information + * @cmd: FB add command structure + * + * Returns: + * Returns the format information for @cmd->pixel_format specific to @cmd->modifier[0], + * or %NULL if the modifier doesn't override the format. + */ +const struct drm_format_info * +intel_fb_get_format_info(const struct drm_mode_fb_cmd2 *cmd) +{ + const struct intel_modifier_desc *md = lookup_modifier_or_null(cmd->modifier[0]); + + if (!md || !md->formats) + return NULL; + + return lookup_format_info(md->formats, md->format_count, cmd->pixel_format); +} + +static bool plane_caps_contain_any(u8 caps, u8 mask) +{ + return caps & mask; +} + +static bool plane_caps_contain_all(u8 caps, u8 mask) +{ + return (caps & mask) == mask; +} + +/** + * intel_fb_is_ccs_modifier: Check if a modifier is a CCS modifier type + * @modifier: Modifier to check + * + * Returns: + * Returns %true if @modifier is a render, render with color clear or + * media compression modifier. + */ +bool intel_fb_is_ccs_modifier(u64 modifier) +{ + return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps, + INTEL_PLANE_CAP_CCS_MASK); +} + +/** + * intel_fb_is_rc_ccs_cc_modifier: Check if a modifier is an RC CCS CC modifier type + * @modifier: Modifier to check + * + * Returns: + * Returns %true if @modifier is a render with color clear modifier. + */ +bool intel_fb_is_rc_ccs_cc_modifier(u64 modifier) +{ + return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps, + INTEL_PLANE_CAP_CCS_RC_CC); +} + +/** + * intel_fb_is_mc_ccs_modifier: Check if a modifier is an MC CCS modifier type + * @modifier: Modifier to check + * + * Returns: + * Returns %true if @modifier is a media compression modifier. + */ +bool intel_fb_is_mc_ccs_modifier(u64 modifier) +{ + return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps, + INTEL_PLANE_CAP_CCS_MC); +} + +static bool check_modifier_display_ver_range(const struct intel_modifier_desc *md, + u8 display_ver_from, u8 display_ver_until) +{ + return md->display_ver.from <= display_ver_until && + display_ver_from <= md->display_ver.until; +} + +static bool plane_has_modifier(struct drm_i915_private *i915, + u8 plane_caps, + const struct intel_modifier_desc *md) +{ + if (!IS_DISPLAY_VER(i915, md->display_ver.from, md->display_ver.until)) + return false; + + if (!plane_caps_contain_all(plane_caps, md->plane_caps)) + return false; + + return true; +} + +/** + * intel_fb_plane_get_modifiers: Get the modifiers for the given platform and plane capabilities + * @i915: i915 device instance + * @plane_caps: capabilities for the plane the modifiers are queried for + * + * Returns: + * Returns the list of modifiers allowed by the @i915 platform and @plane_caps. + * The caller must free the returned buffer. 
+ */ +u64 *intel_fb_plane_get_modifiers(struct drm_i915_private *i915, + u8 plane_caps) +{ + u64 *list, *p; + int count = 1; /* +1 for invalid modifier terminator */ + int i; + + for (i = 0; i < ARRAY_SIZE(intel_modifiers); i++) { + if (plane_has_modifier(i915, plane_caps, &intel_modifiers[i])) + count++; + } + + list = kmalloc_array(count, sizeof(*list), GFP_KERNEL); + if (drm_WARN_ON(&i915->drm, !list)) + return NULL; + + p = list; + for (i = 0; i < ARRAY_SIZE(intel_modifiers); i++) { + if (plane_has_modifier(i915, plane_caps, &intel_modifiers[i])) + *p++ = intel_modifiers[i].modifier; + } + *p++ = DRM_FORMAT_MOD_INVALID; + + return list; +} + +/** + * intel_fb_plane_supports_modifier: Determine if a modifier is supported by the given plane + * @plane: Plane to check the modifier support for + * @modifier: The modifier to check the support for + * + * Returns: + * %true if the @modifier is supported on @plane. + */ +bool intel_fb_plane_supports_modifier(struct intel_plane *plane, u64 modifier) +{ + int i; + + for (i = 0; i < plane->base.modifier_count; i++) + if (plane->base.modifiers[i] == modifier) + return true; + + return false; +} + +static bool format_is_yuv_semiplanar(const struct intel_modifier_desc *md, + const struct drm_format_info *info) { - if (!is_ccs_modifier(fb->modifier)) + int yuv_planes; + + if (!info->is_yuv) return false; - return plane >= fb->format->num_planes / 2; + if (plane_caps_contain_any(md->plane_caps, INTEL_PLANE_CAP_CCS_MASK)) + yuv_planes = 4; + else + yuv_planes = 2; + + return info->num_planes == yuv_planes; +} + +/** + * intel_format_info_is_yuv_semiplanar: Check if the given format is YUV semiplanar + * @info: format to check + * @modifier: modifier used with the format + * + * Returns: + * %true if @info / @modifier is YUV semiplanar. + */ +bool intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, + u64 modifier) +{ + return format_is_yuv_semiplanar(lookup_modifier(modifier), info); +} + +static u8 ccs_aux_plane_mask(const struct intel_modifier_desc *md, + const struct drm_format_info *format) +{ + if (format_is_yuv_semiplanar(md, format)) + return md->ccs.planar_aux_planes; + else + return md->ccs.packed_aux_planes; } -bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane) +/** + * intel_fb_is_ccs_aux_plane: Check if a framebuffer color plane is a CCS AUX plane + * @fb: Framebuffer + * @color_plane: color plane index to check + * + * Returns: + * Returns %true if @fb's color plane at index @color_plane is a CCS AUX plane. + */ +bool intel_fb_is_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane) { - return is_gen12_ccs_modifier(fb->modifier) && is_ccs_plane(fb, plane); + const struct intel_modifier_desc *md = lookup_modifier(fb->modifier); + + return ccs_aux_plane_mask(md, fb->format) & BIT(color_plane); } -bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int plane) +/** + * intel_fb_is_gen12_ccs_aux_plane: Check if a framebuffer color plane is a GEN12 CCS AUX plane + * @fb: Framebuffer + * @color_plane: color plane index to check + * + * Returns: + * Returns %true if @fb's color plane at index @color_plane is a GEN12 CCS AUX plane. 
+ */ +static bool intel_fb_is_gen12_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane) { - return fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC && - plane == 2; + const struct intel_modifier_desc *md = lookup_modifier(fb->modifier); + + return check_modifier_display_ver_range(md, 12, 13) && + ccs_aux_plane_mask(md, fb->format) & BIT(color_plane); } -bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, int color_plane) +/** + * intel_fb_rc_ccs_cc_plane: Get the CCS CC color plane index for a framebuffer + * @fb: Framebuffer + * + * Returns: + * Returns the index of the color clear plane for @fb, or -1 if @fb is not a + * framebuffer using a render compression/color clear modifier. + */ +int intel_fb_rc_ccs_cc_plane(const struct drm_framebuffer *fb) +{ + const struct intel_modifier_desc *md = lookup_modifier(fb->modifier); + + if (!md->ccs.cc_planes) + return -1; + + drm_WARN_ON_ONCE(fb->dev, hweight8(md->ccs.cc_planes) > 1); + + return ilog2((int)md->ccs.cc_planes); +} + +static bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int color_plane) +{ + return intel_fb_rc_ccs_cc_plane(fb) == color_plane; +} + +static bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, int color_plane) { return intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) && color_plane == 1; @@ -41,12 +480,13 @@ bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, int color_plane) bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane) { return fb->modifier == DRM_FORMAT_MOD_LINEAR || - is_gen12_ccs_plane(fb, color_plane); + intel_fb_is_gen12_ccs_aux_plane(fb, color_plane) || + is_gen12_ccs_cc_plane(fb, color_plane); } int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane) { - drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) || + drm_WARN_ON(fb->dev, !intel_fb_is_ccs_modifier(fb->modifier) || (main_plane && main_plane >= fb->format->num_planes / 2)); return fb->format->num_planes / 2 + main_plane; @@ -54,7 +494,7 @@ int main_to_ccs_plane(const struct drm_framebuffer *fb, int main_plane) int skl_ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane) { - drm_WARN_ON(fb->dev, !is_ccs_modifier(fb->modifier) || + drm_WARN_ON(fb->dev, !intel_fb_is_ccs_modifier(fb->modifier) || ccs_plane < fb->format->num_planes / 2); if (is_gen12_ccs_cc_plane(fb, ccs_plane)) @@ -63,35 +503,12 @@ int skl_ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane) return ccs_plane - fb->format->num_planes / 2; } -static unsigned int gen12_aligned_scanout_stride(const struct intel_framebuffer *fb, - int color_plane) -{ - struct drm_i915_private *i915 = to_i915(fb->base.dev); - unsigned int stride = fb->base.pitches[color_plane]; - - if (IS_ALDERLAKE_P(i915)) - return roundup_pow_of_two(max(stride, - 8u * intel_tile_width_bytes(&fb->base, color_plane))); - - return stride; -} - static unsigned int gen12_ccs_aux_stride(struct intel_framebuffer *fb, int ccs_plane) { - struct drm_i915_private *i915 = to_i915(fb->base.dev); int main_plane = skl_ccs_to_main_plane(&fb->base, ccs_plane); unsigned int main_stride = fb->base.pitches[main_plane]; unsigned int main_tile_width = intel_tile_width_bytes(&fb->base, main_plane); - /* - * On ADL-P the AUX stride must align with a power-of-two aligned main - * surface stride. The stride of the allocated main surface object can - * be less than this POT stride, which is then autopadded to the POT - * size. 
- */ - if (IS_ALDERLAKE_P(i915)) - main_stride = gen12_aligned_scanout_stride(fb, main_plane); - return DIV_ROUND_UP(main_stride, 4 * main_tile_width) * 64; } @@ -99,7 +516,7 @@ int skl_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane) { struct drm_i915_private *i915 = to_i915(fb->dev); - if (is_ccs_modifier(fb->modifier)) + if (intel_fb_is_ccs_modifier(fb->modifier)) return main_to_ccs_plane(fb, main_plane); else if (DISPLAY_VER(i915) < 11 && intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) @@ -128,13 +545,14 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane) else return 512; case I915_FORMAT_MOD_Y_TILED_CCS: - if (is_ccs_plane(fb, color_plane)) + if (intel_fb_is_ccs_aux_plane(fb, color_plane)) return 128; fallthrough; case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: - if (is_ccs_plane(fb, color_plane)) + if (intel_fb_is_ccs_aux_plane(fb, color_plane) || + is_gen12_ccs_cc_plane(fb, color_plane)) return 64; fallthrough; case I915_FORMAT_MOD_Y_TILED: @@ -143,7 +561,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane) else return 512; case I915_FORMAT_MOD_Yf_TILED_CCS: - if (is_ccs_plane(fb, color_plane)) + if (intel_fb_is_ccs_aux_plane(fb, color_plane)) return 128; fallthrough; case I915_FORMAT_MOD_Yf_TILED: @@ -199,7 +617,7 @@ static void intel_tile_block_dims(const struct drm_framebuffer *fb, int color_pl { intel_tile_dims(fb, color_plane, tile_width, tile_height); - if (is_gen12_ccs_plane(fb, color_plane)) + if (intel_fb_is_gen12_ccs_aux_plane(fb, color_plane)) *tile_height = 1; } @@ -223,16 +641,19 @@ intel_fb_align_height(const struct drm_framebuffer *fb, static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier) { - switch (fb_modifier) { - case I915_FORMAT_MOD_X_TILED: - return I915_TILING_X; - case I915_FORMAT_MOD_Y_TILED: - case I915_FORMAT_MOD_Y_TILED_CCS: - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: - case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: + u8 tiling_caps = lookup_modifier(fb_modifier)->plane_caps & + INTEL_PLANE_CAP_TILING_MASK; + + switch (tiling_caps) { + case INTEL_PLANE_CAP_TILING_Y: return I915_TILING_Y; + case INTEL_PLANE_CAP_TILING_X: + return I915_TILING_X; + case INTEL_PLANE_CAP_TILING_Yf: + case INTEL_PLANE_CAP_TILING_NONE: + return I915_TILING_NONE; default: + MISSING_CASE(tiling_caps); return I915_TILING_NONE; } } @@ -271,7 +692,7 @@ unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, return 512 * 4096; /* AUX_DIST needs only 4K alignment */ - if (is_ccs_plane(fb, color_plane)) + if (intel_fb_is_ccs_aux_plane(fb, color_plane)) return 4096; if (is_semiplanar_uv_plane(fb, color_plane)) { @@ -330,7 +751,7 @@ void intel_fb_plane_get_subsampling(int *hsub, int *vsub, * TODO: Deduct the subsampling from the char block for all CCS * formats and planes. */ - if (!is_gen12_ccs_plane(fb, color_plane)) { + if (!intel_fb_is_gen12_ccs_aux_plane(fb, color_plane)) { *hsub = fb->format->hsub; *vsub = fb->format->vsub; @@ -357,24 +778,13 @@ void intel_fb_plane_get_subsampling(int *hsub, int *vsub, static void intel_fb_plane_dims(const struct intel_framebuffer *fb, int color_plane, int *w, int *h) { - struct drm_i915_private *i915 = to_i915(fb->base.dev); - int main_plane = is_ccs_plane(&fb->base, color_plane) ? + int main_plane = intel_fb_is_ccs_aux_plane(&fb->base, color_plane) ? 
skl_ccs_to_main_plane(&fb->base, color_plane) : 0; unsigned int main_width = fb->base.width; unsigned int main_height = fb->base.height; int main_hsub, main_vsub; int hsub, vsub; - /* - * On ADL-P the CCS AUX surface layout always aligns with the - * power-of-two aligned main surface stride. The main surface - * stride in the allocated FB object may not be power-of-two - * sized, in which case it is auto-padded to the POT size. - */ - if (IS_ALDERLAKE_P(i915) && is_ccs_plane(&fb->base, color_plane)) - main_width = gen12_aligned_scanout_stride(fb, 0) / - fb->base.format->cpp[0]; - intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, &fb->base, main_plane); intel_fb_plane_get_subsampling(&hsub, &vsub, &fb->base, color_plane); @@ -409,6 +819,20 @@ static u32 intel_adjust_tile_offset(int *x, int *y, return new_offset; } +static u32 intel_adjust_linear_offset(int *x, int *y, + unsigned int cpp, + unsigned int pitch, + u32 old_offset, + u32 new_offset) +{ + old_offset += *y * pitch + *x * cpp; + + *y = (old_offset - new_offset) / pitch; + *x = ((old_offset - new_offset) - *y * pitch) / cpp; + + return new_offset; +} + static u32 intel_adjust_aligned_offset(int *x, int *y, const struct drm_framebuffer *fb, int color_plane, @@ -439,10 +863,8 @@ static u32 intel_adjust_aligned_offset(int *x, int *y, tile_size, pitch_tiles, old_offset, new_offset); } else { - old_offset += *y * pitch + *x * cpp; - - *y = (old_offset - new_offset) / pitch; - *x = ((old_offset - new_offset) - *y * pitch) / cpp; + intel_adjust_linear_offset(x, y, cpp, pitch, + old_offset, new_offset); } return new_offset; @@ -459,7 +881,7 @@ u32 intel_plane_adjust_aligned_offset(int *x, int *y, { return intel_adjust_aligned_offset(x, y, state->hw.fb, color_plane, state->hw.rotation, - state->view.color_plane[color_plane].stride, + state->view.color_plane[color_plane].mapping_stride, old_offset, new_offset); } @@ -540,7 +962,7 @@ u32 intel_plane_compute_aligned_offset(int *x, int *y, struct drm_i915_private *i915 = to_i915(intel_plane->base.dev); const struct drm_framebuffer *fb = state->hw.fb; unsigned int rotation = state->hw.rotation; - int pitch = state->view.color_plane[color_plane].stride; + int pitch = state->view.color_plane[color_plane].mapping_stride; u32 alignment; if (intel_plane->id == PLANE_CURSOR) @@ -562,6 +984,7 @@ static int intel_fb_offset_to_xy(int *x, int *y, u32 alignment; if (DISPLAY_VER(i915) >= 12 && + !intel_fb_needs_pot_stride_remap(to_intel_framebuffer(fb)) && is_semiplanar_uv_plane(fb, color_plane)) alignment = intel_tile_row_size(fb, color_plane); else if (fb->modifier != DRM_FORMAT_MOD_LINEAR) @@ -610,7 +1033,7 @@ static int intel_fb_check_ccs_xy(const struct drm_framebuffer *fb, int ccs_plane int ccs_x, ccs_y; int main_x, main_y; - if (!is_ccs_plane(fb, ccs_plane) || is_gen12_ccs_cc_plane(fb, ccs_plane)) + if (!intel_fb_is_ccs_aux_plane(fb, ccs_plane)) return 0; /* @@ -673,7 +1096,7 @@ static bool intel_plane_can_remap(const struct intel_plane_state *plane_state) * The new CCS hash mode isn't compatible with remapping as * the virtual address of the pages affects the compressed data. 
*/ - if (is_ccs_modifier(fb->modifier)) + if (intel_fb_is_ccs_modifier(fb->modifier)) return false; /* Linear needs a page aligned stride for remapping */ @@ -699,11 +1122,11 @@ bool intel_fb_needs_pot_stride_remap(const struct intel_framebuffer *fb) static int intel_fb_pitch(const struct intel_framebuffer *fb, int color_plane, unsigned int rotation) { if (drm_rotation_90_or_270(rotation)) - return fb->rotated_view.color_plane[color_plane].stride; + return fb->rotated_view.color_plane[color_plane].mapping_stride; else if (intel_fb_needs_pot_stride_remap(fb)) - return fb->remapped_view.color_plane[color_plane].stride; + return fb->remapped_view.color_plane[color_plane].mapping_stride; else - return fb->normal_view.color_plane[color_plane].stride; + return fb->normal_view.color_plane[color_plane].mapping_stride; } static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state) @@ -814,18 +1237,32 @@ plane_view_dst_stride_tiles(const struct intel_framebuffer *fb, int color_plane, unsigned int pitch_tiles) { if (intel_fb_needs_pot_stride_remap(fb)) { - unsigned int min_stride = is_ccs_plane(&fb->base, color_plane) ? 2 : 8; /* * ADL_P, the only platform needing a POT stride has a minimum - * of 8 main surface and 2 CCS AUX stride tiles. + * of 8 main surface tiles. */ - return roundup_pow_of_two(max(pitch_tiles, min_stride)); + return roundup_pow_of_two(max(pitch_tiles, 8u)); } else { return pitch_tiles; } } static unsigned int +plane_view_scanout_stride(const struct intel_framebuffer *fb, int color_plane, + unsigned int tile_width, + unsigned int src_stride_tiles, unsigned int dst_stride_tiles) +{ + unsigned int stride_tiles; + + if (IS_ALDERLAKE_P(to_i915(fb->base.dev))) + stride_tiles = src_stride_tiles; + else + stride_tiles = dst_stride_tiles; + + return stride_tiles * tile_width * fb->base.format->cpp[color_plane]; +} + +static unsigned int plane_view_width_tiles(const struct intel_framebuffer *fb, int color_plane, const struct fb_plane_view_dims *dims, int x) @@ -841,11 +1278,31 @@ plane_view_height_tiles(const struct intel_framebuffer *fb, int color_plane, return DIV_ROUND_UP(y + dims->height, dims->tile_height); } +static unsigned int +plane_view_linear_tiles(const struct intel_framebuffer *fb, int color_plane, + const struct fb_plane_view_dims *dims, + int x, int y) +{ + struct drm_i915_private *i915 = to_i915(fb->base.dev); + unsigned int size; + + size = (y + dims->height) * fb->base.pitches[color_plane] + + x * fb->base.format->cpp[color_plane]; + + return DIV_ROUND_UP(size, intel_tile_size(i915)); +} + #define assign_chk_ovf(i915, var, val) ({ \ drm_WARN_ON(&(i915)->drm, overflows_type(val, var)); \ (var) = (val); \ }) +#define assign_bfld_chk_ovf(i915, var, val) ({ \ + (var) = (val); \ + drm_WARN_ON(&(i915)->drm, (var) != (val)); \ + (var); \ +}) + static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_plane, const struct fb_plane_view_dims *dims, u32 obj_offset, u32 gtt_offset, int x, int y, @@ -860,12 +1317,26 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p struct drm_rect r; u32 size = 0; - assign_chk_ovf(i915, remap_info->offset, obj_offset); - assign_chk_ovf(i915, remap_info->src_stride, plane_view_src_stride_tiles(fb, color_plane, dims)); - assign_chk_ovf(i915, remap_info->width, plane_view_width_tiles(fb, color_plane, dims, x)); - assign_chk_ovf(i915, remap_info->height, plane_view_height_tiles(fb, color_plane, dims, y)); + assign_bfld_chk_ovf(i915, remap_info->offset, obj_offset); + + if 
(intel_fb_is_gen12_ccs_aux_plane(&fb->base, color_plane)) { + remap_info->linear = 1; + + assign_chk_ovf(i915, remap_info->size, + plane_view_linear_tiles(fb, color_plane, dims, x, y)); + } else { + remap_info->linear = 0; + + assign_chk_ovf(i915, remap_info->src_stride, + plane_view_src_stride_tiles(fb, color_plane, dims)); + assign_chk_ovf(i915, remap_info->width, + plane_view_width_tiles(fb, color_plane, dims, x)); + assign_chk_ovf(i915, remap_info->height, + plane_view_height_tiles(fb, color_plane, dims, y)); + } if (view->gtt.type == I915_GGTT_VIEW_ROTATED) { + drm_WARN_ON(&i915->drm, remap_info->linear); check_array_bounds(i915, view->gtt.rotated.plane, color_plane); assign_chk_ovf(i915, remap_info->dst_stride, @@ -881,7 +1352,8 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p color_plane_info->x = r.x1; color_plane_info->y = r.y1; - color_plane_info->stride = remap_info->dst_stride * tile_height; + color_plane_info->mapping_stride = remap_info->dst_stride * tile_height; + color_plane_info->scanout_stride = color_plane_info->mapping_stride; size += remap_info->dst_stride * remap_info->width; @@ -900,16 +1372,29 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p gtt_offset = aligned_offset; } - assign_chk_ovf(i915, remap_info->dst_stride, - plane_view_dst_stride_tiles(fb, color_plane, remap_info->width)); - color_plane_info->x = x; color_plane_info->y = y; - color_plane_info->stride = remap_info->dst_stride * tile_width * - fb->base.format->cpp[color_plane]; + if (remap_info->linear) { + color_plane_info->mapping_stride = fb->base.pitches[color_plane]; + color_plane_info->scanout_stride = color_plane_info->mapping_stride; - size += remap_info->dst_stride * remap_info->height; + size += remap_info->size; + } else { + unsigned int dst_stride = plane_view_dst_stride_tiles(fb, color_plane, + remap_info->width); + + assign_chk_ovf(i915, remap_info->dst_stride, dst_stride); + color_plane_info->mapping_stride = dst_stride * + tile_width * + fb->base.format->cpp[color_plane]; + color_plane_info->scanout_stride = + plane_view_scanout_stride(fb, color_plane, tile_width, + remap_info->src_stride, + dst_stride); + + size += dst_stride * remap_info->height; + } } /* @@ -917,10 +1402,16 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p * the x/y offsets. x,y will hold the first pixel of the framebuffer * plane from the start of the remapped/rotated gtt mapping. 
*/ - intel_adjust_tile_offset(&color_plane_info->x, &color_plane_info->y, - tile_width, tile_height, - tile_size, remap_info->dst_stride, - gtt_offset * tile_size, 0); + if (remap_info->linear) + intel_adjust_linear_offset(&color_plane_info->x, &color_plane_info->y, + fb->base.format->cpp[color_plane], + color_plane_info->mapping_stride, + gtt_offset * tile_size, 0); + else + intel_adjust_tile_offset(&color_plane_info->x, &color_plane_info->y, + tile_width, tile_height, + tile_size, remap_info->dst_stride, + gtt_offset * tile_size, 0); return size; } @@ -933,15 +1424,10 @@ calc_plane_normal_size(const struct intel_framebuffer *fb, int color_plane, const struct fb_plane_view_dims *dims, int x, int y) { - struct drm_i915_private *i915 = to_i915(fb->base.dev); unsigned int tiles; if (is_surface_linear(&fb->base, color_plane)) { - unsigned int size; - - size = (y + dims->height) * fb->base.pitches[color_plane] + - x * fb->base.format->cpp[color_plane]; - tiles = DIV_ROUND_UP(size, intel_tile_size(i915)); + tiles = plane_view_linear_tiles(fb, color_plane, dims, x, y); } else { tiles = plane_view_src_stride_tiles(fb, color_plane, dims) * plane_view_height_tiles(fb, color_plane, dims, y); @@ -1030,7 +1516,9 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer * */ fb->normal_view.color_plane[i].x = x; fb->normal_view.color_plane[i].y = y; - fb->normal_view.color_plane[i].stride = fb->base.pitches[i]; + fb->normal_view.color_plane[i].mapping_stride = fb->base.pitches[i]; + fb->normal_view.color_plane[i].scanout_stride = + fb->normal_view.color_plane[i].mapping_stride; offset = calc_plane_aligned_offset(fb, i, &x, &y); @@ -1080,7 +1568,7 @@ static void intel_plane_remap_gtt(struct intel_plane_state *plane_state) src_w = drm_rect_width(&plane_state->uapi.src) >> 16; src_h = drm_rect_height(&plane_state->uapi.src) >> 16; - drm_WARN_ON(&i915->drm, is_ccs_modifier(fb->modifier)); + drm_WARN_ON(&i915->drm, intel_fb_is_ccs_modifier(fb->modifier)); /* Make src coordinates relative to the viewport */ drm_rect_translate(&plane_state->uapi.src, @@ -1143,7 +1631,7 @@ u32 intel_fb_max_stride(struct drm_i915_private *dev_priv, * * The new CCS hash mode makes remapping impossible */ - if (DISPLAY_VER(dev_priv) < 4 || is_ccs_modifier(modifier) || + if (DISPLAY_VER(dev_priv) < 4 || intel_fb_is_ccs_modifier(modifier) || intel_modifier_uses_dpt(dev_priv, modifier)) return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier); else if (DISPLAY_VER(dev_priv) >= 7) @@ -1168,27 +1656,19 @@ intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane) * we need the stride to be page aligned. */ if (fb->pitches[color_plane] > max_stride && - !is_ccs_modifier(fb->modifier)) + !intel_fb_is_ccs_modifier(fb->modifier)) return intel_tile_size(dev_priv); else return 64; } tile_width = intel_tile_width_bytes(fb, color_plane); - if (is_ccs_modifier(fb->modifier)) { - /* - * On ADL-P the stride must be either 8 tiles or a stride - * that is aligned to 16 tiles, required by the 16 tiles = - * 64 kbyte CCS AUX PTE granularity, allowing CCS FBs to be - * remapped. - */ - if (IS_ALDERLAKE_P(dev_priv)) - tile_width *= fb->pitches[0] <= tile_width * 8 ? 8 : 16; + if (intel_fb_is_ccs_modifier(fb->modifier)) { /* * On TGL the surface stride must be 4 tile aligned, mapped by * one 64 byte cacheline on the CCS AUX surface. 
*/ - else if (DISPLAY_VER(dev_priv) >= 12) + if (DISPLAY_VER(dev_priv) >= 12) tile_width *= 4; /* * Display WA #0531: skl,bxt,kbl,glk @@ -1224,7 +1704,7 @@ static int intel_plane_check_stride(const struct intel_plane_state *plane_state) return 0; /* FIXME other color planes? */ - stride = plane_state->view.color_plane[0].stride; + stride = plane_state->view.color_plane[0].mapping_stride; max_stride = plane->max_stride(plane, fb->format->format, fb->modifier, rotation); @@ -1430,7 +1910,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb, goto err; } - if (is_gen12_ccs_plane(fb, i) && !is_gen12_ccs_cc_plane(fb, i)) { + if (intel_fb_is_gen12_ccs_aux_plane(fb, i)) { int ccs_aux_stride = gen12_ccs_aux_stride(intel_fb, i); if (fb->pitches[i] != ccs_aux_stride) { diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h index 1cbdd84502bd..b54997175d6d 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.h +++ b/drivers/gpu/drm/i915/display/intel_fb.h @@ -6,6 +6,7 @@ #ifndef __INTEL_FB_H__ #define __INTEL_FB_H__ +#include <linux/bits.h> #include <linux/types.h> struct drm_device; @@ -16,12 +17,34 @@ struct drm_i915_private; struct drm_mode_fb_cmd2; struct intel_fb_view; struct intel_framebuffer; +struct intel_plane; struct intel_plane_state; -bool is_ccs_plane(const struct drm_framebuffer *fb, int plane); -bool is_gen12_ccs_plane(const struct drm_framebuffer *fb, int plane); -bool is_gen12_ccs_cc_plane(const struct drm_framebuffer *fb, int plane); -bool is_semiplanar_uv_plane(const struct drm_framebuffer *fb, int color_plane); +#define INTEL_PLANE_CAP_NONE 0 +#define INTEL_PLANE_CAP_CCS_RC BIT(0) +#define INTEL_PLANE_CAP_CCS_RC_CC BIT(1) +#define INTEL_PLANE_CAP_CCS_MC BIT(2) +#define INTEL_PLANE_CAP_TILING_X BIT(3) +#define INTEL_PLANE_CAP_TILING_Y BIT(4) +#define INTEL_PLANE_CAP_TILING_Yf BIT(5) + +bool intel_fb_is_ccs_modifier(u64 modifier); +bool intel_fb_is_rc_ccs_cc_modifier(u64 modifier); +bool intel_fb_is_mc_ccs_modifier(u64 modifier); + +bool intel_fb_is_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane); +int intel_fb_rc_ccs_cc_plane(const struct drm_framebuffer *fb); + +u64 *intel_fb_plane_get_modifiers(struct drm_i915_private *i915, + u8 plane_caps); +bool intel_fb_plane_supports_modifier(struct intel_plane *plane, u64 modifier); + +const struct drm_format_info * +intel_fb_get_format_info(const struct drm_mode_fb_cmd2 *cmd); + +bool +intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, + u64 modifier); bool is_surface_linear(const struct drm_framebuffer *fb, int color_plane); diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c index 3f77f3013584..3b20f69e0240 100644 --- a/drivers/gpu/drm/i915/display/intel_fb_pin.c +++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c @@ -142,13 +142,11 @@ retry: if (ret) goto err; - if (!ret) { - vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment, - view, pinctl); - if (IS_ERR(vma)) { - ret = PTR_ERR(vma); - goto err_unpin; - } + vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment, + view, pinctl); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto err_unpin; } if (uses_fence && i915_vma_is_map_and_fenceable(vma)) { diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 1f66de77a6b1..614e8697c068 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -48,6 +48,16 @@ #include "intel_fbc.h" #include 
"intel_frontbuffer.h" +struct intel_fbc_funcs { + void (*activate)(struct intel_fbc *fbc); + void (*deactivate)(struct intel_fbc *fbc); + bool (*is_active)(struct intel_fbc *fbc); + bool (*is_compressing)(struct intel_fbc *fbc); + void (*nuke)(struct intel_fbc *fbc); + void (*program_cfb)(struct intel_fbc *fbc); + void (*set_false_color)(struct intel_fbc *fbc, bool enable); +}; + /* * For SKL+, the plane source size used by the hardware is based on the value we * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value @@ -68,7 +78,7 @@ static unsigned int intel_fbc_plane_stride(const struct intel_plane_state *plane const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int stride; - stride = plane_state->view.color_plane[0].stride; + stride = plane_state->view.color_plane[0].mapping_stride; if (!drm_rotation_90_or_270(plane_state->hw.rotation)) stride /= fb->format->cpp[0]; @@ -84,9 +94,10 @@ static unsigned int _intel_fbc_cfb_stride(const struct intel_fbc_state_cache *ca } /* minimum acceptable cfb stride in bytes, assuming 1:1 compression limit */ -static unsigned int skl_fbc_min_cfb_stride(struct drm_i915_private *i915, +static unsigned int skl_fbc_min_cfb_stride(struct intel_fbc *fbc, const struct intel_fbc_state_cache *cache) { + struct drm_i915_private *i915 = fbc->i915; unsigned int limit = 4; /* 1:4 compression limit is the worst case */ unsigned int cpp = 4; /* FBC always 4 bytes per pixel */ unsigned int height = 4; /* FBC segment is 4 lines */ @@ -113,9 +124,10 @@ static unsigned int skl_fbc_min_cfb_stride(struct drm_i915_private *i915, } /* properly aligned cfb stride in bytes, assuming 1:1 compression limit */ -static unsigned int intel_fbc_cfb_stride(struct drm_i915_private *i915, +static unsigned int intel_fbc_cfb_stride(struct intel_fbc *fbc, const struct intel_fbc_state_cache *cache) { + struct drm_i915_private *i915 = fbc->i915; unsigned int stride = _intel_fbc_cfb_stride(cache); /* @@ -124,98 +136,182 @@ static unsigned int intel_fbc_cfb_stride(struct drm_i915_private *i915, * that regardless of the compression limit we choose later. 
*/ if (DISPLAY_VER(i915) >= 9) - return max(ALIGN(stride, 512), skl_fbc_min_cfb_stride(i915, cache)); + return max(ALIGN(stride, 512), skl_fbc_min_cfb_stride(fbc, cache)); else return stride; } -static unsigned int intel_fbc_cfb_size(struct drm_i915_private *dev_priv, +static unsigned int intel_fbc_cfb_size(struct intel_fbc *fbc, const struct intel_fbc_state_cache *cache) { + struct drm_i915_private *i915 = fbc->i915; int lines = cache->plane.src_h; - if (DISPLAY_VER(dev_priv) == 7) + if (DISPLAY_VER(i915) == 7) lines = min(lines, 2048); - else if (DISPLAY_VER(dev_priv) >= 8) + else if (DISPLAY_VER(i915) >= 8) lines = min(lines, 2560); - return lines * intel_fbc_cfb_stride(dev_priv, cache); + return lines * intel_fbc_cfb_stride(fbc, cache); } -static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv) +static u32 i8xx_fbc_ctl(struct intel_fbc *fbc) { + const struct intel_fbc_reg_params *params = &fbc->params; + struct drm_i915_private *i915 = fbc->i915; + unsigned int cfb_stride; + u32 fbc_ctl; + + cfb_stride = params->cfb_stride / fbc->limit; + + /* FBC_CTL wants 32B or 64B units */ + if (DISPLAY_VER(i915) == 2) + cfb_stride = (cfb_stride / 32) - 1; + else + cfb_stride = (cfb_stride / 64) - 1; + + fbc_ctl = FBC_CTL_PERIODIC | + FBC_CTL_INTERVAL(params->interval) | + FBC_CTL_STRIDE(cfb_stride); + + if (IS_I945GM(i915)) + fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ + + if (params->fence_id >= 0) + fbc_ctl |= FBC_CTL_FENCENO(params->fence_id); + + return fbc_ctl; +} + +static u32 i965_fbc_ctl2(struct intel_fbc *fbc) +{ + const struct intel_fbc_reg_params *params = &fbc->params; + u32 fbc_ctl2; + + fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | + FBC_CTL_PLANE(params->crtc.i9xx_plane); + + if (params->fence_id >= 0) + fbc_ctl2 |= FBC_CTL_CPU_FENCE_EN; + + return fbc_ctl2; +} + +static void i8xx_fbc_deactivate(struct intel_fbc *fbc) +{ + struct drm_i915_private *i915 = fbc->i915; u32 fbc_ctl; /* Disable compression */ - fbc_ctl = intel_de_read(dev_priv, FBC_CONTROL); + fbc_ctl = intel_de_read(i915, FBC_CONTROL); if ((fbc_ctl & FBC_CTL_EN) == 0) return; fbc_ctl &= ~FBC_CTL_EN; - intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl); + intel_de_write(i915, FBC_CONTROL, fbc_ctl); /* Wait for compressing bit to clear */ - if (intel_de_wait_for_clear(dev_priv, FBC_STATUS, + if (intel_de_wait_for_clear(i915, FBC_STATUS, FBC_STAT_COMPRESSING, 10)) { - drm_dbg_kms(&dev_priv->drm, "FBC idle timed out\n"); + drm_dbg_kms(&i915->drm, "FBC idle timed out\n"); return; } } -static void i8xx_fbc_activate(struct drm_i915_private *dev_priv) +static void i8xx_fbc_activate(struct intel_fbc *fbc) { - struct intel_fbc *fbc = &dev_priv->fbc; const struct intel_fbc_reg_params *params = &fbc->params; - int cfb_pitch; + struct drm_i915_private *i915 = fbc->i915; int i; - u32 fbc_ctl; - - cfb_pitch = params->cfb_stride / fbc->limit; - - /* FBC_CTL wants 32B or 64B units */ - if (DISPLAY_VER(dev_priv) == 2) - cfb_pitch = (cfb_pitch / 32) - 1; - else - cfb_pitch = (cfb_pitch / 64) - 1; /* Clear old tags */ for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) - intel_de_write(dev_priv, FBC_TAG(i), 0); - - if (DISPLAY_VER(dev_priv) == 4) { - u32 fbc_ctl2; - - /* Set it up... 
*/ - fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM; - fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane); - if (params->fence_id >= 0) - fbc_ctl2 |= FBC_CTL_CPU_FENCE; - intel_de_write(dev_priv, FBC_CONTROL2, fbc_ctl2); - intel_de_write(dev_priv, FBC_FENCE_OFF, + intel_de_write(i915, FBC_TAG(i), 0); + + if (DISPLAY_VER(i915) == 4) { + intel_de_write(i915, FBC_CONTROL2, + i965_fbc_ctl2(fbc)); + intel_de_write(i915, FBC_FENCE_OFF, params->fence_y_offset); } - /* enable it... */ - fbc_ctl = FBC_CTL_INTERVAL(params->interval); - fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC; - if (IS_I945GM(dev_priv)) - fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ - fbc_ctl |= FBC_CTL_STRIDE(cfb_pitch & 0xff); - if (params->fence_id >= 0) - fbc_ctl |= FBC_CTL_FENCENO(params->fence_id); - intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl); + intel_de_write(i915, FBC_CONTROL, + FBC_CTL_EN | i8xx_fbc_ctl(fbc)); } -static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv) +static bool i8xx_fbc_is_active(struct intel_fbc *fbc) { - return intel_de_read(dev_priv, FBC_CONTROL) & FBC_CTL_EN; + return intel_de_read(fbc->i915, FBC_CONTROL) & FBC_CTL_EN; } -static u32 g4x_dpfc_ctl_limit(struct drm_i915_private *i915) +static bool i8xx_fbc_is_compressing(struct intel_fbc *fbc) { - switch (i915->fbc.limit) { + return intel_de_read(fbc->i915, FBC_STATUS) & + (FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED); +} + +static void i8xx_fbc_nuke(struct intel_fbc *fbc) +{ + struct intel_fbc_reg_params *params = &fbc->params; + enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane; + struct drm_i915_private *dev_priv = fbc->i915; + + spin_lock_irq(&dev_priv->uncore.lock); + intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), + intel_de_read_fw(dev_priv, DSPADDR(i9xx_plane))); + spin_unlock_irq(&dev_priv->uncore.lock); +} + +static void i8xx_fbc_program_cfb(struct intel_fbc *fbc) +{ + struct drm_i915_private *i915 = fbc->i915; + + GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.start, + fbc->compressed_fb.start, U32_MAX)); + GEM_BUG_ON(range_overflows_end_t(u64, i915->dsm.start, + fbc->compressed_llb.start, U32_MAX)); + + intel_de_write(i915, FBC_CFB_BASE, + i915->dsm.start + fbc->compressed_fb.start); + intel_de_write(i915, FBC_LL_BASE, + i915->dsm.start + fbc->compressed_llb.start); +} + +static const struct intel_fbc_funcs i8xx_fbc_funcs = { + .activate = i8xx_fbc_activate, + .deactivate = i8xx_fbc_deactivate, + .is_active = i8xx_fbc_is_active, + .is_compressing = i8xx_fbc_is_compressing, + .nuke = i8xx_fbc_nuke, + .program_cfb = i8xx_fbc_program_cfb, +}; + +static void i965_fbc_nuke(struct intel_fbc *fbc) +{ + struct intel_fbc_reg_params *params = &fbc->params; + enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane; + struct drm_i915_private *dev_priv = fbc->i915; + + spin_lock_irq(&dev_priv->uncore.lock); + intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), + intel_de_read_fw(dev_priv, DSPSURF(i9xx_plane))); + spin_unlock_irq(&dev_priv->uncore.lock); +} + +static const struct intel_fbc_funcs i965_fbc_funcs = { + .activate = i8xx_fbc_activate, + .deactivate = i8xx_fbc_deactivate, + .is_active = i8xx_fbc_is_active, + .is_compressing = i8xx_fbc_is_compressing, + .nuke = i965_fbc_nuke, + .program_cfb = i8xx_fbc_program_cfb, +}; + +static u32 g4x_dpfc_ctl_limit(struct intel_fbc *fbc) +{ + switch (fbc->limit) { default: - MISSING_CASE(i915->fbc.limit); + MISSING_CASE(fbc->limit); fallthrough; case 1: return DPFC_CTL_LIMIT_1X; @@ -226,260 +322,334 @@ static u32 g4x_dpfc_ctl_limit(struct drm_i915_private *i915) } } 
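The FBC refactor in this hunk replaces per-generation if/else chains (DISPLAY_VER(), IS_GM45(), ...) with a per-platform struct intel_fbc_funcs table that generic code dispatches through fbc->funcs->*. Below is a minimal standalone sketch of that ops-table pattern, for orientation only; every name in it (fbc_ops, gen_a_*, fbc_enable) is an illustrative placeholder, not one of the driver's actual symbols.

	/*
	 * Standalone sketch of the ops-table dispatch pattern: each
	 * hardware generation provides one function table, set once at
	 * init, and the generic code calls through it instead of
	 * branching on the platform at every call site.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct fbc;

	struct fbc_ops {
		void (*activate)(struct fbc *fbc);
		void (*deactivate)(struct fbc *fbc);
		bool (*is_active)(struct fbc *fbc);
	};

	struct fbc {
		const struct fbc_ops *ops;
		bool active;
	};

	static void gen_a_activate(struct fbc *fbc)
	{
		fbc->active = true;
		puts("gen A: program generation-A control state");
	}

	static void gen_a_deactivate(struct fbc *fbc)
	{
		fbc->active = false;
		puts("gen A: clear generation-A control state");
	}

	static bool gen_a_is_active(struct fbc *fbc)
	{
		return fbc->active;
	}

	static const struct fbc_ops gen_a_ops = {
		.activate = gen_a_activate,
		.deactivate = gen_a_deactivate,
		.is_active = gen_a_is_active,
	};

	/* Generic code only ever sees the ops table. */
	static void fbc_enable(struct fbc *fbc)
	{
		if (!fbc->ops->is_active(fbc))
			fbc->ops->activate(fbc);
	}

	int main(void)
	{
		struct fbc fbc = { .ops = &gen_a_ops };

		fbc_enable(&fbc);
		fbc.ops->deactivate(&fbc);
		return 0;
	}

Selecting the table once keeps the hot paths free of platform checks, and supporting a new generation reduces to defining another ops instance, which is the structure the i8xx/i965/g4x/ilk/snb/ivb tables in the surrounding hunks follow.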
-static void g4x_fbc_activate(struct drm_i915_private *dev_priv) +static u32 g4x_dpfc_ctl(struct intel_fbc *fbc) { - struct intel_fbc_reg_params *params = &dev_priv->fbc.params; + const struct intel_fbc_reg_params *params = &fbc->params; + struct drm_i915_private *i915 = fbc->i915; u32 dpfc_ctl; - dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane) | DPFC_SR_EN; + dpfc_ctl = g4x_dpfc_ctl_limit(fbc) | + DPFC_CTL_PLANE_G4X(params->crtc.i9xx_plane); - dpfc_ctl |= g4x_dpfc_ctl_limit(dev_priv); + if (IS_G4X(i915)) + dpfc_ctl |= DPFC_CTL_SR_EN; if (params->fence_id >= 0) { - dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id; - intel_de_write(dev_priv, DPFC_FENCE_YOFF, - params->fence_y_offset); - } else { - intel_de_write(dev_priv, DPFC_FENCE_YOFF, 0); + dpfc_ctl |= DPFC_CTL_FENCE_EN_G4X; + + if (DISPLAY_VER(i915) < 6) + dpfc_ctl |= DPFC_CTL_FENCENO(params->fence_id); } - /* enable it... */ - intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); + return dpfc_ctl; +} + +static void g4x_fbc_activate(struct intel_fbc *fbc) +{ + const struct intel_fbc_reg_params *params = &fbc->params; + struct drm_i915_private *i915 = fbc->i915; + + intel_de_write(i915, DPFC_FENCE_YOFF, + params->fence_y_offset); + + intel_de_write(i915, DPFC_CONTROL, + DPFC_CTL_EN | g4x_dpfc_ctl(fbc)); } -static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv) +static void g4x_fbc_deactivate(struct intel_fbc *fbc) { + struct drm_i915_private *i915 = fbc->i915; u32 dpfc_ctl; /* Disable compression */ - dpfc_ctl = intel_de_read(dev_priv, DPFC_CONTROL); + dpfc_ctl = intel_de_read(i915, DPFC_CONTROL); if (dpfc_ctl & DPFC_CTL_EN) { dpfc_ctl &= ~DPFC_CTL_EN; - intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl); + intel_de_write(i915, DPFC_CONTROL, dpfc_ctl); } } -static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv) +static bool g4x_fbc_is_active(struct intel_fbc *fbc) { - return intel_de_read(dev_priv, DPFC_CONTROL) & DPFC_CTL_EN; + return intel_de_read(fbc->i915, DPFC_CONTROL) & DPFC_CTL_EN; } -static void i8xx_fbc_recompress(struct drm_i915_private *dev_priv) +static bool g4x_fbc_is_compressing(struct intel_fbc *fbc) { - struct intel_fbc_reg_params *params = &dev_priv->fbc.params; - enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane; + return intel_de_read(fbc->i915, DPFC_STATUS) & DPFC_COMP_SEG_MASK; +} - spin_lock_irq(&dev_priv->uncore.lock); - intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), - intel_de_read_fw(dev_priv, DSPADDR(i9xx_plane))); - spin_unlock_irq(&dev_priv->uncore.lock); +static void g4x_fbc_program_cfb(struct intel_fbc *fbc) +{ + struct drm_i915_private *i915 = fbc->i915; + + intel_de_write(i915, DPFC_CB_BASE, fbc->compressed_fb.start); } -static void i965_fbc_recompress(struct drm_i915_private *dev_priv) +static const struct intel_fbc_funcs g4x_fbc_funcs = { + .activate = g4x_fbc_activate, + .deactivate = g4x_fbc_deactivate, + .is_active = g4x_fbc_is_active, + .is_compressing = g4x_fbc_is_compressing, + .nuke = i965_fbc_nuke, + .program_cfb = g4x_fbc_program_cfb, +}; + +static void ilk_fbc_activate(struct intel_fbc *fbc) { - struct intel_fbc_reg_params *params = &dev_priv->fbc.params; - enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane; + struct intel_fbc_reg_params *params = &fbc->params; + struct drm_i915_private *i915 = fbc->i915; - spin_lock_irq(&dev_priv->uncore.lock); - intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), - intel_de_read_fw(dev_priv, DSPSURF(i9xx_plane))); - spin_unlock_irq(&dev_priv->uncore.lock); + intel_de_write(i915, ILK_DPFC_FENCE_YOFF, + 
params->fence_y_offset); + + intel_de_write(i915, ILK_DPFC_CONTROL, + DPFC_CTL_EN | g4x_dpfc_ctl(fbc)); } -/* This function forces a CFB recompression through the nuke operation. */ -static void snb_fbc_recompress(struct drm_i915_private *dev_priv) +static void ilk_fbc_deactivate(struct intel_fbc *fbc) { - intel_de_write(dev_priv, MSG_FBC_REND_STATE, FBC_REND_NUKE); - intel_de_posting_read(dev_priv, MSG_FBC_REND_STATE); + struct drm_i915_private *i915 = fbc->i915; + u32 dpfc_ctl; + + /* Disable compression */ + dpfc_ctl = intel_de_read(i915, ILK_DPFC_CONTROL); + if (dpfc_ctl & DPFC_CTL_EN) { + dpfc_ctl &= ~DPFC_CTL_EN; + intel_de_write(i915, ILK_DPFC_CONTROL, dpfc_ctl); + } } -static void intel_fbc_recompress(struct drm_i915_private *dev_priv) +static bool ilk_fbc_is_active(struct intel_fbc *fbc) { - struct intel_fbc *fbc = &dev_priv->fbc; + return intel_de_read(fbc->i915, ILK_DPFC_CONTROL) & DPFC_CTL_EN; +} - trace_intel_fbc_nuke(fbc->crtc); +static bool ilk_fbc_is_compressing(struct intel_fbc *fbc) +{ + return intel_de_read(fbc->i915, ILK_DPFC_STATUS) & DPFC_COMP_SEG_MASK; +} - if (DISPLAY_VER(dev_priv) >= 6) - snb_fbc_recompress(dev_priv); - else if (DISPLAY_VER(dev_priv) >= 4) - i965_fbc_recompress(dev_priv); - else - i8xx_fbc_recompress(dev_priv); +static void ilk_fbc_program_cfb(struct intel_fbc *fbc) +{ + struct drm_i915_private *i915 = fbc->i915; + + intel_de_write(i915, ILK_DPFC_CB_BASE, fbc->compressed_fb.start); } -static void ilk_fbc_activate(struct drm_i915_private *dev_priv) +static const struct intel_fbc_funcs ilk_fbc_funcs = { + .activate = ilk_fbc_activate, + .deactivate = ilk_fbc_deactivate, + .is_active = ilk_fbc_is_active, + .is_compressing = ilk_fbc_is_compressing, + .nuke = i965_fbc_nuke, + .program_cfb = ilk_fbc_program_cfb, +}; + +static void snb_fbc_program_fence(struct intel_fbc *fbc) { - struct intel_fbc_reg_params *params = &dev_priv->fbc.params; - u32 dpfc_ctl; + const struct intel_fbc_reg_params *params = &fbc->params; + struct drm_i915_private *i915 = fbc->i915; + u32 ctl = 0; - dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane); + if (params->fence_id >= 0) + ctl = SNB_DPFC_FENCE_EN | SNB_DPFC_FENCENO(params->fence_id); - dpfc_ctl |= g4x_dpfc_ctl_limit(dev_priv); + intel_de_write(i915, SNB_DPFC_CTL_SA, ctl); + intel_de_write(i915, SNB_DPFC_CPU_FENCE_OFFSET, params->fence_y_offset); +} - if (params->fence_id >= 0) { - dpfc_ctl |= DPFC_CTL_FENCE_EN; - if (IS_IRONLAKE(dev_priv)) - dpfc_ctl |= params->fence_id; - if (IS_SANDYBRIDGE(dev_priv)) { - intel_de_write(dev_priv, SNB_DPFC_CTL_SA, - SNB_CPU_FENCE_ENABLE | params->fence_id); - intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, - params->fence_y_offset); - } - } else { - if (IS_SANDYBRIDGE(dev_priv)) { - intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0); - intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0); - } - } +static void snb_fbc_activate(struct intel_fbc *fbc) +{ + snb_fbc_program_fence(fbc); - intel_de_write(dev_priv, ILK_DPFC_FENCE_YOFF, - params->fence_y_offset); - /* enable it... 
*/ - intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); + ilk_fbc_activate(fbc); } -static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv) +static void snb_fbc_nuke(struct intel_fbc *fbc) { - u32 dpfc_ctl; + struct drm_i915_private *i915 = fbc->i915; - /* Disable compression */ - dpfc_ctl = intel_de_read(dev_priv, ILK_DPFC_CONTROL); - if (dpfc_ctl & DPFC_CTL_EN) { - dpfc_ctl &= ~DPFC_CTL_EN; - intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl); - } + intel_de_write(i915, MSG_FBC_REND_STATE, FBC_REND_NUKE); + intel_de_posting_read(i915, MSG_FBC_REND_STATE); +} + +static const struct intel_fbc_funcs snb_fbc_funcs = { + .activate = snb_fbc_activate, + .deactivate = ilk_fbc_deactivate, + .is_active = ilk_fbc_is_active, + .is_compressing = ilk_fbc_is_compressing, + .nuke = snb_fbc_nuke, + .program_cfb = ilk_fbc_program_cfb, +}; + +static void glk_fbc_program_cfb_stride(struct intel_fbc *fbc) +{ + const struct intel_fbc_reg_params *params = &fbc->params; + struct drm_i915_private *i915 = fbc->i915; + u32 val = 0; + + if (params->override_cfb_stride) + val |= FBC_STRIDE_OVERRIDE | + FBC_STRIDE(params->override_cfb_stride / fbc->limit); + + intel_de_write(i915, GLK_FBC_STRIDE, val); } -static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv) +static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc) { - return intel_de_read(dev_priv, ILK_DPFC_CONTROL) & DPFC_CTL_EN; + const struct intel_fbc_reg_params *params = &fbc->params; + struct drm_i915_private *i915 = fbc->i915; + u32 val = 0; + + /* Display WA #0529: skl, kbl, bxt. */ + if (params->override_cfb_stride) + val |= CHICKEN_FBC_STRIDE_OVERRIDE | + CHICKEN_FBC_STRIDE(params->override_cfb_stride / fbc->limit); + + intel_de_rmw(i915, CHICKEN_MISC_4, + CHICKEN_FBC_STRIDE_OVERRIDE | + CHICKEN_FBC_STRIDE_MASK, val); } -static void gen7_fbc_activate(struct drm_i915_private *dev_priv) +static u32 ivb_dpfc_ctl(struct intel_fbc *fbc) { - struct intel_fbc *fbc = &dev_priv->fbc; const struct intel_fbc_reg_params *params = &fbc->params; + struct drm_i915_private *i915 = fbc->i915; u32 dpfc_ctl; - if (DISPLAY_VER(dev_priv) >= 10) { - u32 val = 0; + dpfc_ctl = g4x_dpfc_ctl_limit(fbc); - if (params->override_cfb_stride) - val |= FBC_STRIDE_OVERRIDE | - FBC_STRIDE(params->override_cfb_stride / fbc->limit); + if (IS_IVYBRIDGE(i915)) + dpfc_ctl |= DPFC_CTL_PLANE_IVB(params->crtc.i9xx_plane); - intel_de_write(dev_priv, GLK_FBC_STRIDE, val); - } else if (DISPLAY_VER(dev_priv) == 9) { - u32 val = 0; + if (params->fence_id >= 0) + dpfc_ctl |= DPFC_CTL_FENCE_EN_IVB; - /* Display WA #0529: skl, kbl, bxt. 
*/ - if (params->override_cfb_stride) - val |= CHICKEN_FBC_STRIDE_OVERRIDE | - CHICKEN_FBC_STRIDE(params->override_cfb_stride / fbc->limit); + if (fbc->false_color) + dpfc_ctl |= DPFC_CTL_FALSE_COLOR; - intel_de_rmw(dev_priv, CHICKEN_MISC_4, - CHICKEN_FBC_STRIDE_OVERRIDE | - CHICKEN_FBC_STRIDE_MASK, val); - } + return dpfc_ctl; +} - dpfc_ctl = 0; - if (IS_IVYBRIDGE(dev_priv)) - dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane); +static void ivb_fbc_activate(struct intel_fbc *fbc) +{ + struct drm_i915_private *i915 = fbc->i915; - dpfc_ctl |= g4x_dpfc_ctl_limit(dev_priv); + if (DISPLAY_VER(i915) >= 10) + glk_fbc_program_cfb_stride(fbc); + else if (DISPLAY_VER(i915) == 9) + skl_fbc_program_cfb_stride(fbc); - if (params->fence_id >= 0) { - dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; - intel_de_write(dev_priv, SNB_DPFC_CTL_SA, - SNB_CPU_FENCE_ENABLE | params->fence_id); - intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, - params->fence_y_offset); - } else if (dev_priv->ggtt.num_fences) { - intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0); - intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0); - } + if (i915->ggtt.num_fences) + snb_fbc_program_fence(fbc); - if (dev_priv->fbc.false_color) - dpfc_ctl |= FBC_CTL_FALSE_COLOR; + intel_de_write(i915, ILK_DPFC_CONTROL, + DPFC_CTL_EN | ivb_dpfc_ctl(fbc)); +} - intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); +static bool ivb_fbc_is_compressing(struct intel_fbc *fbc) +{ + return intel_de_read(fbc->i915, ILK_DPFC_STATUS2) & DPFC_COMP_SEG_MASK_IVB; } -static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv) +static void ivb_fbc_set_false_color(struct intel_fbc *fbc, + bool enable) { - if (DISPLAY_VER(dev_priv) >= 5) - return ilk_fbc_is_active(dev_priv); - else if (IS_GM45(dev_priv)) - return g4x_fbc_is_active(dev_priv); - else - return i8xx_fbc_is_active(dev_priv); + intel_de_rmw(fbc->i915, ILK_DPFC_CONTROL, + DPFC_CTL_FALSE_COLOR, enable ? 
DPFC_CTL_FALSE_COLOR : 0); } -static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv) +static const struct intel_fbc_funcs ivb_fbc_funcs = { + .activate = ivb_fbc_activate, + .deactivate = ilk_fbc_deactivate, + .is_active = ilk_fbc_is_active, + .is_compressing = ivb_fbc_is_compressing, + .nuke = snb_fbc_nuke, + .program_cfb = ilk_fbc_program_cfb, + .set_false_color = ivb_fbc_set_false_color, +}; + +static bool intel_fbc_hw_is_active(struct intel_fbc *fbc) { - struct intel_fbc *fbc = &dev_priv->fbc; + return fbc->funcs->is_active(fbc); +} +static void intel_fbc_hw_activate(struct intel_fbc *fbc) +{ trace_intel_fbc_activate(fbc->crtc); fbc->active = true; fbc->activated = true; - if (DISPLAY_VER(dev_priv) >= 7) - gen7_fbc_activate(dev_priv); - else if (DISPLAY_VER(dev_priv) >= 5) - ilk_fbc_activate(dev_priv); - else if (IS_GM45(dev_priv)) - g4x_fbc_activate(dev_priv); - else - i8xx_fbc_activate(dev_priv); + fbc->funcs->activate(fbc); } -static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv) +static void intel_fbc_hw_deactivate(struct intel_fbc *fbc) { - struct intel_fbc *fbc = &dev_priv->fbc; - trace_intel_fbc_deactivate(fbc->crtc); fbc->active = false; - if (DISPLAY_VER(dev_priv) >= 5) - ilk_fbc_deactivate(dev_priv); - else if (IS_GM45(dev_priv)) - g4x_fbc_deactivate(dev_priv); - else - i8xx_fbc_deactivate(dev_priv); + fbc->funcs->deactivate(fbc); +} + +bool intel_fbc_is_compressing(struct intel_fbc *fbc) +{ + return fbc->funcs->is_compressing(fbc); +} + +static void intel_fbc_nuke(struct intel_fbc *fbc) +{ + trace_intel_fbc_nuke(fbc->crtc); + + fbc->funcs->nuke(fbc); +} + +int intel_fbc_set_false_color(struct intel_fbc *fbc, bool enable) +{ + if (!fbc->funcs || !fbc->funcs->set_false_color) + return -ENODEV; + + mutex_lock(&fbc->lock); + + fbc->false_color = enable; + + fbc->funcs->set_false_color(fbc, enable); + + mutex_unlock(&fbc->lock); + + return 0; } /** * intel_fbc_is_active - Is FBC active? - * @dev_priv: i915 device instance + * @fbc: The FBC instance * * This function is used to verify the current state of FBC. * * FIXME: This should be tracked in the plane config eventually * instead of queried at runtime for most callers. */ -bool intel_fbc_is_active(struct drm_i915_private *dev_priv) +bool intel_fbc_is_active(struct intel_fbc *fbc) { - return dev_priv->fbc.active; + return fbc->active; } -static void intel_fbc_activate(struct drm_i915_private *dev_priv) +static void intel_fbc_activate(struct intel_fbc *fbc) { - intel_fbc_hw_activate(dev_priv); - intel_fbc_recompress(dev_priv); + intel_fbc_hw_activate(fbc); + intel_fbc_nuke(fbc); } -static void intel_fbc_deactivate(struct drm_i915_private *dev_priv, - const char *reason) +static void intel_fbc_deactivate(struct intel_fbc *fbc, const char *reason) { - struct intel_fbc *fbc = &dev_priv->fbc; + struct drm_i915_private *i915 = fbc->i915; - drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock)); + drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock)); if (fbc->active) - intel_fbc_hw_deactivate(dev_priv); + intel_fbc_hw_deactivate(fbc); fbc->no_fbc_reason = reason; } @@ -492,7 +662,7 @@ static u64 intel_fbc_cfb_base_max(struct drm_i915_private *i915) return BIT_ULL(32); } -static u64 intel_fbc_stolen_end(struct drm_i915_private *dev_priv) +static u64 intel_fbc_stolen_end(struct drm_i915_private *i915) { u64 end; @@ -500,13 +670,13 @@ static u64 intel_fbc_stolen_end(struct drm_i915_private *dev_priv) * reserved range size, so it always assumes the maximum (8mb) is used. 
* If we enable FBC using a CFB on that memory range we'll get FIFO * underruns, even if that range is not reserved by the BIOS. */ - if (IS_BROADWELL(dev_priv) || (DISPLAY_VER(dev_priv) == 9 && - !IS_BROXTON(dev_priv))) - end = resource_size(&dev_priv->dsm) - 8 * 1024 * 1024; + if (IS_BROADWELL(i915) || + (DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915))) + end = resource_size(&i915->dsm) - 8 * 1024 * 1024; else end = U64_MAX; - return min(end, intel_fbc_cfb_base_max(dev_priv)); + return min(end, intel_fbc_cfb_base_max(i915)); } static int intel_fbc_min_limit(int fb_cpp) @@ -514,10 +684,10 @@ static int intel_fbc_min_limit(int fb_cpp) return fb_cpp == 2 ? 2 : 1; } -static int intel_fbc_max_limit(struct drm_i915_private *dev_priv) +static int intel_fbc_max_limit(struct drm_i915_private *i915) { /* WaFbcOnly1to1Ratio:ctg */ - if (IS_G4X(dev_priv)) + if (IS_G4X(i915)) return 1; /* @@ -527,23 +697,23 @@ static int intel_fbc_max_limit(struct drm_i915_private *dev_priv) return 4; } -static int find_compression_limit(struct drm_i915_private *dev_priv, +static int find_compression_limit(struct intel_fbc *fbc, unsigned int size, int min_limit) { - struct intel_fbc *fbc = &dev_priv->fbc; - u64 end = intel_fbc_stolen_end(dev_priv); + struct drm_i915_private *i915 = fbc->i915; + u64 end = intel_fbc_stolen_end(i915); int ret, limit = min_limit; size /= limit; /* Try to over-allocate to reduce reallocations and fragmentation. */ - ret = i915_gem_stolen_insert_node_in_range(dev_priv, &fbc->compressed_fb, + ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb, size <<= 1, 4096, 0, end); if (ret == 0) return limit; - for (; limit <= intel_fbc_max_limit(dev_priv); limit <<= 1) { - ret = i915_gem_stolen_insert_node_in_range(dev_priv, &fbc->compressed_fb, + for (; limit <= intel_fbc_max_limit(i915); limit <<= 1) { + ret = i915_gem_stolen_insert_node_in_range(i915, &fbc->compressed_fb, size >>= 1, 4096, 0, end); if (ret == 0) return limit; @@ -552,34 +722,34 @@ static int find_compression_limit(struct drm_i915_private *dev_priv, return 0; } -static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, +static int intel_fbc_alloc_cfb(struct intel_fbc *fbc, unsigned int size, int min_limit) { - struct intel_fbc *fbc = &dev_priv->fbc; + struct drm_i915_private *i915 = fbc->i915; int ret; - drm_WARN_ON(&dev_priv->drm, + drm_WARN_ON(&i915->drm, drm_mm_node_allocated(&fbc->compressed_fb)); - drm_WARN_ON(&dev_priv->drm, + drm_WARN_ON(&i915->drm, drm_mm_node_allocated(&fbc->compressed_llb)); - if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) { - ret = i915_gem_stolen_insert_node(dev_priv, &fbc->compressed_llb, + if (DISPLAY_VER(i915) < 5 && !IS_G4X(i915)) { + ret = i915_gem_stolen_insert_node(i915, &fbc->compressed_llb, 4096, 4096); if (ret) goto err; } - ret = find_compression_limit(dev_priv, size, min_limit); + ret = find_compression_limit(fbc, size, min_limit); if (!ret) goto err_llb; else if (ret > min_limit) - drm_info_once(&dev_priv->drm, + drm_info_once(&i915->drm, "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. 
Try to increase stolen memory size if available in BIOS.\n"); fbc->limit = ret; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(&i915->drm, "reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n", fbc->compressed_fb.size, fbc->limit); @@ -587,82 +757,62 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, err_llb: if (drm_mm_node_allocated(&fbc->compressed_llb)) - i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_llb); + i915_gem_stolen_remove_node(i915, &fbc->compressed_llb); err: - if (drm_mm_initialized(&dev_priv->mm.stolen)) - drm_info_once(&dev_priv->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); + if (drm_mm_initialized(&i915->mm.stolen)) + drm_info_once(&i915->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); return -ENOSPC; } -static void intel_fbc_program_cfb(struct drm_i915_private *dev_priv) +static void intel_fbc_program_cfb(struct intel_fbc *fbc) { - struct intel_fbc *fbc = &dev_priv->fbc; - - if (DISPLAY_VER(dev_priv) >= 5) { - intel_de_write(dev_priv, ILK_DPFC_CB_BASE, - fbc->compressed_fb.start); - } else if (IS_GM45(dev_priv)) { - intel_de_write(dev_priv, DPFC_CB_BASE, - fbc->compressed_fb.start); - } else { - GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start, - fbc->compressed_fb.start, - U32_MAX)); - GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start, - fbc->compressed_llb.start, - U32_MAX)); - - intel_de_write(dev_priv, FBC_CFB_BASE, - dev_priv->dsm.start + fbc->compressed_fb.start); - intel_de_write(dev_priv, FBC_LL_BASE, - dev_priv->dsm.start + fbc->compressed_llb.start); - } + fbc->funcs->program_cfb(fbc); } -static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) +static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc) { - struct intel_fbc *fbc = &dev_priv->fbc; + struct drm_i915_private *i915 = fbc->i915; - if (WARN_ON(intel_fbc_hw_is_active(dev_priv))) + if (WARN_ON(intel_fbc_hw_is_active(fbc))) return; if (drm_mm_node_allocated(&fbc->compressed_llb)) - i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_llb); + i915_gem_stolen_remove_node(i915, &fbc->compressed_llb); if (drm_mm_node_allocated(&fbc->compressed_fb)) - i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb); + i915_gem_stolen_remove_node(i915, &fbc->compressed_fb); } -void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) +void intel_fbc_cleanup(struct drm_i915_private *i915) { - struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc *fbc = &i915->fbc; - if (!HAS_FBC(dev_priv)) + if (!HAS_FBC(i915)) return; mutex_lock(&fbc->lock); - __intel_fbc_cleanup_cfb(dev_priv); + __intel_fbc_cleanup_cfb(fbc); mutex_unlock(&fbc->lock); } -static bool stride_is_valid(struct drm_i915_private *dev_priv, +static bool stride_is_valid(struct drm_i915_private *i915, u64 modifier, unsigned int stride) { /* This should have been caught earlier. */ - if (drm_WARN_ON_ONCE(&dev_priv->drm, (stride & (64 - 1)) != 0)) + if (drm_WARN_ON_ONCE(&i915->drm, (stride & (64 - 1)) != 0)) return false; /* Below are the additional FBC restrictions. 
*/ if (stride < 512) return false; - if (DISPLAY_VER(dev_priv) == 2 || DISPLAY_VER(dev_priv) == 3) + if (DISPLAY_VER(i915) == 2 || DISPLAY_VER(i915) == 3) return stride == 4096 || stride == 8192; - if (DISPLAY_VER(dev_priv) == 4 && !IS_G4X(dev_priv) && stride < 2048) + if (DISPLAY_VER(i915) == 4 && !IS_G4X(i915) && stride < 2048) return false; /* Display WA #1105: skl,bxt,kbl,cfl,glk */ - if ((DISPLAY_VER(dev_priv) == 9 || IS_GEMINILAKE(dev_priv)) && + if ((DISPLAY_VER(i915) == 9 || IS_GEMINILAKE(i915)) && modifier == DRM_FORMAT_MOD_LINEAR && stride & 511) return false; @@ -672,7 +822,7 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv, return true; } -static bool pixel_format_is_valid(struct drm_i915_private *dev_priv, +static bool pixel_format_is_valid(struct drm_i915_private *i915, u32 pixel_format) { switch (pixel_format) { @@ -682,10 +832,10 @@ static bool pixel_format_is_valid(struct drm_i915_private *dev_priv, case DRM_FORMAT_XRGB1555: case DRM_FORMAT_RGB565: /* 16bpp not supported on gen2 */ - if (DISPLAY_VER(dev_priv) == 2) + if (DISPLAY_VER(i915) == 2) return false; /* WaFbcOnly1to1Ratio:ctg */ - if (IS_G4X(dev_priv)) + if (IS_G4X(i915)) return false; return true; default: @@ -693,13 +843,13 @@ static bool pixel_format_is_valid(struct drm_i915_private *dev_priv, } } -static bool rotation_is_valid(struct drm_i915_private *dev_priv, +static bool rotation_is_valid(struct drm_i915_private *i915, u32 pixel_format, unsigned int rotation) { - if (DISPLAY_VER(dev_priv) >= 9 && pixel_format == DRM_FORMAT_RGB565 && + if (DISPLAY_VER(i915) >= 9 && pixel_format == DRM_FORMAT_RGB565 && drm_rotation_90_or_270(rotation)) return false; - else if (DISPLAY_VER(dev_priv) <= 4 && !IS_G4X(dev_priv) && + else if (DISPLAY_VER(i915) <= 4 && !IS_G4X(i915) && rotation != DRM_MODE_ROTATE_0) return false; @@ -712,19 +862,19 @@ static bool rotation_is_valid(struct drm_i915_private *dev_priv, * the X and Y offset registers. That's why we include the src x/y offsets * instead of just looking at the plane size. 
*/ -static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) +static bool intel_fbc_hw_tracking_covers_screen(struct intel_fbc *fbc, + struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_fbc *fbc = &dev_priv->fbc; + struct drm_i915_private *i915 = fbc->i915; unsigned int effective_w, effective_h, max_w, max_h; - if (DISPLAY_VER(dev_priv) >= 10) { + if (DISPLAY_VER(i915) >= 10) { max_w = 5120; max_h = 4096; - } else if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL(dev_priv)) { + } else if (DISPLAY_VER(i915) >= 8 || IS_HASWELL(i915)) { max_w = 4096; max_h = 4096; - } else if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5) { + } else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) { max_w = 4096; max_h = 2048; } else { @@ -740,14 +890,14 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) return effective_w <= max_w && effective_h <= max_h; } -static bool tiling_is_valid(struct drm_i915_private *dev_priv, +static bool tiling_is_valid(struct drm_i915_private *i915, u64 modifier) { switch (modifier) { case DRM_FORMAT_MOD_LINEAR: case I915_FORMAT_MOD_Y_TILED: case I915_FORMAT_MOD_Yf_TILED: - return DISPLAY_VER(dev_priv) >= 9; + return DISPLAY_VER(i915) >= 9; case I915_FORMAT_MOD_X_TILED: return true; default: @@ -759,8 +909,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc, const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_fbc *fbc = &dev_priv->fbc; + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_fbc *fbc = &i915->fbc; struct intel_fbc_state_cache *cache = &fbc->state_cache; struct drm_framebuffer *fb = plane_state->hw.fb; @@ -769,7 +919,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc, return; cache->crtc.mode_flags = crtc_state->hw.adjusted_mode.flags; - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) + if (IS_HASWELL(i915) || IS_BROADWELL(i915)) cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate; cache->plane.rotation = plane_state->hw.rotation; @@ -794,7 +944,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc, cache->fence_y_offset = intel_plane_fence_y_offset(plane_state); - drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE && + drm_WARN_ON(&i915->drm, plane_state->flags & PLANE_HAS_FENCE && !plane_state->ggtt_vma->fence); if (plane_state->flags & PLANE_HAS_FENCE && @@ -806,19 +956,17 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc, cache->psr2_active = crtc_state->has_psr2; } -static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv) +static bool intel_fbc_cfb_size_changed(struct intel_fbc *fbc) { - struct intel_fbc *fbc = &dev_priv->fbc; - - return intel_fbc_cfb_size(dev_priv, &fbc->state_cache) > + return intel_fbc_cfb_size(fbc, &fbc->state_cache) > fbc->compressed_fb.size * fbc->limit; } -static u16 intel_fbc_override_cfb_stride(struct drm_i915_private *dev_priv, +static u16 intel_fbc_override_cfb_stride(struct intel_fbc *fbc, const struct intel_fbc_state_cache *cache) { unsigned int stride = _intel_fbc_cfb_stride(cache); - unsigned int stride_aligned = intel_fbc_cfb_stride(dev_priv, cache); + unsigned int stride_aligned = intel_fbc_cfb_stride(fbc, cache); /* * Override stride in 64 byte units per 4 line segment. 
@@ -828,23 +976,23 @@ static u16 intel_fbc_override_cfb_stride(struct drm_i915_private *dev_priv, * we always need to use the override there. */ if (stride != stride_aligned || - (DISPLAY_VER(dev_priv) == 9 && + (DISPLAY_VER(fbc->i915) == 9 && cache->fb.modifier == DRM_FORMAT_MOD_LINEAR)) return stride_aligned * 4 / 64; return 0; } -static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv) +static bool intel_fbc_can_enable(struct intel_fbc *fbc) { - struct intel_fbc *fbc = &dev_priv->fbc; + struct drm_i915_private *i915 = fbc->i915; - if (intel_vgpu_active(dev_priv)) { + if (intel_vgpu_active(i915)) { fbc->no_fbc_reason = "VGPU is active"; return false; } - if (!dev_priv->params.enable_fbc) { + if (!i915->params.enable_fbc) { fbc->no_fbc_reason = "disabled per module param or by default"; return false; } @@ -859,11 +1007,11 @@ static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv) static bool intel_fbc_can_activate(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_fbc *fbc = &dev_priv->fbc; + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_fbc *fbc = &i915->fbc; struct intel_fbc_state_cache *cache = &fbc->state_cache; - if (!intel_fbc_can_enable(dev_priv)) + if (!intel_fbc_can_enable(fbc)) return false; if (!cache->plane.visible) { @@ -884,7 +1032,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) return false; } - if (!intel_fbc_hw_tracking_covers_screen(crtc)) { + if (!intel_fbc_hw_tracking_covers_screen(fbc, crtc)) { fbc->no_fbc_reason = "mode too large for compression"; return false; } @@ -906,28 +1054,28 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) * For now this will effectively disable FBC with 90/270 degree * rotation. */ - if (DISPLAY_VER(dev_priv) < 9 && cache->fence_id < 0) { + if (DISPLAY_VER(i915) < 9 && cache->fence_id < 0) { fbc->no_fbc_reason = "framebuffer not tiled or fenced"; return false; } - if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) { + if (!pixel_format_is_valid(i915, cache->fb.format->format)) { fbc->no_fbc_reason = "pixel format is invalid"; return false; } - if (!rotation_is_valid(dev_priv, cache->fb.format->format, + if (!rotation_is_valid(i915, cache->fb.format->format, cache->plane.rotation)) { fbc->no_fbc_reason = "rotation unsupported"; return false; } - if (!tiling_is_valid(dev_priv, cache->fb.modifier)) { + if (!tiling_is_valid(i915, cache->fb.modifier)) { fbc->no_fbc_reason = "tiling unsupported"; return false; } - if (!stride_is_valid(dev_priv, cache->fb.modifier, + if (!stride_is_valid(i915, cache->fb.modifier, cache->fb.stride * cache->fb.format->cpp[0])) { fbc->no_fbc_reason = "framebuffer stride not supported"; return false; @@ -940,8 +1088,8 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) } /* WaFbcExceedCdClockThreshold:hsw,bdw */ - if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) && - cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) { + if ((IS_HASWELL(i915) || IS_BROADWELL(i915)) && + cache->crtc.hsw_bdw_pixel_rate >= i915->cdclk.hw.cdclk * 95 / 100) { fbc->no_fbc_reason = "pixel rate is too big"; return false; } @@ -956,7 +1104,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) * we didn't get any invalidate/deactivate calls, but this would require * a lot of tracking just for a specific case. If we conclude it's an * important case, we can implement it later. 
*/ - if (intel_fbc_cfb_size_changed(dev_priv)) { + if (intel_fbc_cfb_size_changed(fbc)) { fbc->no_fbc_reason = "CFB requirements changed"; return false; } @@ -966,14 +1114,14 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) * having a Y offset that isn't divisible by 4 causes FIFO underrun * and screen flicker. */ - if (DISPLAY_VER(dev_priv) >= 9 && + if (DISPLAY_VER(i915) >= 9 && (fbc->state_cache.plane.adjusted_y & 3)) { fbc->no_fbc_reason = "plane Y offset is misaligned"; return false; } /* Wa_22010751166: icl, ehl, tgl, dg1, rkl */ - if (DISPLAY_VER(dev_priv) >= 11 && + if (DISPLAY_VER(i915) >= 11 && (cache->plane.src_h + cache->plane.adjusted_y) % 4) { fbc->no_fbc_reason = "plane height + offset is non-modulo of 4"; return false; @@ -984,7 +1132,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) * Recommendation is to keep this combination disabled * Bspec: 50422 HSD: 14010260002 */ - if (fbc->state_cache.psr2_active && DISPLAY_VER(dev_priv) >= 12) { + if (fbc->state_cache.psr2_active && DISPLAY_VER(i915) >= 12) { fbc->no_fbc_reason = "not supported with PSR2"; return false; } @@ -992,12 +1140,11 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) return true; } -static void intel_fbc_get_reg_params(struct intel_crtc *crtc, - struct intel_fbc_reg_params *params) +static void intel_fbc_get_reg_params(struct intel_fbc *fbc, + struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_fbc *fbc = &dev_priv->fbc; - struct intel_fbc_state_cache *cache = &fbc->state_cache; + const struct intel_fbc_state_cache *cache = &fbc->state_cache; + struct intel_fbc_reg_params *params = &fbc->params; /* Since all our fields are integer types, use memset here so the * comparison function can rely on memcmp because the padding will be @@ -1016,9 +1163,9 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc, params->fb.modifier = cache->fb.modifier; params->fb.stride = cache->fb.stride; - params->cfb_stride = intel_fbc_cfb_stride(dev_priv, cache); - params->cfb_size = intel_fbc_cfb_size(dev_priv, cache); - params->override_cfb_stride = intel_fbc_override_cfb_stride(dev_priv, cache); + params->cfb_stride = intel_fbc_cfb_stride(fbc, cache); + params->cfb_size = intel_fbc_cfb_size(fbc, cache); + params->override_cfb_stride = intel_fbc_override_cfb_stride(fbc, cache); params->plane_visible = cache->plane.visible; } @@ -1026,8 +1173,8 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc, static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - const struct intel_fbc *fbc = &dev_priv->fbc; + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_fbc *fbc = &i915->fbc; const struct intel_fbc_state_cache *cache = &fbc->state_cache; const struct intel_fbc_reg_params *params = &fbc->params; @@ -1049,13 +1196,13 @@ static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state) if (params->fb.stride != cache->fb.stride) return false; - if (params->cfb_stride != intel_fbc_cfb_stride(dev_priv, cache)) + if (params->cfb_stride != intel_fbc_cfb_stride(fbc, cache)) return false; - if (params->cfb_size != intel_fbc_cfb_size(dev_priv, cache)) + if (params->cfb_size != intel_fbc_cfb_size(fbc, cache)) return false; - if (params->override_cfb_stride != intel_fbc_override_cfb_stride(dev_priv, cache)) + if (params->override_cfb_stride != 
intel_fbc_override_cfb_stride(fbc, cache)) return false; return true; @@ -1069,12 +1216,12 @@ bool intel_fbc_pre_update(struct intel_atomic_state *state, intel_atomic_get_new_crtc_state(state, crtc); const struct intel_plane_state *plane_state = intel_atomic_get_new_plane_state(state, plane); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_fbc *fbc = &dev_priv->fbc; + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_fbc *fbc = plane->fbc; const char *reason = "update pending"; bool need_vblank_wait = false; - if (!plane->has_fbc || !plane_state) + if (!fbc || !plane_state) return need_vblank_wait; mutex_lock(&fbc->lock); @@ -1086,7 +1233,7 @@ bool intel_fbc_pre_update(struct intel_atomic_state *state, fbc->flip_pending = true; if (!intel_fbc_can_flip_nuke(crtc_state)) { - intel_fbc_deactivate(dev_priv, reason); + intel_fbc_deactivate(fbc, reason); /* * Display WA #1198: glk+ @@ -1102,7 +1249,7 @@ bool intel_fbc_pre_update(struct intel_atomic_state *state, * if at least one frame has already passed. */ if (fbc->activated && - DISPLAY_VER(dev_priv) >= 10) + DISPLAY_VER(i915) >= 10) need_vblank_wait = true; fbc->activated = false; } @@ -1112,70 +1259,62 @@ unlock: return need_vblank_wait; } -/** - * __intel_fbc_disable - disable FBC - * @dev_priv: i915 device instance - * - * This is the low level function that actually disables FBC. Callers should - * grab the FBC lock. - */ -static void __intel_fbc_disable(struct drm_i915_private *dev_priv) +static void __intel_fbc_disable(struct intel_fbc *fbc) { - struct intel_fbc *fbc = &dev_priv->fbc; + struct drm_i915_private *i915 = fbc->i915; struct intel_crtc *crtc = fbc->crtc; - drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock)); - drm_WARN_ON(&dev_priv->drm, !fbc->crtc); - drm_WARN_ON(&dev_priv->drm, fbc->active); + drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock)); + drm_WARN_ON(&i915->drm, !fbc->crtc); + drm_WARN_ON(&i915->drm, fbc->active); - drm_dbg_kms(&dev_priv->drm, "Disabling FBC on pipe %c\n", + drm_dbg_kms(&i915->drm, "Disabling FBC on pipe %c\n", pipe_name(crtc->pipe)); - __intel_fbc_cleanup_cfb(dev_priv); + __intel_fbc_cleanup_cfb(fbc); fbc->crtc = NULL; } static void __intel_fbc_post_update(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_fbc *fbc = &dev_priv->fbc; + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_fbc *fbc = &i915->fbc; - drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock)); + drm_WARN_ON(&i915->drm, !mutex_is_locked(&fbc->lock)); if (fbc->crtc != crtc) return; fbc->flip_pending = false; - if (!dev_priv->params.enable_fbc) { - intel_fbc_deactivate(dev_priv, "disabled at runtime per module param"); - __intel_fbc_disable(dev_priv); + if (!i915->params.enable_fbc) { + intel_fbc_deactivate(fbc, "disabled at runtime per module param"); + __intel_fbc_disable(fbc); return; } - intel_fbc_get_reg_params(crtc, &fbc->params); + intel_fbc_get_reg_params(fbc, crtc); if (!intel_fbc_can_activate(crtc)) return; if (!fbc->busy_bits) - intel_fbc_activate(dev_priv); + intel_fbc_activate(fbc); else - intel_fbc_deactivate(dev_priv, "frontbuffer write"); + intel_fbc_deactivate(fbc, "frontbuffer write"); } void intel_fbc_post_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_plane *plane = to_intel_plane(crtc->base.primary); const struct intel_plane_state *plane_state = 
intel_atomic_get_new_plane_state(state, plane); - struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc *fbc = plane->fbc; - if (!plane->has_fbc || !plane_state) + if (!fbc || !plane_state) return; mutex_lock(&fbc->lock); @@ -1191,13 +1330,13 @@ static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc) return fbc->possible_framebuffer_bits; } -void intel_fbc_invalidate(struct drm_i915_private *dev_priv, +void intel_fbc_invalidate(struct drm_i915_private *i915, unsigned int frontbuffer_bits, enum fb_op_origin origin) { - struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc *fbc = &i915->fbc; - if (!HAS_FBC(dev_priv)) + if (!HAS_FBC(i915)) return; if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE) @@ -1208,17 +1347,17 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv, fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits; if (fbc->crtc && fbc->busy_bits) - intel_fbc_deactivate(dev_priv, "frontbuffer write"); + intel_fbc_deactivate(fbc, "frontbuffer write"); mutex_unlock(&fbc->lock); } -void intel_fbc_flush(struct drm_i915_private *dev_priv, +void intel_fbc_flush(struct drm_i915_private *i915, unsigned int frontbuffer_bits, enum fb_op_origin origin) { - struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc *fbc = &i915->fbc; - if (!HAS_FBC(dev_priv)) + if (!HAS_FBC(i915)) return; mutex_lock(&fbc->lock); @@ -1231,7 +1370,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv, if (!fbc->busy_bits && fbc->crtc && (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) { if (fbc->active) - intel_fbc_recompress(dev_priv); + intel_fbc_nuke(fbc); else if (!fbc->flip_pending) __intel_fbc_post_update(fbc->crtc); } @@ -1242,7 +1381,7 @@ out: /** * intel_fbc_choose_crtc - select a CRTC to enable FBC on - * @dev_priv: i915 device instance + * @i915: i915 device instance * @state: the atomic state structure * * This function looks at the proposed state for CRTCs and planes, then chooses @@ -1250,12 +1389,12 @@ out: * true. * * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe - * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc. + * enable FBC for the chosen CRTC. If it does, it will set i915->fbc.crtc. 
*/ -void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, +void intel_fbc_choose_crtc(struct drm_i915_private *i915, struct intel_atomic_state *state) { - struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc *fbc = &i915->fbc; struct intel_plane *plane; struct intel_plane_state *plane_state; bool crtc_chosen = false; @@ -1268,7 +1407,7 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, !intel_atomic_get_new_crtc_state(state, fbc->crtc)) goto out; - if (!intel_fbc_can_enable(dev_priv)) + if (!intel_fbc_can_enable(fbc)) goto out; /* Simply choose the first CRTC that is compatible and has a visible @@ -1279,7 +1418,7 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, struct intel_crtc_state *crtc_state; struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc); - if (!plane->has_fbc) + if (plane->fbc != fbc) continue; if (!plane_state->uapi.visible) @@ -1312,19 +1451,21 @@ out: static void intel_fbc_enable(struct intel_atomic_state *state, struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_plane *plane = to_intel_plane(crtc->base.primary); const struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); const struct intel_plane_state *plane_state = intel_atomic_get_new_plane_state(state, plane); - struct intel_fbc *fbc = &dev_priv->fbc; - struct intel_fbc_state_cache *cache = &fbc->state_cache; + struct intel_fbc *fbc = plane->fbc; + struct intel_fbc_state_cache *cache; int min_limit; - if (!plane->has_fbc || !plane_state) + if (!fbc || !plane_state) return; + cache = &fbc->state_cache; + min_limit = intel_fbc_min_limit(plane_state->hw.fb ? plane_state->hw.fb->format->cpp[0] : 0); @@ -1335,13 +1476,13 @@ static void intel_fbc_enable(struct intel_atomic_state *state, goto out; if (fbc->limit >= min_limit && - !intel_fbc_cfb_size_changed(dev_priv)) + !intel_fbc_cfb_size_changed(fbc)) goto out; - __intel_fbc_disable(dev_priv); + __intel_fbc_disable(fbc); } - drm_WARN_ON(&dev_priv->drm, fbc->active); + drm_WARN_ON(&i915->drm, fbc->active); intel_fbc_update_state_cache(crtc, crtc_state, plane_state); @@ -1349,20 +1490,19 @@ static void intel_fbc_enable(struct intel_atomic_state *state, if (!cache->plane.visible) goto out; - if (intel_fbc_alloc_cfb(dev_priv, - intel_fbc_cfb_size(dev_priv, cache), min_limit)) { + if (intel_fbc_alloc_cfb(fbc, intel_fbc_cfb_size(fbc, cache), min_limit)) { cache->plane.visible = false; fbc->no_fbc_reason = "not enough stolen memory"; goto out; } - drm_dbg_kms(&dev_priv->drm, "Enabling FBC on pipe %c\n", + drm_dbg_kms(&i915->drm, "Enabling FBC on pipe %c\n", pipe_name(crtc->pipe)); fbc->no_fbc_reason = "FBC enabled but not active yet\n"; fbc->crtc = crtc; - intel_fbc_program_cfb(dev_priv); + intel_fbc_program_cfb(fbc); out: mutex_unlock(&fbc->lock); } @@ -1375,16 +1515,15 @@ out: */ void intel_fbc_disable(struct intel_crtc *crtc) { - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_plane *plane = to_intel_plane(crtc->base.primary); - struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc *fbc = plane->fbc; - if (!plane->has_fbc) + if (!fbc) return; mutex_lock(&fbc->lock); if (fbc->crtc == crtc) - __intel_fbc_disable(dev_priv); + __intel_fbc_disable(fbc); mutex_unlock(&fbc->lock); } @@ -1412,30 +1551,30 @@ void intel_fbc_update(struct intel_atomic_state *state, /** * intel_fbc_global_disable - globally disable FBC - * @dev_priv: i915 device instance + * @i915: 
i915 device instance * * This function disables FBC regardless of which CRTC is associated with it. */ -void intel_fbc_global_disable(struct drm_i915_private *dev_priv) +void intel_fbc_global_disable(struct drm_i915_private *i915) { - struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc *fbc = &i915->fbc; - if (!HAS_FBC(dev_priv)) + if (!HAS_FBC(i915)) return; mutex_lock(&fbc->lock); if (fbc->crtc) { - drm_WARN_ON(&dev_priv->drm, fbc->crtc->active); - __intel_fbc_disable(dev_priv); + drm_WARN_ON(&i915->drm, fbc->crtc->active); + __intel_fbc_disable(fbc); } mutex_unlock(&fbc->lock); } static void intel_fbc_underrun_work_fn(struct work_struct *work) { - struct drm_i915_private *dev_priv = + struct drm_i915_private *i915 = container_of(work, struct drm_i915_private, fbc.underrun_work); - struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc *fbc = &i915->fbc; mutex_lock(&fbc->lock); @@ -1443,46 +1582,47 @@ static void intel_fbc_underrun_work_fn(struct work_struct *work) if (fbc->underrun_detected || !fbc->crtc) goto out; - drm_dbg_kms(&dev_priv->drm, "Disabling FBC due to FIFO underrun.\n"); + drm_dbg_kms(&i915->drm, "Disabling FBC due to FIFO underrun.\n"); fbc->underrun_detected = true; - intel_fbc_deactivate(dev_priv, "FIFO underrun"); + intel_fbc_deactivate(fbc, "FIFO underrun"); out: mutex_unlock(&fbc->lock); } /* * intel_fbc_reset_underrun - reset FBC fifo underrun status. - * @dev_priv: i915 device instance + * @fbc: The FBC instance * * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we * want to re-enable FBC after an underrun to increase test coverage. */ -int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv) +int intel_fbc_reset_underrun(struct intel_fbc *fbc) { + struct drm_i915_private *i915 = fbc->i915; int ret; - cancel_work_sync(&dev_priv->fbc.underrun_work); + cancel_work_sync(&fbc->underrun_work); - ret = mutex_lock_interruptible(&dev_priv->fbc.lock); + ret = mutex_lock_interruptible(&fbc->lock); if (ret) return ret; - if (dev_priv->fbc.underrun_detected) { - drm_dbg_kms(&dev_priv->drm, + if (fbc->underrun_detected) { + drm_dbg_kms(&i915->drm, "Re-allowing FBC after fifo underrun\n"); - dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared"; + fbc->no_fbc_reason = "FIFO underrun cleared"; } - dev_priv->fbc.underrun_detected = false; - mutex_unlock(&dev_priv->fbc.lock); + fbc->underrun_detected = false; + mutex_unlock(&fbc->lock); return 0; } /** * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun - * @dev_priv: i915 device instance + * @fbc: The FBC instance * * Without FBC, most underruns are harmless and don't really cause too many * problems, except for an annoying message on dmesg. With FBC, underruns can @@ -1494,11 +1634,9 @@ int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv) * * This function is called from the IRQ handler. */ -void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv) +void intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc) { - struct intel_fbc *fbc = &dev_priv->fbc; - - if (!HAS_FBC(dev_priv)) + if (!HAS_FBC(fbc->i915)) return; /* There's no guarantee that underrun_detected won't be set to true @@ -1522,26 +1660,26 @@ void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv) * space to change the value during runtime without sanitizing it again. IGT * relies on being able to change i915.enable_fbc at runtime. 
*/ -static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv) +static int intel_sanitize_fbc_option(struct drm_i915_private *i915) { - if (dev_priv->params.enable_fbc >= 0) - return !!dev_priv->params.enable_fbc; + if (i915->params.enable_fbc >= 0) + return !!i915->params.enable_fbc; - if (!HAS_FBC(dev_priv)) + if (!HAS_FBC(i915)) return 0; - if (IS_BROADWELL(dev_priv) || DISPLAY_VER(dev_priv) >= 9) + if (IS_BROADWELL(i915) || DISPLAY_VER(i915) >= 9) return 1; return 0; } -static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv) +static bool need_fbc_vtd_wa(struct drm_i915_private *i915) { /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */ - if (intel_vtd_active() && - (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) { - drm_info(&dev_priv->drm, + if (intel_vtd_active(i915) && + (IS_SKYLAKE(i915) || IS_BROXTON(i915))) { + drm_info(&i915->drm, "Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n"); return true; } @@ -1551,36 +1689,50 @@ static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv) /** * intel_fbc_init - Initialize FBC - * @dev_priv: the i915 device + * @i915: the i915 device * * This function might be called during PM init process. */ -void intel_fbc_init(struct drm_i915_private *dev_priv) +void intel_fbc_init(struct drm_i915_private *i915) { - struct intel_fbc *fbc = &dev_priv->fbc; + struct intel_fbc *fbc = &i915->fbc; + fbc->i915 = i915; INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn); mutex_init(&fbc->lock); fbc->active = false; - if (!drm_mm_initialized(&dev_priv->mm.stolen)) - mkwrite_device_info(dev_priv)->display.has_fbc = false; + if (!drm_mm_initialized(&i915->mm.stolen)) + mkwrite_device_info(i915)->display.has_fbc = false; - if (need_fbc_vtd_wa(dev_priv)) - mkwrite_device_info(dev_priv)->display.has_fbc = false; + if (need_fbc_vtd_wa(i915)) + mkwrite_device_info(i915)->display.has_fbc = false; - dev_priv->params.enable_fbc = intel_sanitize_fbc_option(dev_priv); - drm_dbg_kms(&dev_priv->drm, "Sanitized enable_fbc value: %d\n", - dev_priv->params.enable_fbc); + i915->params.enable_fbc = intel_sanitize_fbc_option(i915); + drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n", + i915->params.enable_fbc); - if (!HAS_FBC(dev_priv)) { + if (!HAS_FBC(i915)) { fbc->no_fbc_reason = "unsupported by this chipset"; return; } + if (DISPLAY_VER(i915) >= 7) + fbc->funcs = &ivb_fbc_funcs; + else if (DISPLAY_VER(i915) == 6) + fbc->funcs = &snb_fbc_funcs; + else if (DISPLAY_VER(i915) == 5) + fbc->funcs = &ilk_fbc_funcs; + else if (IS_G4X(i915)) + fbc->funcs = &g4x_fbc_funcs; + else if (DISPLAY_VER(i915) == 4) + fbc->funcs = &i965_fbc_funcs; + else + fbc->funcs = &i8xx_fbc_funcs; + /* We still don't have any sort of hardware state readout for FBC, so * deactivate it in case the BIOS activated it to make sure software * matches the hardware state. 
*/ - if (intel_fbc_hw_is_active(dev_priv)) - intel_fbc_hw_deactivate(dev_priv); + if (intel_fbc_hw_is_active(fbc)) + intel_fbc_hw_deactivate(fbc); } diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h index b97d908738e6..ce48a22c5e9e 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.h +++ b/drivers/gpu/drm/i915/display/intel_fbc.h @@ -14,16 +14,19 @@ struct drm_i915_private; struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; +struct intel_fbc; struct intel_plane_state; void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, struct intel_atomic_state *state); -bool intel_fbc_is_active(struct drm_i915_private *dev_priv); +bool intel_fbc_is_active(struct intel_fbc *fbc); +bool intel_fbc_is_compressing(struct intel_fbc *fbc); bool intel_fbc_pre_update(struct intel_atomic_state *state, struct intel_crtc *crtc); void intel_fbc_post_update(struct intel_atomic_state *state, struct intel_crtc *crtc); void intel_fbc_init(struct drm_i915_private *dev_priv); +void intel_fbc_cleanup(struct drm_i915_private *dev_priv); void intel_fbc_update(struct intel_atomic_state *state, struct intel_crtc *crtc); void intel_fbc_disable(struct intel_crtc *crtc); @@ -33,8 +36,8 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv, enum fb_op_origin origin); void intel_fbc_flush(struct drm_i915_private *dev_priv, unsigned int frontbuffer_bits, enum fb_op_origin origin); -void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv); -void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv); -int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv); +void intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc); +int intel_fbc_reset_underrun(struct intel_fbc *fbc); +int intel_fbc_set_false_color(struct intel_fbc *fbc, bool enable); #endif /* __INTEL_FBC_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c index dd2cf0c59921..2b5f80f3b4e0 100644 --- a/drivers/gpu/drm/i915/display/intel_fdi.c +++ b/drivers/gpu/drm/i915/display/intel_fdi.c @@ -8,7 +8,6 @@ #include "intel_de.h" #include "intel_display_types.h" #include "intel_fdi.h" -#include "intel_sbi.h" static void assert_fdi_tx(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) @@ -887,6 +886,43 @@ void hsw_fdi_link_train(struct intel_encoder *encoder, DP_TP_CTL_ENABLE); } +void hsw_fdi_disable(struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + u32 val; + + /* + * Bspec lists this as both step 13 (before DDI_BUF_CTL disable) + * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN, + * step 13 is the correct place for it. Step 18 is where it was + * originally before the BUN. 
+ */ + val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); + val &= ~FDI_RX_ENABLE; + intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); + + val = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E)); + val &= ~DDI_BUF_CTL_ENABLE; + intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), val); + + intel_wait_ddi_buf_idle(dev_priv, PORT_E); + + intel_ddi_disable_clock(encoder); + + val = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A)); + val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); + val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); + intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), val); + + val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); + val &= ~FDI_PCDCLK; + intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); + + val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); + val &= ~FDI_RX_PLL_ENABLE; + intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val); +} + void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); @@ -1006,104 +1042,6 @@ void ilk_fdi_disable(struct intel_crtc *crtc) udelay(100); } -static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv) -{ - u32 tmp; - - tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2); - tmp |= FDI_MPHY_IOSFSB_RESET_CTL; - intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp); - - if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) & - FDI_MPHY_IOSFSB_RESET_STATUS, 100)) - drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n"); - - tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2); - tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; - intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp); - - if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) & - FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) - drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n"); -} - -/* WaMPhyProgramming:hsw */ -void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv) -{ - u32 tmp; - - lpt_fdi_reset_mphy(dev_priv); - - tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY); - tmp &= ~(0xFF << 24); - tmp |= (0x12 << 24); - intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY); - tmp |= (1 << 11); - intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY); - tmp |= (1 << 11); - intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY); - tmp |= (1 << 24) | (1 << 21) | (1 << 18); - intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY); - tmp |= (1 << 24) | (1 << 21) | (1 << 18); - intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); - tmp &= ~(7 << 13); - tmp |= (5 << 13); - intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); - tmp &= ~(7 << 13); - tmp |= (5 << 13); - intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); - tmp &= ~0xFF; - tmp |= 0x1C; - intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY); - tmp &= ~0xFF; - tmp |= 0x1C; - intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY); - tmp &= ~(0xFF << 16); - tmp |= (0x1C << 16); - intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY); - tmp &= ~(0xFF << 16); - tmp |= (0x1C << 16); - intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); - 
tmp |= (1 << 27); - intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); - tmp |= (1 << 27); - intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); - tmp &= ~(0xF << 28); - tmp |= (4 << 28); - intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); - - tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); - tmp &= ~(0xF << 28); - tmp |= (4 << 28); - intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); -} - static const struct intel_fdi_funcs ilk_funcs = { .fdi_link_train = ilk_fdi_link_train, }; diff --git a/drivers/gpu/drm/i915/display/intel_fdi.h b/drivers/gpu/drm/i915/display/intel_fdi.h index 640d6585c137..1cdb86172702 100644 --- a/drivers/gpu/drm/i915/display/intel_fdi.h +++ b/drivers/gpu/drm/i915/display/intel_fdi.h @@ -23,8 +23,8 @@ void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state); void intel_fdi_init_hook(struct drm_i915_private *dev_priv); void hsw_fdi_link_train(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); +void hsw_fdi_disable(struct intel_encoder *encoder); void intel_fdi_pll_freq_update(struct drm_i915_private *i915); -void lpt_fdi_program_mphy(struct drm_i915_private *i915); void intel_fdi_link_train(struct intel_crtc *crtc, const struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c index eb841960840d..28d9eeb7b4f3 100644 --- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c +++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c @@ -434,7 +434,7 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun\n", pipe_name(pipe)); } - intel_fbc_handle_fifo_underrun_irq(dev_priv); + intel_fbc_handle_fifo_underrun_irq(&dev_priv->fbc); } /** diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c index ceb1bf8a8c3c..3b8b84177085 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.c +++ b/drivers/gpu/drm/i915/display/intel_gmbus.c @@ -334,6 +334,15 @@ intel_gpio_setup(struct intel_gmbus *bus, unsigned int pin) algo->data = bus; } +static bool has_gmbus_irq(struct drm_i915_private *i915) +{ + /* + * encoder->shutdown() may want to use GMBUS + * after irqs have already been disabled. + */ + return HAS_GMBUS_IRQ(i915) && intel_irqs_enabled(i915); +} + static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en) { DEFINE_WAIT(wait); @@ -344,7 +353,7 @@ static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en) * we also need to check for NAKs besides the hw ready/idle signal, we * need to wake up periodically and check that ourselves. */ - if (!HAS_GMBUS_IRQ(dev_priv)) + if (!has_gmbus_irq(dev_priv)) irq_en = 0; add_wait_queue(&dev_priv->gmbus_wait_queue, &wait); @@ -375,7 +384,7 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv) /* Important: The hw handles only the first bit, so set only one! 
*/ irq_enable = 0; - if (HAS_GMBUS_IRQ(dev_priv)) + if (has_gmbus_irq(dev_priv)) irq_enable = GMBUS_IDLE_EN; add_wait_queue(&dev_priv->gmbus_wait_queue, &wait); diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 371736bdc01f..3b5b9e7b05b7 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -1800,6 +1800,11 @@ static bool intel_has_hdmi_sink(struct intel_hdmi *hdmi, READ_ONCE(to_intel_digital_connector_state(conn_state)->force_audio) != HDMI_AUDIO_OFF_DVI; } +static bool intel_hdmi_is_ycbcr420(const struct intel_crtc_state *crtc_state) +{ + return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420; +} + static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, bool respect_downstream_limits, bool has_hdmi_sink) @@ -1864,8 +1869,12 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi, return MODE_OK; } -static int intel_hdmi_port_clock(int clock, int bpc) +static int intel_hdmi_tmds_clock(int clock, int bpc, bool ycbcr420_output) { + /* YCBCR420 TMDS rate requirement is half the pixel clock */ + if (ycbcr420_output) + clock /= 2; + /* * Need to adjust the port link by: * 1.5x for 12bpc @@ -1874,18 +1883,29 @@ static int intel_hdmi_port_clock(int clock, int bpc) return clock * bpc / 8; } -static bool intel_hdmi_bpc_possible(struct drm_connector *connector, - int bpc, bool has_hdmi_sink, bool ycbcr420_output) +static bool intel_hdmi_source_bpc_possible(struct drm_i915_private *i915, int bpc) +{ + switch (bpc) { + case 12: + return !HAS_GMCH(i915); + case 10: + return DISPLAY_VER(i915) >= 11; + case 8: + return true; + default: + MISSING_CASE(bpc); + return false; + } +} + +static bool intel_hdmi_sink_bpc_possible(struct drm_connector *connector, + int bpc, bool has_hdmi_sink, bool ycbcr420_output) { - struct drm_i915_private *i915 = to_i915(connector->dev); const struct drm_display_info *info = &connector->display_info; const struct drm_hdmi_info *hdmi = &info->hdmi; switch (bpc) { case 12: - if (HAS_GMCH(i915)) - return false; - if (!has_hdmi_sink) return false; @@ -1894,9 +1914,6 @@ static bool intel_hdmi_bpc_possible(struct drm_connector *connector, else return info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36; case 10: - if (DISPLAY_VER(i915) < 11) - return false; - if (!has_hdmi_sink) return false; @@ -1916,26 +1933,26 @@ static enum drm_mode_status intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock, bool has_hdmi_sink, bool ycbcr420_output) { + struct drm_i915_private *i915 = to_i915(connector->dev); struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector)); enum drm_mode_status status; - if (ycbcr420_output) - clock /= 2; - /* check if we can do 8bpc */ - status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 8), + status = hdmi_port_clock_valid(hdmi, intel_hdmi_tmds_clock(clock, 8, ycbcr420_output), true, has_hdmi_sink); /* if we can't do 8bpc we may still be able to do 12bpc */ if (status != MODE_OK && - intel_hdmi_bpc_possible(connector, 12, has_hdmi_sink, ycbcr420_output)) - status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 12), + intel_hdmi_source_bpc_possible(i915, 12) && + intel_hdmi_sink_bpc_possible(connector, 12, has_hdmi_sink, ycbcr420_output)) + status = hdmi_port_clock_valid(hdmi, intel_hdmi_tmds_clock(clock, 12, ycbcr420_output), true, has_hdmi_sink); /* if we can't do 8,12bpc we may still be able to do 10bpc */ if (status != MODE_OK && - intel_hdmi_bpc_possible(connector, 10, has_hdmi_sink, 
ycbcr420_output)) - status = hdmi_port_clock_valid(hdmi, intel_hdmi_port_clock(clock, 10), + intel_hdmi_source_bpc_possible(i915, 10) && + intel_hdmi_sink_bpc_possible(connector, 10, has_hdmi_sink, ycbcr420_output)) + status = hdmi_port_clock_valid(hdmi, intel_hdmi_tmds_clock(clock, 10, ycbcr420_output), true, has_hdmi_sink); return status; @@ -2000,7 +2017,7 @@ bool intel_hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, if (connector_state->crtc != crtc_state->uapi.crtc) continue; - if (!intel_hdmi_bpc_possible(connector, bpc, has_hdmi_sink, ycbcr420_output)) + if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink, ycbcr420_output)) return false; } @@ -2015,6 +2032,9 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; + if (!intel_hdmi_source_bpc_possible(dev_priv, bpc)) + return false; + /* * HDMI deep color affects the clocks, so it's only possible * when not cloning with other encoder types. @@ -2023,7 +2043,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, return false; /* Display Wa_1405510057:icl,ehl */ - if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 && + if (intel_hdmi_is_ycbcr420(crtc_state) && bpc == 10 && DISPLAY_VER(dev_priv) == 11 && (adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start) % 8 == 2) @@ -2031,8 +2051,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, return intel_hdmi_deep_color_possible(crtc_state, bpc, crtc_state->has_hdmi_sink, - crtc_state->output_format == - INTEL_OUTPUT_FORMAT_YCBCR420); + intel_hdmi_is_ycbcr420(crtc_state)); } static int intel_hdmi_compute_bpc(struct intel_encoder *encoder, @@ -2040,12 +2059,13 @@ static int intel_hdmi_compute_bpc(struct intel_encoder *encoder, int clock) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); + bool ycbcr420_output = intel_hdmi_is_ycbcr420(crtc_state); int bpc; for (bpc = 12; bpc >= 10; bpc -= 2) { if (hdmi_deep_color_possible(crtc_state, bpc) && hdmi_port_clock_valid(intel_hdmi, - intel_hdmi_port_clock(clock, bpc), + intel_hdmi_tmds_clock(clock, bpc, ycbcr420_output), true, crtc_state->has_hdmi_sink) == MODE_OK) return bpc; } @@ -2065,13 +2085,10 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) clock *= 2; - /* YCBCR420 TMDS rate requirement is half the pixel clock */ - if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) - clock /= 2; - bpc = intel_hdmi_compute_bpc(encoder, crtc_state, clock); - crtc_state->port_clock = intel_hdmi_port_clock(clock, bpc); + crtc_state->port_clock = intel_hdmi_tmds_clock(clock, bpc, + intel_hdmi_is_ycbcr420(crtc_state)); /* * pipe_bpp could already be below 8bpc due to @@ -2141,34 +2158,44 @@ static bool intel_hdmi_has_audio(struct intel_encoder *encoder, return intel_conn_state->force_audio == HDMI_AUDIO_ON; } +static enum intel_output_format +intel_hdmi_output_format(struct intel_connector *connector, + bool ycbcr_420_output) +{ + if (connector->base.ycbcr_420_allowed && ycbcr_420_output) + return INTEL_OUTPUT_FORMAT_YCBCR420; + else + return INTEL_OUTPUT_FORMAT_RGB; +} + static int intel_hdmi_compute_output_format(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { - struct drm_connector *connector = conn_state->connector; - struct drm_i915_private *i915 = to_i915(connector->dev); + struct 
intel_connector *connector = to_intel_connector(conn_state->connector); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; + const struct drm_display_info *info = &connector->base.display_info; + struct drm_i915_private *i915 = to_i915(connector->base.dev); + bool ycbcr_420_only = drm_mode_is_420_only(info, adjusted_mode); int ret; - bool ycbcr_420_only; - ycbcr_420_only = drm_mode_is_420_only(&connector->display_info, adjusted_mode); - if (connector->ycbcr_420_allowed && ycbcr_420_only) { - crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; - } else { - if (!connector->ycbcr_420_allowed && ycbcr_420_only) - drm_dbg_kms(&i915->drm, - "YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n"); + crtc_state->output_format = intel_hdmi_output_format(connector, ycbcr_420_only); + + if (ycbcr_420_only && !intel_hdmi_is_ycbcr420(crtc_state)) { + drm_dbg_kms(&i915->drm, + "YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n"); crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB; } ret = intel_hdmi_compute_clock(encoder, crtc_state); if (ret) { - if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420 && - connector->ycbcr_420_allowed && - drm_mode_is_420_also(&connector->display_info, adjusted_mode)) { - crtc_state->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; - ret = intel_hdmi_compute_clock(encoder, crtc_state); - } + if (intel_hdmi_is_ycbcr420(crtc_state) || + !connector->base.ycbcr_420_allowed || + !drm_mode_is_420_also(info, adjusted_mode)) + return ret; + + crtc_state->output_format = intel_hdmi_output_format(connector, true); + ret = intel_hdmi_compute_clock(encoder, crtc_state); } return ret; @@ -2208,7 +2235,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, if (ret) return ret; - if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) { + if (intel_hdmi_is_ycbcr420(pipe_config)) { ret = intel_panel_fitting(pipe_config, conn_state); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c index 7f3c638c8950..4970bf146c4a 100644 --- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c +++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c @@ -74,7 +74,7 @@ #include "intel_de.h" #include "intel_lpe_audio.h" -#define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->lpe_audio.platdev != NULL) +#define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->audio.lpe.platdev != NULL) static struct platform_device * lpe_audio_platdev_create(struct drm_i915_private *dev_priv) @@ -96,7 +96,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv) return ERR_PTR(-ENOMEM); } - rsc[0].start = rsc[0].end = dev_priv->lpe_audio.irq; + rsc[0].start = rsc[0].end = dev_priv->audio.lpe.irq; rsc[0].flags = IORESOURCE_IRQ; rsc[0].name = "hdmi-lpe-audio-irq"; @@ -148,7 +148,7 @@ static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv) * than us fiddle with its internals. 
*/ - platform_device_unregister(dev_priv->lpe_audio.platdev); + platform_device_unregister(dev_priv->audio.lpe.platdev); } static void lpe_audio_irq_unmask(struct irq_data *d) @@ -167,7 +167,7 @@ static struct irq_chip lpe_audio_irqchip = { static int lpe_audio_irq_init(struct drm_i915_private *dev_priv) { - int irq = dev_priv->lpe_audio.irq; + int irq = dev_priv->audio.lpe.irq; drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)); irq_set_chip_and_handler_name(irq, @@ -204,15 +204,15 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv) { int ret; - dev_priv->lpe_audio.irq = irq_alloc_desc(0); - if (dev_priv->lpe_audio.irq < 0) { + dev_priv->audio.lpe.irq = irq_alloc_desc(0); + if (dev_priv->audio.lpe.irq < 0) { drm_err(&dev_priv->drm, "Failed to allocate IRQ desc: %d\n", - dev_priv->lpe_audio.irq); - ret = dev_priv->lpe_audio.irq; + dev_priv->audio.lpe.irq); + ret = dev_priv->audio.lpe.irq; goto err; } - drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->lpe_audio.irq); + drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->audio.lpe.irq); ret = lpe_audio_irq_init(dev_priv); @@ -223,10 +223,10 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv) goto err_free_irq; } - dev_priv->lpe_audio.platdev = lpe_audio_platdev_create(dev_priv); + dev_priv->audio.lpe.platdev = lpe_audio_platdev_create(dev_priv); - if (IS_ERR(dev_priv->lpe_audio.platdev)) { - ret = PTR_ERR(dev_priv->lpe_audio.platdev); + if (IS_ERR(dev_priv->audio.lpe.platdev)) { + ret = PTR_ERR(dev_priv->audio.lpe.platdev); drm_err(&dev_priv->drm, "Failed to create lpe audio platform device: %d\n", ret); @@ -241,10 +241,10 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv) return 0; err_free_irq: - irq_free_desc(dev_priv->lpe_audio.irq); + irq_free_desc(dev_priv->audio.lpe.irq); err: - dev_priv->lpe_audio.irq = -1; - dev_priv->lpe_audio.platdev = NULL; + dev_priv->audio.lpe.irq = -1; + dev_priv->audio.lpe.platdev = NULL; return ret; } @@ -262,7 +262,7 @@ void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv) if (!HAS_LPE_AUDIO(dev_priv)) return; - ret = generic_handle_irq(dev_priv->lpe_audio.irq); + ret = generic_handle_irq(dev_priv->audio.lpe.irq); if (ret) drm_err_ratelimited(&dev_priv->drm, "error handling LPE audio irq: %d\n", ret); @@ -303,10 +303,10 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv) lpe_audio_platdev_destroy(dev_priv); - irq_free_desc(dev_priv->lpe_audio.irq); + irq_free_desc(dev_priv->audio.lpe.irq); - dev_priv->lpe_audio.irq = -1; - dev_priv->lpe_audio.platdev = NULL; + dev_priv->audio.lpe.irq = -1; + dev_priv->audio.lpe.platdev = NULL; } /** @@ -333,7 +333,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv, if (!HAS_LPE_AUDIO(dev_priv)) return; - pdata = dev_get_platdata(&dev_priv->lpe_audio.platdev->dev); + pdata = dev_get_platdata(&dev_priv->audio.lpe.platdev->dev); ppdata = &pdata->port[port - PORT_B]; spin_lock_irqsave(&pdata->lpe_audio_slock, irqflags); @@ -361,7 +361,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv, } if (pdata->notify_audio_lpe) - pdata->notify_audio_lpe(dev_priv->lpe_audio.platdev, port - PORT_B); + pdata->notify_audio_lpe(dev_priv->audio.lpe.platdev, port - PORT_B); spin_unlock_irqrestore(&pdata->lpe_audio_slock, irqflags); } diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c new file mode 100644 index 000000000000..a55c4bfacd0d --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_pch_display.c @@ -0,0 +1,501 @@ +// 
SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +#include "g4x_dp.h" +#include "intel_crt.h" +#include "intel_de.h" +#include "intel_display_types.h" +#include "intel_fdi.h" +#include "intel_lvds.h" +#include "intel_pch_display.h" +#include "intel_pch_refclk.h" +#include "intel_pps.h" +#include "intel_sdvo.h" + +static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, + enum pipe pipe, enum port port, + i915_reg_t dp_reg) +{ + enum pipe port_pipe; + bool state; + + state = g4x_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe); + + I915_STATE_WARN(state && port_pipe == pipe, + "PCH DP %c enabled on transcoder %c, should be disabled\n", + port_name(port), pipe_name(pipe)); + + I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, + "IBX PCH DP %c still using transcoder B\n", + port_name(port)); +} + +static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, + enum pipe pipe, enum port port, + i915_reg_t hdmi_reg) +{ + enum pipe port_pipe; + bool state; + + state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe); + + I915_STATE_WARN(state && port_pipe == pipe, + "PCH HDMI %c enabled on transcoder %c, should be disabled\n", + port_name(port), pipe_name(pipe)); + + I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B, + "IBX PCH HDMI %c still using transcoder B\n", + port_name(port)); +} + +static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + enum pipe port_pipe; + + assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B); + assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C); + assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D); + + I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) && + port_pipe == pipe, + "PCH VGA enabled on transcoder %c, should be disabled\n", + pipe_name(pipe)); + + I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) && + port_pipe == pipe, + "PCH LVDS enabled on transcoder %c, should be disabled\n", + pipe_name(pipe)); + + /* PCH SDVOB multiplex with HDMIB */ + assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB); + assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC); + assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID); +} + +static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + u32 val; + bool enabled; + + val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe)); + enabled = !!(val & TRANS_ENABLE); + I915_STATE_WARN(enabled, + "transcoder assertion failed, should be off on pipe %c but is still active\n", + pipe_name(pipe)); +} + +static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state, + enum pipe pch_transcoder) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + + intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder), + intel_de_read(dev_priv, HTOTAL(cpu_transcoder))); + intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder), + intel_de_read(dev_priv, HBLANK(cpu_transcoder))); + intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder), + intel_de_read(dev_priv, HSYNC(cpu_transcoder))); + + intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder), + intel_de_read(dev_priv, VTOTAL(cpu_transcoder))); + intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder), + intel_de_read(dev_priv, VBLANK(cpu_transcoder))); + 
intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder), + intel_de_read(dev_priv, VSYNC(cpu_transcoder))); + intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder), + intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder))); +} + +static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + i915_reg_t reg; + u32 val, pipeconf_val; + + /* Make sure PCH DPLL is enabled */ + assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll); + + /* FDI must be feeding us bits for PCH ports */ + assert_fdi_tx_enabled(dev_priv, pipe); + assert_fdi_rx_enabled(dev_priv, pipe); + + if (HAS_PCH_CPT(dev_priv)) { + reg = TRANS_CHICKEN2(pipe); + val = intel_de_read(dev_priv, reg); + /* + * Workaround: Set the timing override bit + * before enabling the pch transcoder. + */ + val |= TRANS_CHICKEN2_TIMING_OVERRIDE; + /* Configure frame start delay to match the CPU */ + val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; + val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1); + intel_de_write(dev_priv, reg, val); + } + + reg = PCH_TRANSCONF(pipe); + val = intel_de_read(dev_priv, reg); + pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe)); + + if (HAS_PCH_IBX(dev_priv)) { + /* Configure frame start delay to match the CPU */ + val &= ~TRANS_FRAME_START_DELAY_MASK; + val |= TRANS_FRAME_START_DELAY(dev_priv->framestart_delay - 1); + + /* + * Make the BPC in transcoder be consistent with + * that in pipeconf reg. For HDMI we must use 8bpc + * here for both 8bpc and 12bpc. + */ + val &= ~PIPECONF_BPC_MASK; + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) + val |= PIPECONF_8BPC; + else + val |= pipeconf_val & PIPECONF_BPC_MASK; + } + + val &= ~TRANS_INTERLACE_MASK; + if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) { + if (HAS_PCH_IBX(dev_priv) && + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) + val |= TRANS_LEGACY_INTERLACED_ILK; + else + val |= TRANS_INTERLACED; + } else { + val |= TRANS_PROGRESSIVE; + } + + intel_de_write(dev_priv, reg, val | TRANS_ENABLE); + if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100)) + drm_err(&dev_priv->drm, "failed to enable transcoder %c\n", + pipe_name(pipe)); +} + +static void ilk_disable_pch_transcoder(struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + i915_reg_t reg; + u32 val; + + /* FDI relies on the transcoder */ + assert_fdi_tx_disabled(dev_priv, pipe); + assert_fdi_rx_disabled(dev_priv, pipe); + + /* Ports must be off as well */ + assert_pch_ports_disabled(dev_priv, pipe); + + reg = PCH_TRANSCONF(pipe); + val = intel_de_read(dev_priv, reg); + val &= ~TRANS_ENABLE; + intel_de_write(dev_priv, reg, val); + /* wait for PCH transcoder off, transcoder state */ + if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50)) + drm_err(&dev_priv->drm, "failed to disable transcoder %c\n", + pipe_name(pipe)); + + if (HAS_PCH_CPT(dev_priv)) { + /* Workaround: Clear the timing override chicken bit again. 
*/ + reg = TRANS_CHICKEN2(pipe); + val = intel_de_read(dev_priv, reg); + val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; + intel_de_write(dev_priv, reg, val); + } +} + +/* + * Enable PCH resources required for PCH ports: + * - PCH PLLs + * - FDI training & RX/TX + * - update transcoder timings + * - DP transcoding bits + * - transcoder + */ +void ilk_pch_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + enum pipe pipe = crtc->pipe; + u32 temp; + + assert_pch_transcoder_disabled(dev_priv, pipe); + + /* For PCH output, training FDI link */ + intel_fdi_link_train(crtc, crtc_state); + + /* + * We need to program the right clock selection + * before writing the pixel multiplier into the DPLL. + */ + if (HAS_PCH_CPT(dev_priv)) { + u32 sel; + + temp = intel_de_read(dev_priv, PCH_DPLL_SEL); + temp |= TRANS_DPLL_ENABLE(pipe); + sel = TRANS_DPLLB_SEL(pipe); + if (crtc_state->shared_dpll == + intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B)) + temp |= sel; + else + temp &= ~sel; + intel_de_write(dev_priv, PCH_DPLL_SEL, temp); + } + + /* + * XXX: pch pll's can be enabled any time before we enable the PCH + * transcoder, and we actually should do this to not upset any PCH + * transcoder that already use the clock when we share it. + * + * Note that enable_shared_dpll tries to do the right thing, but + * get_shared_dpll unconditionally resets the pll - we need that + * to have the right LVDS enable sequence. + */ + intel_enable_shared_dpll(crtc_state); + + /* set transcoder timing, panel must allow it */ + assert_pps_unlocked(dev_priv, pipe); + ilk_pch_transcoder_set_timings(crtc_state, pipe); + + intel_fdi_normal_train(crtc); + + /* For PCH DP, enable TRANS_DP_CTL */ + if (HAS_PCH_CPT(dev_priv) && + intel_crtc_has_dp_encoder(crtc_state)) { + const struct drm_display_mode *adjusted_mode = + &crtc_state->hw.adjusted_mode; + u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; + i915_reg_t reg = TRANS_DP_CTL(pipe); + enum port port; + + temp = intel_de_read(dev_priv, reg); + temp &= ~(TRANS_DP_PORT_SEL_MASK | + TRANS_DP_SYNC_MASK | + TRANS_DP_BPC_MASK); + temp |= TRANS_DP_OUTPUT_ENABLE; + temp |= bpc << 9; /* same format but at 11:9 */ + + if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) + temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; + if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) + temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; + + port = intel_get_crtc_new_encoder(state, crtc_state)->port; + drm_WARN_ON(&dev_priv->drm, port < PORT_B || port > PORT_D); + temp |= TRANS_DP_PORT_SEL(port); + + intel_de_write(dev_priv, reg, temp); + } + + ilk_enable_pch_transcoder(crtc_state); +} + +void ilk_pch_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + ilk_fdi_disable(crtc); +} + +void ilk_pch_post_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + + ilk_disable_pch_transcoder(crtc); + + if (HAS_PCH_CPT(dev_priv)) { + i915_reg_t reg; + u32 temp; + + /* disable TRANS_DP_CTL */ + reg = TRANS_DP_CTL(pipe); + temp = intel_de_read(dev_priv, reg); + temp &= ~(TRANS_DP_OUTPUT_ENABLE | + TRANS_DP_PORT_SEL_MASK); + temp |= TRANS_DP_PORT_SEL_NONE; + intel_de_write(dev_priv, reg, temp); + + /* disable DPLL_SEL */ + temp = intel_de_read(dev_priv, PCH_DPLL_SEL); + temp &= ~(TRANS_DPLL_ENABLE(pipe) | 
TRANS_DPLLB_SEL(pipe)); + intel_de_write(dev_priv, PCH_DPLL_SEL, temp); + } + + ilk_fdi_pll_disable(crtc); +} + +static void ilk_pch_clock_get(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + + /* read out port_clock from the DPLL */ + i9xx_crtc_clock_get(crtc, crtc_state); + + /* + * In case there is an active pipe without active ports, + * we may need some idea for the dotclock anyway. + * Calculate one based on the FDI configuration. + */ + crtc_state->hw.adjusted_mode.crtc_clock = + intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, crtc_state), + &crtc_state->fdi_m_n); +} + +void ilk_pch_get_config(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_shared_dpll *pll; + enum pipe pipe = crtc->pipe; + enum intel_dpll_id pll_id; + bool pll_active; + u32 tmp; + + if ((intel_de_read(dev_priv, PCH_TRANSCONF(pipe)) & TRANS_ENABLE) == 0) + return; + + crtc_state->has_pch_encoder = true; + + tmp = intel_de_read(dev_priv, FDI_RX_CTL(pipe)); + crtc_state->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> + FDI_DP_PORT_WIDTH_SHIFT) + 1; + + ilk_get_fdi_m_n_config(crtc, crtc_state); + + if (HAS_PCH_IBX(dev_priv)) { + /* + * The pipe->pch transcoder and pch transcoder->pll + * mapping is fixed. + */ + pll_id = (enum intel_dpll_id) pipe; + } else { + tmp = intel_de_read(dev_priv, PCH_DPLL_SEL); + if (tmp & TRANS_DPLLB_SEL(pipe)) + pll_id = DPLL_ID_PCH_PLL_B; + else + pll_id = DPLL_ID_PCH_PLL_A; + } + + crtc_state->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, pll_id); + pll = crtc_state->shared_dpll; + + pll_active = intel_dpll_get_hw_state(dev_priv, pll, + &crtc_state->dpll_hw_state); + drm_WARN_ON(&dev_priv->drm, !pll_active); + + tmp = crtc_state->dpll_hw_state.dpll; + crtc_state->pixel_multiplier = + ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK) + >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1; + + ilk_pch_clock_get(crtc_state); +} + +static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder) +{ + u32 val, pipeconf_val; + + /* FDI must be feeding us bits for PCH ports */ + assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder); + assert_fdi_rx_enabled(dev_priv, PIPE_A); + + val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A)); + /* Workaround: set timing override bit. 
*/ + val |= TRANS_CHICKEN2_TIMING_OVERRIDE; + /* Configure frame start delay to match the CPU */ + val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK; + val |= TRANS_CHICKEN2_FRAME_START_DELAY(dev_priv->framestart_delay - 1); + intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val); + + val = TRANS_ENABLE; + pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder)); + + if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == + PIPECONF_INTERLACED_ILK) + val |= TRANS_INTERLACED; + else + val |= TRANS_PROGRESSIVE; + + intel_de_write(dev_priv, LPT_TRANSCONF, val); + if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF, + TRANS_STATE_ENABLE, 100)) + drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n"); +} + +static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) +{ + u32 val; + + val = intel_de_read(dev_priv, LPT_TRANSCONF); + val &= ~TRANS_ENABLE; + intel_de_write(dev_priv, LPT_TRANSCONF, val); + /* wait for PCH transcoder off, transcoder state */ + if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF, + TRANS_STATE_ENABLE, 50)) + drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n"); + + /* Workaround: clear timing override bit. */ + val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A)); + val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; + intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val); +} + +void lpt_pch_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + + assert_pch_transcoder_disabled(dev_priv, PIPE_A); + + lpt_program_iclkip(crtc_state); + + /* Set transcoder timing. */ + ilk_pch_transcoder_set_timings(crtc_state, PIPE_A); + + lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); +} + +void lpt_pch_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + + lpt_disable_pch_transcoder(dev_priv); + + lpt_disable_iclkip(dev_priv); +} + +void lpt_pch_get_config(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + u32 tmp; + + if ((intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) == 0) + return; + + crtc_state->has_pch_encoder = true; + + tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A)); + crtc_state->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> + FDI_DP_PORT_WIDTH_SHIFT) + 1; + + ilk_get_fdi_m_n_config(crtc, crtc_state); + + crtc_state->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv); +} diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.h b/drivers/gpu/drm/i915/display/intel_pch_display.h new file mode 100644 index 000000000000..2c387fe3a467 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_pch_display.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef _INTEL_PCH_DISPLAY_H_ +#define _INTEL_PCH_DISPLAY_H_ + +struct intel_atomic_state; +struct intel_crtc; +struct intel_crtc_state; + +void ilk_pch_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void ilk_pch_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void ilk_pch_post_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void ilk_pch_get_config(struct intel_crtc_state *crtc_state); + +void lpt_pch_enable(struct intel_atomic_state 
*state, + struct intel_crtc *crtc); +void lpt_pch_disable(struct intel_atomic_state *state, + struct intel_crtc *crtc); +void lpt_pch_get_config(struct intel_crtc_state *crtc_state); + +#endif diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c new file mode 100644 index 000000000000..b688fd87e3da --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c @@ -0,0 +1,648 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +#include "intel_de.h" +#include "intel_display_types.h" +#include "intel_panel.h" +#include "intel_pch_refclk.h" +#include "intel_sbi.h" + +static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv) +{ + u32 tmp; + + tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2); + tmp |= FDI_MPHY_IOSFSB_RESET_CTL; + intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp); + + if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) & + FDI_MPHY_IOSFSB_RESET_STATUS, 100)) + drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n"); + + tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2); + tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; + intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp); + + if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) & + FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) + drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n"); +} + +/* WaMPhyProgramming:hsw */ +static void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv) +{ + u32 tmp; + + lpt_fdi_reset_mphy(dev_priv); + + tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY); + tmp &= ~(0xFF << 24); + tmp |= (0x12 << 24); + intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY); + tmp |= (1 << 11); + intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY); + tmp |= (1 << 11); + intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY); + tmp |= (1 << 24) | (1 << 21) | (1 << 18); + intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY); + tmp |= (1 << 24) | (1 << 21) | (1 << 18); + intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); + tmp &= ~(7 << 13); + tmp |= (5 << 13); + intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); + tmp &= ~(7 << 13); + tmp |= (5 << 13); + intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); + tmp &= ~0xFF; + tmp |= 0x1C; + intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY); + tmp &= ~0xFF; + tmp |= 0x1C; + intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY); + tmp &= ~(0xFF << 16); + tmp |= (0x1C << 16); + intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY); + tmp &= ~(0xFF << 16); + tmp |= (0x1C << 16); + intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); + tmp |= (1 << 27); + intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); + tmp |= (1 << 27); + intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); + tmp &= ~(0xF << 28); + tmp |= (4 << 28); + intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); + + tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); + tmp &= ~(0xF 
<< 28); + tmp |= (4 << 28); + intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); +} + +void lpt_disable_iclkip(struct drm_i915_private *dev_priv) +{ + u32 temp; + + intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE); + + mutex_lock(&dev_priv->sb_lock); + + temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); + temp |= SBI_SSCCTL_DISABLE; + intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); + + mutex_unlock(&dev_priv->sb_lock); +} + +/* Program iCLKIP clock to the desired frequency */ +void lpt_program_iclkip(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + int clock = crtc_state->hw.adjusted_mode.crtc_clock; + u32 divsel, phaseinc, auxdiv, phasedir = 0; + u32 temp; + + lpt_disable_iclkip(dev_priv); + + /* The iCLK virtual clock root frequency is in MHz, + * but the adjusted_mode->crtc_clock in KHz. To get the + * divisors, it is necessary to divide one by another, so we + * convert the virtual clock precision to KHz here for higher + * precision. + */ + for (auxdiv = 0; auxdiv < 2; auxdiv++) { + u32 iclk_virtual_root_freq = 172800 * 1000; + u32 iclk_pi_range = 64; + u32 desired_divisor; + + desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, + clock << auxdiv); + divsel = (desired_divisor / iclk_pi_range) - 2; + phaseinc = desired_divisor % iclk_pi_range; + + /* + * Near 20MHz is a corner case which is + * out of range for the 7-bit divisor + */ + if (divsel <= 0x7f) + break; + } + + /* This should not happen with any sane values */ + drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) & + ~SBI_SSCDIVINTPHASE_DIVSEL_MASK); + drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) & + ~SBI_SSCDIVINTPHASE_INCVAL_MASK); + + drm_dbg_kms(&dev_priv->drm, + "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n", + clock, auxdiv, divsel, phasedir, phaseinc); + + mutex_lock(&dev_priv->sb_lock); + + /* Program SSCDIVINTPHASE6 */ + temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); + temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK; + temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel); + temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK; + temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc); + temp |= SBI_SSCDIVINTPHASE_DIR(phasedir); + temp |= SBI_SSCDIVINTPHASE_PROPAGATE; + intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK); + + /* Program SSCAUXDIV */ + temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); + temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1); + temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv); + intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK); + + /* Enable modulator and associated divider */ + temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); + temp &= ~SBI_SSCCTL_DISABLE; + intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); + + mutex_unlock(&dev_priv->sb_lock); + + /* Wait for initialization time */ + udelay(24); + + intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE); +} + +int lpt_get_iclkip(struct drm_i915_private *dev_priv) +{ + u32 divsel, phaseinc, auxdiv; + u32 iclk_virtual_root_freq = 172800 * 1000; + u32 iclk_pi_range = 64; + u32 desired_divisor; + u32 temp; + + if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0) + return 0; + + mutex_lock(&dev_priv->sb_lock); + + temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); + if (temp & SBI_SSCCTL_DISABLE) { + mutex_unlock(&dev_priv->sb_lock); + return 0; + } + + temp = intel_sbi_read(dev_priv, 
SBI_SSCDIVINTPHASE6, SBI_ICLK); + divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >> + SBI_SSCDIVINTPHASE_DIVSEL_SHIFT; + phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >> + SBI_SSCDIVINTPHASE_INCVAL_SHIFT; + + temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); + auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >> + SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT; + + mutex_unlock(&dev_priv->sb_lock); + + desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc; + + return DIV_ROUND_CLOSEST(iclk_virtual_root_freq, + desired_divisor << auxdiv); +} + +/* Implements 3 different sequences from BSpec chapter "Display iCLK + * Programming" based on the parameters passed: + * - Sequence to enable CLKOUT_DP + * - Sequence to enable CLKOUT_DP without spread + * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O + */ +static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv, + bool with_spread, bool with_fdi) +{ + u32 reg, tmp; + + if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread, + "FDI requires downspread\n")) + with_spread = true; + if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) && + with_fdi, "LP PCH doesn't have FDI\n")) + with_fdi = false; + + mutex_lock(&dev_priv->sb_lock); + + tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); + tmp &= ~SBI_SSCCTL_DISABLE; + tmp |= SBI_SSCCTL_PATHALT; + intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); + + udelay(24); + + if (with_spread) { + tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); + tmp &= ~SBI_SSCCTL_PATHALT; + intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); + + if (with_fdi) + lpt_fdi_program_mphy(dev_priv); + } + + reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0; + tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); + tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; + intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); + + mutex_unlock(&dev_priv->sb_lock); +} + +/* Sequence to disable CLKOUT_DP */ +void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv) +{ + u32 reg, tmp; + + mutex_lock(&dev_priv->sb_lock); + + reg = HAS_PCH_LPT_LP(dev_priv) ? 
SBI_GEN0 : SBI_DBUFF0; + tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); + tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE; + intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); + + tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); + if (!(tmp & SBI_SSCCTL_DISABLE)) { + if (!(tmp & SBI_SSCCTL_PATHALT)) { + tmp |= SBI_SSCCTL_PATHALT; + intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); + udelay(32); + } + tmp |= SBI_SSCCTL_DISABLE; + intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); + } + + mutex_unlock(&dev_priv->sb_lock); +} + +#define BEND_IDX(steps) ((50 + (steps)) / 5) + +static const u16 sscdivintphase[] = { + [BEND_IDX( 50)] = 0x3B23, + [BEND_IDX( 45)] = 0x3B23, + [BEND_IDX( 40)] = 0x3C23, + [BEND_IDX( 35)] = 0x3C23, + [BEND_IDX( 30)] = 0x3D23, + [BEND_IDX( 25)] = 0x3D23, + [BEND_IDX( 20)] = 0x3E23, + [BEND_IDX( 15)] = 0x3E23, + [BEND_IDX( 10)] = 0x3F23, + [BEND_IDX( 5)] = 0x3F23, + [BEND_IDX( 0)] = 0x0025, + [BEND_IDX( -5)] = 0x0025, + [BEND_IDX(-10)] = 0x0125, + [BEND_IDX(-15)] = 0x0125, + [BEND_IDX(-20)] = 0x0225, + [BEND_IDX(-25)] = 0x0225, + [BEND_IDX(-30)] = 0x0325, + [BEND_IDX(-35)] = 0x0325, + [BEND_IDX(-40)] = 0x0425, + [BEND_IDX(-45)] = 0x0425, + [BEND_IDX(-50)] = 0x0525, +}; + +/* + * Bend CLKOUT_DP + * steps -50 to 50 inclusive, in steps of 5 + * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz) + * change in clock period = -(steps / 10) * 5.787 ps + */ +static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps) +{ + u32 tmp; + int idx = BEND_IDX(steps); + + if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0)) + return; + + if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase))) + return; + + mutex_lock(&dev_priv->sb_lock); + + if (steps % 10 != 0) + tmp = 0xAAAAAAAB; + else + tmp = 0x00000000; + intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK); + + tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK); + tmp &= 0xffff0000; + tmp |= sscdivintphase[idx]; + intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK); + + mutex_unlock(&dev_priv->sb_lock); +} + +#undef BEND_IDX + +static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv) +{ + u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP); + u32 ctl = intel_de_read(dev_priv, SPLL_CTL); + + if ((ctl & SPLL_PLL_ENABLE) == 0) + return false; + + if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC && + (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) + return true; + + if (IS_BROADWELL(dev_priv) && + (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW) + return true; + + return false; +} + +static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv, + enum intel_dpll_id id) +{ + u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP); + u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id)); + + if ((ctl & WRPLL_PLL_ENABLE) == 0) + return false; + + if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC) + return true; + + if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) && + (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW && + (fuse_strap & HSW_CPU_SSC_ENABLE) == 0) + return true; + + return false; +} + +static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) +{ + struct intel_encoder *encoder; + bool has_fdi = false; + + for_each_intel_encoder(&dev_priv->drm, encoder) { + switch (encoder->type) { + case INTEL_OUTPUT_ANALOG: + has_fdi = true; + break; + default: + break; + } + } + + /* + * The BIOS may have decided to use the PCH SSC + * reference so we must not disable it until the + * relevant PLLs have stopped relying on it. 
We'll + * just leave the PCH SSC reference enabled in case + * any active PLL is using it. It will get disabled + * after runtime suspend if we don't have FDI. + * + * TODO: Move the whole reference clock handling + * to the modeset sequence proper so that we can + * actually enable/disable/reconfigure these things + * safely. To do that we need to introduce a real + * clock hierarchy. That would also allow us to do + * clock bending finally. + */ + dev_priv->pch_ssc_use = 0; + + if (spll_uses_pch_ssc(dev_priv)) { + drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n"); + dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL); + } + + if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) { + drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n"); + dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1); + } + + if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) { + drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n"); + dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2); + } + + if (dev_priv->pch_ssc_use) + return; + + if (has_fdi) { + lpt_bend_clkout_dp(dev_priv, 0); + lpt_enable_clkout_dp(dev_priv, true, true); + } else { + lpt_disable_clkout_dp(dev_priv); + } +} + +static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv) +{ + struct intel_encoder *encoder; + int i; + u32 val, final; + bool has_lvds = false; + bool has_cpu_edp = false; + bool has_panel = false; + bool has_ck505 = false; + bool can_ssc = false; + bool using_ssc_source = false; + + /* We need to take the global config into account */ + for_each_intel_encoder(&dev_priv->drm, encoder) { + switch (encoder->type) { + case INTEL_OUTPUT_LVDS: + has_panel = true; + has_lvds = true; + break; + case INTEL_OUTPUT_EDP: + has_panel = true; + if (encoder->port == PORT_A) + has_cpu_edp = true; + break; + default: + break; + } + } + + if (HAS_PCH_IBX(dev_priv)) { + has_ck505 = dev_priv->vbt.display_clock_mode; + can_ssc = has_ck505; + } else { + has_ck505 = false; + can_ssc = true; + } + + /* Check if any DPLLs are using the SSC source */ + for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) { + u32 temp = intel_de_read(dev_priv, PCH_DPLL(i)); + + if (!(temp & DPLL_VCO_ENABLE)) + continue; + + if ((temp & PLL_REF_INPUT_MASK) == + PLLB_REF_INPUT_SPREADSPECTRUMIN) { + using_ssc_source = true; + break; + } + } + + drm_dbg_kms(&dev_priv->drm, + "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n", + has_panel, has_lvds, has_ck505, using_ssc_source); + + /* Ironlake: try to setup display ref clock before DPLL + * enabling. This is only under driver's control after + * PCH B stepping, previous chipset stepping should be + * ignoring this setting. + */ + val = intel_de_read(dev_priv, PCH_DREF_CONTROL); + + /* As we must carefully and slowly disable/enable each source in turn, + * compute the final state we want first and check if we need to + * make any changes at all. 
+ */ + final = val; + final &= ~DREF_NONSPREAD_SOURCE_MASK; + if (has_ck505) + final |= DREF_NONSPREAD_CK505_ENABLE; + else + final |= DREF_NONSPREAD_SOURCE_ENABLE; + + final &= ~DREF_SSC_SOURCE_MASK; + final &= ~DREF_CPU_SOURCE_OUTPUT_MASK; + final &= ~DREF_SSC1_ENABLE; + + if (has_panel) { + final |= DREF_SSC_SOURCE_ENABLE; + + if (intel_panel_use_ssc(dev_priv) && can_ssc) + final |= DREF_SSC1_ENABLE; + + if (has_cpu_edp) { + if (intel_panel_use_ssc(dev_priv) && can_ssc) + final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; + else + final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; + } else { + final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; + } + } else if (using_ssc_source) { + final |= DREF_SSC_SOURCE_ENABLE; + final |= DREF_SSC1_ENABLE; + } + + if (final == val) + return; + + /* Always enable nonspread source */ + val &= ~DREF_NONSPREAD_SOURCE_MASK; + + if (has_ck505) + val |= DREF_NONSPREAD_CK505_ENABLE; + else + val |= DREF_NONSPREAD_SOURCE_ENABLE; + + if (has_panel) { + val &= ~DREF_SSC_SOURCE_MASK; + val |= DREF_SSC_SOURCE_ENABLE; + + /* SSC must be turned on before enabling the CPU output */ + if (intel_panel_use_ssc(dev_priv) && can_ssc) { + drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n"); + val |= DREF_SSC1_ENABLE; + } else { + val &= ~DREF_SSC1_ENABLE; + } + + /* Get SSC going before enabling the outputs */ + intel_de_write(dev_priv, PCH_DREF_CONTROL, val); + intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); + udelay(200); + + val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; + + /* Enable CPU source on CPU attached eDP */ + if (has_cpu_edp) { + if (intel_panel_use_ssc(dev_priv) && can_ssc) { + drm_dbg_kms(&dev_priv->drm, + "Using SSC on eDP\n"); + val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; + } else { + val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; + } + } else { + val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; + } + + intel_de_write(dev_priv, PCH_DREF_CONTROL, val); + intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); + udelay(200); + } else { + drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n"); + + val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; + + /* Turn off CPU output */ + val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; + + intel_de_write(dev_priv, PCH_DREF_CONTROL, val); + intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); + udelay(200); + + if (!using_ssc_source) { + drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n"); + + /* Turn off the SSC source */ + val &= ~DREF_SSC_SOURCE_MASK; + val |= DREF_SSC_SOURCE_DISABLE; + + /* Turn off SSC1 */ + val &= ~DREF_SSC1_ENABLE; + + intel_de_write(dev_priv, PCH_DREF_CONTROL, val); + intel_de_posting_read(dev_priv, PCH_DREF_CONTROL); + udelay(200); + } + } + + BUG_ON(val != final); +} + +/* + * Initialize reference clocks when the driver loads + */ +void intel_init_pch_refclk(struct drm_i915_private *dev_priv) +{ + if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) + ilk_init_pch_refclk(dev_priv); + else if (HAS_PCH_LPT(dev_priv)) + lpt_init_pch_refclk(dev_priv); +} diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.h b/drivers/gpu/drm/i915/display/intel_pch_refclk.h new file mode 100644 index 000000000000..12ab2c75a800 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef _INTEL_PCH_REFCLK_H_ +#define _INTEL_PCH_REFCLK_H_ + +#include <linux/types.h> + +struct drm_i915_private; +struct intel_crtc_state; + +void lpt_program_iclkip(const struct intel_crtc_state *crtc_state); +void lpt_disable_iclkip(struct drm_i915_private *dev_priv); +int 
lpt_get_iclkip(struct drm_i915_private *dev_priv); + +void intel_init_pch_refclk(struct drm_i915_private *dev_priv); +void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv); + +#endif diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 7a205fd5023b..a1a663f362e7 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -28,13 +28,13 @@ #include "i915_drv.h" #include "intel_atomic.h" +#include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dp_aux.h" #include "intel_hdmi.h" #include "intel_psr.h" #include "intel_snps_phy.h" -#include "intel_sprite.h" #include "skl_universal_plane.h" /** @@ -588,7 +588,9 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) static bool transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans) { - if (DISPLAY_VER(dev_priv) >= 12) + if (IS_ALDERLAKE_P(dev_priv)) + return trans == TRANSCODER_A || trans == TRANSCODER_B; + else if (DISPLAY_VER(dev_priv) >= 12) return trans == TRANSCODER_A; else return trans == TRANSCODER_EDP; @@ -1346,6 +1348,7 @@ void intel_psr_disable(struct intel_dp *intel_dp, */ void intel_psr_pause(struct intel_dp *intel_dp) { + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); struct intel_psr *psr = &intel_dp->psr; if (!CAN_PSR(intel_dp)) @@ -1358,6 +1361,9 @@ void intel_psr_pause(struct intel_dp *intel_dp) return; } + /* If we ever hit this, we will need to add refcount to pause/resume */ + drm_WARN_ON(&dev_priv->drm, psr->paused); + intel_psr_exit(intel_dp); intel_psr_wait_exit_locked(intel_dp); psr->paused = true; @@ -1463,10 +1469,19 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane, val |= plane_state->uapi.dst.x1; intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val); - /* TODO: consider auxiliary surfaces */ - x = plane_state->uapi.src.x1 >> 16; - y = (plane_state->uapi.src.y1 >> 16) + clip->y1; + x = plane_state->view.color_plane[color_plane].x; + + /* + * From Bspec: UV surface Start Y Position = half of Y plane Y + * start position. + */ + if (!color_plane) + y = plane_state->view.color_plane[color_plane].y + clip->y1; + else + y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2; + val = y << 16 | x; + intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id), val); @@ -1558,9 +1573,6 @@ static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *c * also planes are not updated if they have a negative X * position so for now doing a full update in this cases * - * TODO: We are missing multi-planar formats handling, until it is - * implemented it will send full frame updates. - * * Plane scaling and rotation is not supported by selective fetch and both * properties can change without a modeset, so need to be check at every * atomic commmit. 
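The selective fetch hunks around this point encode two small pieces of arithmetic: the damage rectangle is computed once for the UV plane and then reused unchanged for the linked Y plane, and the start line written for the UV surface is half of the Y-plane start line, since the chroma surface of an NV12-style framebuffer is vertically subsampled by two. A minimal standalone sketch of that bookkeeping, using illustrative types rather than the driver's plane-state structures:

#include <stdio.h>

/*
 * Illustrative types only -- these are not the i915 plane-state structures.
 * The point is the y1/y2 bookkeeping, not the register programming.
 */
struct damage_rect {
	int y1, y2;	/* in lines, relative to the plane's destination */
};

/*
 * Clip a CRTC-space damage rectangle against a plane and convert it to
 * plane-relative lines, mirroring the per-plane y1/y2 adjustment above.
 */
static struct damage_rect clip_to_plane(int dmg_y1, int dmg_y2,
					int plane_y1, int plane_y2)
{
	struct damage_rect r;
	int y1 = dmg_y1 > plane_y1 ? dmg_y1 : plane_y1;
	int y2 = dmg_y2 < plane_y2 ? dmg_y2 : plane_y2;

	r.y1 = y1 - plane_y1;
	r.y2 = y2 - plane_y1;
	return r;
}

int main(void)
{
	/* Plane covers CRTC lines 0..1080, damage covers CRTC lines 101..205. */
	struct damage_rect sel = clip_to_plane(101, 205, 0, 1080);

	/* The same plane-relative area is reused for the linked plane. */
	struct damage_rect linked = sel;

	/*
	 * Start line of the fetch on each surface: full resolution for the
	 * Y (luma) surface, half of that for the subsampled UV surface.
	 */
	int y_surface_start = sel.y1;
	int uv_surface_start = sel.y1 / 2;

	printf("area %d..%d (linked %d..%d), Y start %d, UV start %d\n",
	       sel.y1, sel.y2, linked.y1, linked.y2,
	       y_surface_start, uv_surface_start);
	return 0;
}

With a damage rectangle covering plane lines 101..205, the Y surface fetch starts at line 101 while the UV surface fetch starts at line 50; both describe the same region of the picture, one at full and one at half vertical resolution.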
@@ -1570,7 +1582,6 @@ static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state if (plane_state->uapi.dst.y1 < 0 || plane_state->uapi.dst.x1 < 0 || plane_state->scaler_id >= 0 || - plane_state->hw.fb->format->num_planes > 1 || plane_state->uapi.rotation != DRM_MODE_ROTATE_0) return false; @@ -1696,6 +1707,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { struct drm_rect *sel_fetch_area, inter; + struct intel_plane *linked = new_plane_state->planar_linked_plane; if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc || !new_plane_state->uapi.visible) @@ -1714,6 +1726,24 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1; sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1; crtc_state->update_planes |= BIT(plane->id); + + /* + * Sel_fetch_area is calculated for UV plane. Use + * same area for Y plane as well. + */ + if (linked) { + struct intel_plane_state *linked_new_plane_state; + struct drm_rect *linked_sel_fetch_area; + + linked_new_plane_state = intel_atomic_get_plane_state(state, linked); + if (IS_ERR(linked_new_plane_state)) + return PTR_ERR(linked_new_plane_state); + + linked_sel_fetch_area = &linked_new_plane_state->psr2_sel_fetch_area; + linked_sel_fetch_area->y1 = sel_fetch_area->y1; + linked_sel_fetch_area->y2 = sel_fetch_area->y2; + crtc_state->update_planes |= BIT(linked->id); + } } skip_sel_fetch_set_loop: @@ -1721,11 +1751,17 @@ skip_sel_fetch_set_loop: return 0; } -static void _intel_psr_pre_plane_update(const struct intel_atomic_state *state, - const struct intel_crtc_state *crtc_state) +void intel_psr_pre_plane_update(struct intel_atomic_state *state, + struct intel_crtc *crtc) { + struct drm_i915_private *i915 = to_i915(state->base.dev); + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); struct intel_encoder *encoder; + if (!HAS_PSR(i915)) + return; + for_each_intel_encoder_mask_with_psr(state->base.dev, encoder, crtc_state->uapi.encoder_mask) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); @@ -1740,6 +1776,7 @@ static void _intel_psr_pre_plane_update(const struct intel_atomic_state *state, * - All planes will go inactive * - Changing between PSR versions */ + needs_to_disable |= intel_crtc_needs_modeset(crtc_state); needs_to_disable |= !crtc_state->has_psr; needs_to_disable |= !crtc_state->active_planes; needs_to_disable |= crtc_state->has_psr2 != psr->psr2_enabled; @@ -1751,20 +1788,6 @@ static void _intel_psr_pre_plane_update(const struct intel_atomic_state *state, } } -void intel_psr_pre_plane_update(const struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct intel_crtc_state *crtc_state; - struct intel_crtc *crtc; - int i; - - if (!HAS_PSR(dev_priv)) - return; - - for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) - _intel_psr_pre_plane_update(state, crtc_state); -} - static void _intel_psr_post_plane_update(const struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state) { @@ -1809,15 +1832,21 @@ void intel_psr_post_plane_update(const struct intel_atomic_state *state) _intel_psr_post_plane_update(state, crtc_state); } -/** - * psr_wait_for_idle - wait for PSR1 to idle - * @intel_dp: Intel DP - * @out_value: PSR status in case of failure - * - * Returns: 0 on success or -ETIMEOUT if PSR status does not idle. 
- * - */ -static int psr_wait_for_idle(struct intel_dp *intel_dp, u32 *out_value) +static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + /* + * Any state lower than EDP_PSR2_STATUS_STATE_DEEP_SLEEP is enough. + * As all higher states has bit 4 of PSR2 state set we can just wait for + * EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared. + */ + return intel_de_wait_for_clear(dev_priv, + EDP_PSR2_STATUS(intel_dp->psr.transcoder), + EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50); +} + +static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); @@ -1827,15 +1856,13 @@ static int psr_wait_for_idle(struct intel_dp *intel_dp, u32 *out_value) * exit training time + 1.5 ms of aux channel handshake. 50 ms is * defensive enough to cover everything. */ - return __intel_wait_for_register(&dev_priv->uncore, - EDP_PSR_STATUS(intel_dp->psr.transcoder), - EDP_PSR_STATUS_STATE_MASK, - EDP_PSR_STATUS_STATE_IDLE, 2, 50, - out_value); + return intel_de_wait_for_clear(dev_priv, + EDP_PSR_STATUS(intel_dp->psr.transcoder), + EDP_PSR_STATUS_STATE_MASK, 50); } /** - * intel_psr_wait_for_idle - wait for PSR1 to idle + * intel_psr_wait_for_idle - wait for PSR be ready for a pipe update * @new_crtc_state: new CRTC state * * This function is expected to be called from pipe_update_start() where it is @@ -1852,19 +1879,23 @@ void intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state) for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder, new_crtc_state->uapi.encoder_mask) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - u32 psr_status; + int ret; mutex_lock(&intel_dp->psr.lock); - if (!intel_dp->psr.enabled || intel_dp->psr.psr2_enabled) { + + if (!intel_dp->psr.enabled) { mutex_unlock(&intel_dp->psr.lock); continue; } - /* when the PSR1 is enabled */ - if (psr_wait_for_idle(intel_dp, &psr_status)) - drm_err(&dev_priv->drm, - "PSR idle timed out 0x%x, atomic update may fail\n", - psr_status); + if (intel_dp->psr.psr2_enabled) + ret = _psr2_ready_for_pipe_update_locked(intel_dp); + else + ret = _psr1_ready_for_pipe_update_locked(intel_dp); + + if (ret) + drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n"); + mutex_unlock(&intel_dp->psr.lock); } } diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h index facffbacd357..3d9c0e13c329 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.h +++ b/drivers/gpu/drm/i915/display/intel_psr.h @@ -20,7 +20,8 @@ struct intel_plane; struct intel_encoder; void intel_psr_init_dpcd(struct intel_dp *intel_dp); -void intel_psr_pre_plane_update(const struct intel_atomic_state *state); +void intel_psr_pre_plane_update(struct intel_atomic_state *state, + struct intel_crtc *crtc); void intel_psr_post_plane_update(const struct intel_atomic_state *state); void intel_psr_disable(struct intel_dp *intel_dp, const struct intel_crtc_state *old_crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c index 5e20f340730f..c2251218a39e 100644 --- a/drivers/gpu/drm/i915/display/intel_snps_phy.c +++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c @@ -58,7 +58,6 @@ void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); const struct intel_ddi_buf_trans *trans; enum phy phy = intel_port_to_phy(dev_priv, encoder->port); - int level = 
intel_ddi_level(encoder, crtc_state, 0); int n_entries, ln; trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries); @@ -66,6 +65,7 @@ void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder, return; for (ln = 0; ln < 4; ln++) { + int level = intel_ddi_level(encoder, crtc_state, ln); u32 val = 0; val |= REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, trans->entries[level].snps.vswing); diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index 08116f41da26..1b99a9501a45 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -45,6 +45,7 @@ #include "intel_atomic_plane.h" #include "intel_de.h" #include "intel_display_types.h" +#include "intel_fb.h" #include "intel_frontbuffer.h" #include "intel_sprite.h" #include "i9xx_plane.h" @@ -118,7 +119,7 @@ static void i9xx_plane_linear_gamma(u16 gamma[8]) } static void -chv_update_csc(const struct intel_plane_state *plane_state) +chv_sprite_update_csc(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); @@ -190,7 +191,7 @@ chv_update_csc(const struct intel_plane_state *plane_state) #define COS_0 1 static void -vlv_update_clrc(const struct intel_plane_state *plane_state) +vlv_sprite_update_clrc(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); @@ -393,7 +394,7 @@ static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state, return sprctl; } -static void vlv_update_gamma(const struct intel_plane_state *plane_state) +static void vlv_sprite_update_gamma(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); @@ -417,45 +418,58 @@ static void vlv_update_gamma(const struct intel_plane_state *plane_state) } static void -vlv_update_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +vlv_sprite_update_noarm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; enum plane_id plane_id = plane->id; - u32 sprsurf_offset = plane_state->view.color_plane[0].offset; - u32 linear_offset; - const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; int crtc_x = plane_state->uapi.dst.x1; int crtc_y = plane_state->uapi.dst.y1; u32 crtc_w = drm_rect_width(&plane_state->uapi.dst); u32 crtc_h = drm_rect_height(&plane_state->uapi.dst); - u32 x = plane_state->view.color_plane[0].x; - u32 y = plane_state->view.color_plane[0].y; unsigned long irqflags; - u32 sprctl; - - sprctl = plane_state->ctl | vlv_sprite_ctl_crtc(crtc_state); /* Sizes are 0 based */ crtc_w--; crtc_h--; - linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); intel_de_write_fw(dev_priv, SPSTRIDE(pipe, plane_id), - plane_state->view.color_plane[0].stride); + plane_state->view.color_plane[0].mapping_stride); intel_de_write_fw(dev_priv, SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x); intel_de_write_fw(dev_priv, SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w); - intel_de_write_fw(dev_priv, SPCONSTALPHA(pipe, plane_id), 0); + + 
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); +} + +static void +vlv_sprite_update_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum pipe pipe = plane->pipe; + enum plane_id plane_id = plane->id; + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + u32 sprsurf_offset = plane_state->view.color_plane[0].offset; + u32 x = plane_state->view.color_plane[0].x; + u32 y = plane_state->view.color_plane[0].y; + u32 sprctl, linear_offset; + unsigned long irqflags; + + sprctl = plane_state->ctl | vlv_sprite_ctl_crtc(crtc_state); + + linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); + + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) - chv_update_csc(plane_state); + chv_sprite_update_csc(plane_state); if (key->flags) { intel_de_write_fw(dev_priv, SPKEYMINVAL(pipe, plane_id), @@ -466,6 +480,8 @@ vlv_update_plane(struct intel_plane *plane, key->max_value); } + intel_de_write_fw(dev_priv, SPCONSTALPHA(pipe, plane_id), 0); + intel_de_write_fw(dev_priv, SPLINOFF(pipe, plane_id), linear_offset); intel_de_write_fw(dev_priv, SPTILEOFF(pipe, plane_id), (y << 16) | x); @@ -478,15 +494,15 @@ vlv_update_plane(struct intel_plane *plane, intel_de_write_fw(dev_priv, SPSURF(pipe, plane_id), intel_plane_ggtt_offset(plane_state) + sprsurf_offset); - vlv_update_clrc(plane_state); - vlv_update_gamma(plane_state); + vlv_sprite_update_clrc(plane_state); + vlv_sprite_update_gamma(plane_state); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } static void -vlv_disable_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) +vlv_sprite_disable_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; @@ -502,8 +518,8 @@ vlv_disable_plane(struct intel_plane *plane, } static bool -vlv_plane_get_hw_state(struct intel_plane *plane, - enum pipe *pipe) +vlv_sprite_get_hw_state(struct intel_plane *plane, + enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; @@ -805,7 +821,7 @@ static void ivb_sprite_linear_gamma(const struct intel_plane_state *plane_state, i++; } -static void ivb_update_gamma(const struct intel_plane_state *plane_state) +static void ivb_sprite_update_gamma(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); @@ -835,28 +851,21 @@ static void ivb_update_gamma(const struct intel_plane_state *plane_state) } static void -ivb_update_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +ivb_sprite_update_noarm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; - u32 sprsurf_offset = plane_state->view.color_plane[0].offset; - u32 linear_offset; - const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; int crtc_x = plane_state->uapi.dst.x1; int crtc_y = plane_state->uapi.dst.y1; u32 crtc_w = drm_rect_width(&plane_state->uapi.dst); u32 crtc_h = drm_rect_height(&plane_state->uapi.dst); - u32 x = 
plane_state->view.color_plane[0].x; - u32 y = plane_state->view.color_plane[0].y; u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16; u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16; - u32 sprctl, sprscale = 0; + u32 sprscale = 0; unsigned long irqflags; - sprctl = plane_state->ctl | ivb_sprite_ctl_crtc(crtc_state); - /* Sizes are 0 based */ src_w--; src_h--; @@ -866,17 +875,38 @@ ivb_update_plane(struct intel_plane *plane, if (crtc_w != src_w || crtc_h != src_h) sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; - linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); intel_de_write_fw(dev_priv, SPRSTRIDE(pipe), - plane_state->view.color_plane[0].stride); + plane_state->view.color_plane[0].mapping_stride); intel_de_write_fw(dev_priv, SPRPOS(pipe), (crtc_y << 16) | crtc_x); intel_de_write_fw(dev_priv, SPRSIZE(pipe), (crtc_h << 16) | crtc_w); if (IS_IVYBRIDGE(dev_priv)) intel_de_write_fw(dev_priv, SPRSCALE(pipe), sprscale); + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); +} + +static void +ivb_sprite_update_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum pipe pipe = plane->pipe; + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + u32 sprsurf_offset = plane_state->view.color_plane[0].offset; + u32 x = plane_state->view.color_plane[0].x; + u32 y = plane_state->view.color_plane[0].y; + u32 sprctl, linear_offset; + unsigned long irqflags; + + sprctl = plane_state->ctl | ivb_sprite_ctl_crtc(crtc_state); + + linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); + + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + if (key->flags) { intel_de_write_fw(dev_priv, SPRKEYVAL(pipe), key->min_value); intel_de_write_fw(dev_priv, SPRKEYMSK(pipe), @@ -902,14 +932,14 @@ ivb_update_plane(struct intel_plane *plane, intel_de_write_fw(dev_priv, SPRSURF(pipe), intel_plane_ggtt_offset(plane_state) + sprsurf_offset); - ivb_update_gamma(plane_state); + ivb_sprite_update_gamma(plane_state); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } static void -ivb_disable_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) +ivb_sprite_disable_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; @@ -927,8 +957,8 @@ ivb_disable_plane(struct intel_plane *plane, } static bool -ivb_plane_get_hw_state(struct intel_plane *plane, - enum pipe *pipe) +ivb_sprite_get_hw_state(struct intel_plane *plane, + enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; @@ -1106,7 +1136,7 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state, return dvscntr; } -static void g4x_update_gamma(const struct intel_plane_state *plane_state) +static void g4x_sprite_update_gamma(const struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); @@ -1136,7 +1166,7 @@ static void ilk_sprite_linear_gamma(u16 gamma[17]) gamma[i] = (i << 10) / 16; } -static void ilk_update_gamma(const struct intel_plane_state *plane_state) +static void ilk_sprite_update_gamma(const struct intel_plane_state *plane_state) { struct intel_plane *plane = 
to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); @@ -1163,28 +1193,21 @@ static void ilk_update_gamma(const struct intel_plane_state *plane_state) } static void -g4x_update_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +g4x_sprite_update_noarm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; - u32 dvssurf_offset = plane_state->view.color_plane[0].offset; - u32 linear_offset; - const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; int crtc_x = plane_state->uapi.dst.x1; int crtc_y = plane_state->uapi.dst.y1; u32 crtc_w = drm_rect_width(&plane_state->uapi.dst); u32 crtc_h = drm_rect_height(&plane_state->uapi.dst); - u32 x = plane_state->view.color_plane[0].x; - u32 y = plane_state->view.color_plane[0].y; u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16; u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16; - u32 dvscntr, dvsscale = 0; + u32 dvsscale = 0; unsigned long irqflags; - dvscntr = plane_state->ctl | g4x_sprite_ctl_crtc(crtc_state); - /* Sizes are 0 based */ src_w--; src_h--; @@ -1194,16 +1217,37 @@ g4x_update_plane(struct intel_plane *plane, if (crtc_w != src_w || crtc_h != src_h) dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h; - linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); intel_de_write_fw(dev_priv, DVSSTRIDE(pipe), - plane_state->view.color_plane[0].stride); + plane_state->view.color_plane[0].mapping_stride); intel_de_write_fw(dev_priv, DVSPOS(pipe), (crtc_y << 16) | crtc_x); intel_de_write_fw(dev_priv, DVSSIZE(pipe), (crtc_h << 16) | crtc_w); intel_de_write_fw(dev_priv, DVSSCALE(pipe), dvsscale); + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); +} + +static void +g4x_sprite_update_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum pipe pipe = plane->pipe; + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + u32 dvssurf_offset = plane_state->view.color_plane[0].offset; + u32 x = plane_state->view.color_plane[0].x; + u32 y = plane_state->view.color_plane[0].y; + u32 dvscntr, linear_offset; + unsigned long irqflags; + + dvscntr = plane_state->ctl | g4x_sprite_ctl_crtc(crtc_state); + + linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); + + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + if (key->flags) { intel_de_write_fw(dev_priv, DVSKEYVAL(pipe), key->min_value); intel_de_write_fw(dev_priv, DVSKEYMSK(pipe), @@ -1224,16 +1268,16 @@ g4x_update_plane(struct intel_plane *plane, intel_plane_ggtt_offset(plane_state) + dvssurf_offset); if (IS_G4X(dev_priv)) - g4x_update_gamma(plane_state); + g4x_sprite_update_gamma(plane_state); else - ilk_update_gamma(plane_state); + ilk_sprite_update_gamma(plane_state); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } static void -g4x_disable_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) +g4x_sprite_disable_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; @@ -1250,8 +1294,8 @@ g4x_disable_plane(struct intel_plane 
*plane, } static bool -g4x_plane_get_hw_state(struct intel_plane *plane, - enum pipe *pipe) +g4x_sprite_get_hw_state(struct intel_plane *plane, + enum pipe *pipe) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum intel_display_power_domain power_domain; @@ -1299,7 +1343,7 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state, int src_x, src_w, src_h, crtc_w, crtc_h; const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - unsigned int stride = plane_state->view.color_plane[0].stride; + unsigned int stride = plane_state->view.color_plane[0].mapping_stride; unsigned int cpp = fb->format->cpp[0]; unsigned int width_bytes; int min_width, min_height; @@ -1567,7 +1611,7 @@ out: return ret; } -static const u32 g4x_plane_formats[] = { +static const u32 g4x_sprite_formats[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, @@ -1575,13 +1619,7 @@ static const u32 g4x_plane_formats[] = { DRM_FORMAT_VYUY, }; -static const u64 i9xx_plane_format_modifiers[] = { - I915_FORMAT_MOD_X_TILED, - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; - -static const u32 snb_plane_formats[] = { +static const u32 snb_sprite_formats[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB2101010, @@ -1594,7 +1632,7 @@ static const u32 snb_plane_formats[] = { DRM_FORMAT_VYUY, }; -static const u32 vlv_plane_formats[] = { +static const u32 vlv_sprite_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, DRM_FORMAT_XRGB8888, @@ -1629,13 +1667,8 @@ static const u32 chv_pipe_b_sprite_formats[] = { static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { - switch (modifier) { - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - break; - default: + if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier)) return false; - } switch (format) { case DRM_FORMAT_XRGB8888: @@ -1655,13 +1688,8 @@ static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane, static bool snb_sprite_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { - switch (modifier) { - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - break; - default: + if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier)) return false; - } switch (format) { case DRM_FORMAT_XRGB8888: @@ -1686,13 +1714,8 @@ static bool snb_sprite_format_mod_supported(struct drm_plane *_plane, static bool vlv_sprite_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { - switch (modifier) { - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - break; - default: + if (!intel_fb_plane_supports_modifier(to_intel_plane(_plane), modifier)) return false; - } switch (format) { case DRM_FORMAT_C8: @@ -1762,9 +1785,10 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, return plane; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - plane->update_plane = vlv_update_plane; - plane->disable_plane = vlv_disable_plane; - plane->get_hw_state = vlv_plane_get_hw_state; + plane->update_noarm = vlv_sprite_update_noarm; + plane->update_arm = vlv_sprite_update_arm; + plane->disable_arm = vlv_sprite_disable_arm; + plane->get_hw_state = vlv_sprite_get_hw_state; plane->check_plane = vlv_sprite_check; plane->max_stride = i965_plane_max_stride; plane->min_cdclk = vlv_plane_min_cdclk; @@ -1773,16 +1797,16 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, formats = chv_pipe_b_sprite_formats; num_formats = ARRAY_SIZE(chv_pipe_b_sprite_formats); } else { - formats = 
vlv_plane_formats; - num_formats = ARRAY_SIZE(vlv_plane_formats); + formats = vlv_sprite_formats; + num_formats = ARRAY_SIZE(vlv_sprite_formats); } - modifiers = i9xx_plane_format_modifiers; plane_funcs = &vlv_sprite_funcs; } else if (DISPLAY_VER(dev_priv) >= 7) { - plane->update_plane = ivb_update_plane; - plane->disable_plane = ivb_disable_plane; - plane->get_hw_state = ivb_plane_get_hw_state; + plane->update_noarm = ivb_sprite_update_noarm; + plane->update_arm = ivb_sprite_update_arm; + plane->disable_arm = ivb_sprite_disable_arm; + plane->get_hw_state = ivb_sprite_get_hw_state; plane->check_plane = g4x_sprite_check; if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { @@ -1793,28 +1817,27 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, plane->min_cdclk = ivb_sprite_min_cdclk; } - formats = snb_plane_formats; - num_formats = ARRAY_SIZE(snb_plane_formats); - modifiers = i9xx_plane_format_modifiers; + formats = snb_sprite_formats; + num_formats = ARRAY_SIZE(snb_sprite_formats); plane_funcs = &snb_sprite_funcs; } else { - plane->update_plane = g4x_update_plane; - plane->disable_plane = g4x_disable_plane; - plane->get_hw_state = g4x_plane_get_hw_state; + plane->update_noarm = g4x_sprite_update_noarm; + plane->update_arm = g4x_sprite_update_arm; + plane->disable_arm = g4x_sprite_disable_arm; + plane->get_hw_state = g4x_sprite_get_hw_state; plane->check_plane = g4x_sprite_check; plane->max_stride = g4x_sprite_max_stride; plane->min_cdclk = g4x_sprite_min_cdclk; - modifiers = i9xx_plane_format_modifiers; if (IS_SANDYBRIDGE(dev_priv)) { - formats = snb_plane_formats; - num_formats = ARRAY_SIZE(snb_plane_formats); + formats = snb_sprite_formats; + num_formats = ARRAY_SIZE(snb_sprite_formats); plane_funcs = &snb_sprite_funcs; } else { - formats = g4x_plane_formats; - num_formats = ARRAY_SIZE(g4x_plane_formats); + formats = g4x_sprite_formats; + num_formats = ARRAY_SIZE(g4x_sprite_formats); plane_funcs = &g4x_sprite_funcs; } @@ -1833,11 +1856,15 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, plane->id = PLANE_SPRITE0 + sprite; plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id); + modifiers = intel_fb_plane_get_modifiers(dev_priv, INTEL_PLANE_CAP_TILING_X); + ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 0, plane_funcs, formats, num_formats, modifiers, DRM_PLANE_TYPE_OVERLAY, "sprite %c", sprite_name(pipe, sprite)); + kfree(modifiers); + if (ret) goto fail; diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h index c085eb87705c..4f63e4967731 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.h +++ b/drivers/gpu/drm/i915/display/intel_sprite.h @@ -27,14 +27,10 @@ struct intel_plane_state; #define VBLANK_EVASION_TIME_US 100 #endif -int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, - int usecs); struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe, int plane); int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state); -void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state); int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state); int chv_plane_check_rotation(const struct intel_plane_state *plane_state); diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index 2275f99ce9d7..bf8d3c7ca2d9 100644 --- 
a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -442,10 +442,10 @@ calculate_rc_params(struct rc_parameters *rc, } } -int intel_dsc_compute_params(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config) +int intel_dsc_compute_params(struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config; u16 compressed_bpp = pipe_config->dsc.compressed_bpp; const struct rc_parameters *rc_params; @@ -598,7 +598,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) pps_val |= DSC_422_ENABLE; if (vdsc_cfg->vbr_enable) pps_val |= DSC_VBR_ENABLE; - drm_info(&dev_priv->drm, "PPS0 = 0x%08x\n", pps_val); + drm_dbg_kms(&dev_priv->drm, "PPS0 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_0, pps_val); @@ -622,7 +622,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) /* Populate PICTURE_PARAMETER_SET_1 registers */ pps_val = 0; pps_val |= DSC_BPP(vdsc_cfg->bits_per_pixel); - drm_info(&dev_priv->drm, "PPS1 = 0x%08x\n", pps_val); + drm_dbg_kms(&dev_priv->drm, "PPS1 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_1, pps_val); @@ -647,7 +647,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) pps_val = 0; pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) | DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances); - drm_info(&dev_priv->drm, "PPS2 = 0x%08x\n", pps_val); + drm_dbg_kms(&dev_priv->drm, "PPS2 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_2, pps_val); @@ -672,7 +672,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) pps_val = 0; pps_val |= DSC_SLICE_HEIGHT(vdsc_cfg->slice_height) | DSC_SLICE_WIDTH(vdsc_cfg->slice_width); - drm_info(&dev_priv->drm, "PPS3 = 0x%08x\n", pps_val); + drm_dbg_kms(&dev_priv->drm, "PPS3 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_3, pps_val); @@ -697,7 +697,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) pps_val = 0; pps_val |= DSC_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) | DSC_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay); - drm_info(&dev_priv->drm, "PPS4 = 0x%08x\n", pps_val); + drm_dbg_kms(&dev_priv->drm, "PPS4 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_4, pps_val); @@ -722,7 +722,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) pps_val = 0; pps_val |= DSC_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) | DSC_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval); - drm_info(&dev_priv->drm, "PPS5 = 0x%08x\n", pps_val); + drm_dbg_kms(&dev_priv->drm, "PPS5 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_5, pps_val); @@ -749,7 +749,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) DSC_FIRST_LINE_BPG_OFFSET(vdsc_cfg->first_line_bpg_offset) | DSC_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) | DSC_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp); - 
drm_info(&dev_priv->drm, "PPS6 = 0x%08x\n", pps_val); + drm_dbg_kms(&dev_priv->drm, "PPS6 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_6, pps_val); @@ -774,7 +774,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) pps_val = 0; pps_val |= DSC_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) | DSC_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset); - drm_info(&dev_priv->drm, "PPS7 = 0x%08x\n", pps_val); + drm_dbg_kms(&dev_priv->drm, "PPS7 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_7, pps_val); @@ -799,7 +799,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) pps_val = 0; pps_val |= DSC_FINAL_OFFSET(vdsc_cfg->final_offset) | DSC_INITIAL_OFFSET(vdsc_cfg->initial_offset); - drm_info(&dev_priv->drm, "PPS8 = 0x%08x\n", pps_val); + drm_dbg_kms(&dev_priv->drm, "PPS8 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_8, pps_val); @@ -824,7 +824,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) pps_val = 0; pps_val |= DSC_RC_MODEL_SIZE(vdsc_cfg->rc_model_size) | DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST); - drm_info(&dev_priv->drm, "PPS9 = 0x%08x\n", pps_val); + drm_dbg_kms(&dev_priv->drm, "PPS9 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_9, pps_val); @@ -851,7 +851,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) DSC_RC_QUANT_INC_LIMIT1(vdsc_cfg->rc_quant_incr_limit1) | DSC_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) | DSC_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST); - drm_info(&dev_priv->drm, "PPS10 = 0x%08x\n", pps_val); + drm_dbg_kms(&dev_priv->drm, "PPS10 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_10, pps_val); @@ -879,7 +879,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) vdsc_cfg->slice_width) | DSC_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height / vdsc_cfg->slice_height); - drm_info(&dev_priv->drm, "PPS16 = 0x%08x\n", pps_val); + drm_dbg_kms(&dev_priv->drm, "PPS16 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_16, pps_val); @@ -906,8 +906,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) rc_buf_thresh_dword[i / 4] |= (u32)(vdsc_cfg->rc_buf_thresh[i] << BITS_PER_BYTE * (i % 4)); - drm_info(&dev_priv->drm, " RC_BUF_THRESH%d = 0x%08x\n", i, - rc_buf_thresh_dword[i / 4]); + drm_dbg_kms(&dev_priv->drm, "RC_BUF_THRESH_%d = 0x%08x\n", i, + rc_buf_thresh_dword[i / 4]); } if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0, @@ -963,8 +963,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) RC_MAX_QP_SHIFT) | (vdsc_cfg->rc_range_params[i].range_min_qp << RC_MIN_QP_SHIFT)) << 16 * (i % 2)); - drm_info(&dev_priv->drm, " RC_RANGE_PARAM_%d = 0x%08x\n", i, - rc_range_params_dword[i / 2]); + drm_dbg_kms(&dev_priv->drm, "RC_RANGE_PARAM_%d = 0x%08x\n", i, + rc_range_params_dword[i / 2]); } if (!is_pipe_dsc(crtc, cpu_transcoder)) { intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0, @@ -1055,8 +1055,8 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) } } -static void intel_dsc_dsi_pps_write(struct 
intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) +void intel_dsc_dsi_pps_write(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); @@ -1064,6 +1064,9 @@ static void intel_dsc_dsi_pps_write(struct intel_encoder *encoder, struct drm_dsc_picture_parameter_set pps; enum port port; + if (!crtc_state->dsc.compression_enable) + return; + drm_dsc_pps_payload_pack(&pps, vdsc_cfg); for_each_dsi_port(port, intel_dsi->ports) { @@ -1074,14 +1077,16 @@ static void intel_dsc_dsi_pps_write(struct intel_encoder *encoder, } } -static void intel_dsc_dp_pps_write(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) +void intel_dsc_dp_pps_write(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) { - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; struct drm_dsc_pps_infoframe dp_dsc_pps_sdp; + if (!crtc_state->dsc.compression_enable) + return; + /* Prepare DP SDP PPS header as per DP 1.4 spec, Table 2-123 */ drm_dsc_dp_pps_header_init(&dp_dsc_pps_sdp.pps_header); @@ -1142,8 +1147,7 @@ void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state) } } -void intel_dsc_enable(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state) +void intel_dsc_enable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -1155,13 +1159,6 @@ void intel_dsc_enable(struct intel_encoder *encoder, intel_dsc_pps_configure(crtc_state); - if (!crtc_state->bigjoiner_slave) { - if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) - intel_dsc_dsi_pps_write(encoder, crtc_state); - else - intel_dsc_dp_pps_write(encoder, crtc_state); - } - dss_ctl2_val |= LEFT_BRANCH_VDSC_ENABLE; if (crtc_state->dsc.dsc_split) { dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE; diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h index 0c5d80a572da..4ec75f715986 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.h +++ b/drivers/gpu/drm/i915/display/intel_vdsc.h @@ -15,15 +15,17 @@ struct intel_encoder; bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state); void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state); -void intel_dsc_enable(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state); +void intel_dsc_enable(const struct intel_crtc_state *crtc_state); void intel_dsc_disable(const struct intel_crtc_state *crtc_state); -int intel_dsc_compute_params(struct intel_encoder *encoder, - struct intel_crtc_state *pipe_config); +int intel_dsc_compute_params(struct intel_crtc_state *pipe_config); void intel_uncompressed_joiner_get_config(struct intel_crtc_state *crtc_state); void intel_dsc_get_config(struct intel_crtc_state *crtc_state); enum intel_display_power_domain intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder); struct intel_crtc *intel_dsc_get_bigjoiner_secondary(const struct intel_crtc *primary_crtc); +void intel_dsc_dsi_pps_write(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state); +void intel_dsc_dp_pps_write(struct intel_encoder 
*encoder, + const struct intel_crtc_state *crtc_state); #endif /* __INTEL_VDSC_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c index c335b1dbafcf..139e8936edc5 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.c +++ b/drivers/gpu/drm/i915/display/intel_vrr.c @@ -60,7 +60,7 @@ intel_vrr_check_modeset(struct intel_atomic_state *state) * Between those two points the vblank exit starts (and hence registers get * latched) ASAP after a push is sent. * - * framestart_delay is programmable 0-3. + * framestart_delay is programmable 1-4. */ static int intel_vrr_vblank_exit_length(const struct intel_crtc_state *crtc_state) { @@ -138,13 +138,13 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, i915->window2_delay; else /* - * FIXME: s/4/framestart_delay+1/ to get consistent + * FIXME: s/4/framestart_delay/ to get consistent * earliest/latest points for register latching regardless * of the framestart_delay used? * * FIXME: this really needs the extra scanline to provide consistent * behaviour for all framestart_delay values. Otherwise with - * framestart_delay==3 we will end up extending the min vblank by + * framestart_delay==4 we will end up extending the min vblank by * one extra line. */ crtc_state->vrr.pipeline_full = @@ -193,6 +193,18 @@ void intel_vrr_send_push(const struct intel_crtc_state *crtc_state) TRANS_PUSH_EN | TRANS_PUSH_SEND); } +bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + + if (!crtc_state->vrr.enable) + return false; + + return intel_de_read(dev_priv, TRANS_PUSH(cpu_transcoder)) & TRANS_PUSH_SEND; +} + void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state) { struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h index 96f9c9c27ab9..1c2da572693d 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.h +++ b/drivers/gpu/drm/i915/display/intel_vrr.h @@ -23,6 +23,7 @@ void intel_vrr_compute_config(struct intel_crtc_state *crtc_state, void intel_vrr_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); void intel_vrr_send_push(const struct intel_crtc_state *crtc_state); +bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state); void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state); void intel_vrr_get_config(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c index 37eabeff8197..c2e94118566b 100644 --- a/drivers/gpu/drm/i915/display/skl_scaler.c +++ b/drivers/gpu/drm/i915/display/skl_scaler.c @@ -4,6 +4,7 @@ */ #include "intel_de.h" #include "intel_display_types.h" +#include "intel_fb.h" #include "skl_scaler.h" #include "skl_universal_plane.h" diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index a0e53a3b267a..28890876bdeb 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -163,50 +163,6 @@ static const u32 icl_hdr_plane_formats[] = { DRM_FORMAT_XVYU16161616, }; -static const u64 skl_plane_format_modifiers_noccs[] = { - I915_FORMAT_MOD_Yf_TILED, - I915_FORMAT_MOD_Y_TILED, - 
I915_FORMAT_MOD_X_TILED, - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; - -static const u64 skl_plane_format_modifiers_ccs[] = { - I915_FORMAT_MOD_Yf_TILED_CCS, - I915_FORMAT_MOD_Y_TILED_CCS, - I915_FORMAT_MOD_Yf_TILED, - I915_FORMAT_MOD_Y_TILED, - I915_FORMAT_MOD_X_TILED, - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; - -static const u64 gen12_plane_format_modifiers_mc_ccs[] = { - I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS, - I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS, - I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC, - I915_FORMAT_MOD_Y_TILED, - I915_FORMAT_MOD_X_TILED, - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; - -static const u64 gen12_plane_format_modifiers_rc_ccs[] = { - I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS, - I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC, - I915_FORMAT_MOD_Y_TILED, - I915_FORMAT_MOD_X_TILED, - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; - -static const u64 adlp_step_a_plane_format_modifiers[] = { - I915_FORMAT_MOD_Y_TILED, - I915_FORMAT_MOD_X_TILED, - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; - int skl_format_to_fourcc(int format, bool rgb_order, bool alpha) { switch (format) { @@ -633,7 +589,7 @@ static u32 skl_plane_stride(const struct intel_plane_state *plane_state, { const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int rotation = plane_state->hw.rotation; - u32 stride = plane_state->view.color_plane[color_plane].stride; + u32 stride = plane_state->view.color_plane[color_plane].scanout_stride; if (color_plane >= fb->format->num_planes) return 0; @@ -642,8 +598,8 @@ static u32 skl_plane_stride(const struct intel_plane_state *plane_state, } static void -skl_disable_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) +skl_plane_disable_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; @@ -985,6 +941,9 @@ static u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; } + if (plane_state->force_black) + plane_color_ctl |= PLANE_COLOR_PLANE_CSC_ENABLE; + return plane_color_ctl; } @@ -1008,115 +967,145 @@ static u32 skl_surf_address(const struct intel_plane_state *plane_state, } } -static void intel_load_plane_csc_black(struct intel_plane *intel_plane) +static u32 skl_plane_surf(const struct intel_plane_state *plane_state, + int color_plane) { - struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); - enum pipe pipe = intel_plane->pipe; - enum plane_id plane = intel_plane->id; - u16 postoff = 0; + u32 plane_surf; - drm_dbg_kms(&dev_priv->drm, "plane color CTM to black %s:%d\n", - intel_plane->base.name, plane); - intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 0), 0); - intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 1), 0); + plane_surf = intel_plane_ggtt_offset(plane_state) + + skl_surf_address(plane_state, color_plane); - intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 2), 0); - intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 3), 0); + if (plane_state->decrypt) + plane_surf |= PLANE_SURF_DECRYPT; - intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 4), 0); - intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 5), 0); + return plane_surf; +} - intel_de_write_fw(dev_priv, PLANE_CSC_PREOFF(pipe, plane, 0), 0); - intel_de_write_fw(dev_priv, PLANE_CSC_PREOFF(pipe, plane, 1), 0); - intel_de_write_fw(dev_priv, PLANE_CSC_PREOFF(pipe, plane, 2), 0); +static void 
icl_plane_csc_load_black(struct intel_plane *plane) +{ + struct drm_i915_private *i915 = to_i915(plane->base.dev); + enum plane_id plane_id = plane->id; + enum pipe pipe = plane->pipe; + + intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 0), 0); + intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 1), 0); + + intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 2), 0); + intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 3), 0); + + intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 4), 0); + intel_de_write_fw(i915, PLANE_CSC_COEFF(pipe, plane_id, 5), 0); + + intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 0), 0); + intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 1), 0); + intel_de_write_fw(i915, PLANE_CSC_PREOFF(pipe, plane_id, 2), 0); - intel_de_write_fw(dev_priv, PLANE_CSC_POSTOFF(pipe, plane, 0), postoff); - intel_de_write_fw(dev_priv, PLANE_CSC_POSTOFF(pipe, plane, 1), postoff); - intel_de_write_fw(dev_priv, PLANE_CSC_POSTOFF(pipe, plane, 2), postoff); + intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 0), 0); + intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 1), 0); + intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 2), 0); } static void -skl_program_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - int color_plane) +skl_program_plane_noarm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + int color_plane) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; - const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; - u32 surf_addr = skl_surf_address(plane_state, color_plane); u32 stride = skl_plane_stride(plane_state, color_plane); const struct drm_framebuffer *fb = plane_state->hw.fb; - int aux_plane = skl_main_to_aux_plane(fb, color_plane); int crtc_x = plane_state->uapi.dst.x1; int crtc_y = plane_state->uapi.dst.y1; - u32 x = plane_state->view.color_plane[color_plane].x; - u32 y = plane_state->view.color_plane[color_plane].y; u32 src_w = drm_rect_width(&plane_state->uapi.src) >> 16; u32 src_h = drm_rect_height(&plane_state->uapi.src) >> 16; - u8 alpha = plane_state->hw.alpha >> 8; - u32 plane_color_ctl = 0, aux_dist = 0; unsigned long irqflags; - u32 keymsk, keymax, plane_surf; - u32 plane_ctl = plane_state->ctl; - - plane_ctl |= skl_plane_ctl_crtc(crtc_state); - - if (DISPLAY_VER(dev_priv) >= 10) - plane_color_ctl = plane_state->color_ctl | - glk_plane_color_ctl_crtc(crtc_state); /* Sizes are 0 based */ src_w--; src_h--; - keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha); - - keymsk = key->channel_mask & 0x7ffffff; - if (alpha < 0xff) - keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; - /* The scaler will handle the output position */ if (plane_state->scaler_id >= 0) { crtc_x = 0; crtc_y = 0; } - if (aux_plane) { - aux_dist = skl_surf_address(plane_state, aux_plane) - surf_addr; - - if (DISPLAY_VER(dev_priv) < 12) - aux_dist |= skl_plane_stride(plane_state, aux_plane); - } - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + /* + * FIXME: pxp session invalidation can hit any time even at time of commit + * or after the commit, display content will be garbage. 
+ */ + if (plane_state->force_black) + icl_plane_csc_load_black(plane); + intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), stride); intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x); intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w); - intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), aux_dist); - if (icl_is_hdr_plane(dev_priv, plane_id)) intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id), plane_state->cus_ctl); - if (DISPLAY_VER(dev_priv) >= 10) - intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), - plane_color_ctl); - if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id)) icl_program_input_csc(plane, crtc_state, plane_state); - if (fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC) + if (intel_fb_is_rc_ccs_cc_modifier(fb->modifier)) intel_uncore_write64_fw(&dev_priv->uncore, PLANE_CC_VAL(pipe, plane_id), plane_state->ccval); skl_write_plane_wm(plane, crtc_state); + intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane); + + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); +} + +static void +skl_program_plane_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + int color_plane) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum plane_id plane_id = plane->id; + enum pipe pipe = plane->pipe; + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + const struct drm_framebuffer *fb = plane_state->hw.fb; + int aux_plane = skl_main_to_aux_plane(fb, color_plane); + u32 x = plane_state->view.color_plane[color_plane].x; + u32 y = plane_state->view.color_plane[color_plane].y; + u32 keymsk, keymax, aux_dist = 0, plane_color_ctl = 0; + u8 alpha = plane_state->hw.alpha >> 8; + u32 plane_ctl = plane_state->ctl; + unsigned long irqflags; + + plane_ctl |= skl_plane_ctl_crtc(crtc_state); + + if (DISPLAY_VER(dev_priv) >= 10) + plane_color_ctl = plane_state->color_ctl | + glk_plane_color_ctl_crtc(crtc_state); + + keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha); + + keymsk = key->channel_mask & 0x7ffffff; + if (alpha < 0xff) + keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; + + if (aux_plane) { + aux_dist = skl_surf_address(plane_state, aux_plane) - + skl_surf_address(plane_state, color_plane); + + if (DISPLAY_VER(dev_priv) < 12) + aux_dist |= skl_plane_stride(plane_state, aux_plane); + } + + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id), key->min_value); intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), keymsk); @@ -1125,17 +1114,22 @@ skl_program_plane(struct intel_plane *plane, intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id), (y << 16) | x); + intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), aux_dist); + if (DISPLAY_VER(dev_priv) < 11) intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id), (plane_state->view.color_plane[1].y << 16) | plane_state->view.color_plane[1].x); - intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane); + if (DISPLAY_VER(dev_priv) >= 10) + intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl); /* * Enable the scaler before the plane so that we don't * get a catastrophic underrun even if the two operations * end up happening in two different frames. 
+ * + * TODO: split into noarm+arm pair */ if (plane_state->scaler_id >= 0) skl_program_plane_scaler(plane, crtc_state, plane_state); @@ -1146,23 +1140,8 @@ skl_program_plane(struct intel_plane *plane, * the control register just before the surface register. */ intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl); - plane_surf = intel_plane_ggtt_offset(plane_state) + surf_addr; - plane_color_ctl = intel_de_read_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id)); - - /* - * FIXME: pxp session invalidation can hit any time even at time of commit - * or after the commit, display content will be garbage. - */ - if (plane_state->decrypt) { - plane_surf |= PLANE_SURF_DECRYPT; - } else if (plane_state->force_black) { - intel_load_plane_csc_black(plane); - plane_color_ctl |= PLANE_COLOR_PLANE_CSC_ENABLE; - } - - intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), - plane_color_ctl); - intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), plane_surf); + intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), + skl_plane_surf(plane_state, color_plane)); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } @@ -1177,7 +1156,6 @@ skl_plane_async_flip(struct intel_plane *plane, unsigned long irqflags; enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; - u32 surf_addr = plane_state->view.color_plane[0].offset; u32 plane_ctl = plane_state->ctl; plane_ctl |= skl_plane_ctl_crtc(crtc_state); @@ -1189,15 +1167,15 @@ skl_plane_async_flip(struct intel_plane *plane, intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl); intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), - intel_plane_ggtt_offset(plane_state) + surf_addr); + skl_plane_surf(plane_state, 0)); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } static void -skl_update_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +skl_plane_update_noarm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { int color_plane = 0; @@ -1205,7 +1183,21 @@ skl_update_plane(struct intel_plane *plane, /* Program the UV plane on planar master */ color_plane = 1; - skl_program_plane(plane, crtc_state, plane_state, color_plane); + skl_program_plane_noarm(plane, crtc_state, plane_state, color_plane); +} + +static void +skl_plane_update_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + int color_plane = 0; + + if (plane_state->planar_linked_plane && !plane_state->planar_slave) + /* Program the UV plane on planar master */ + color_plane = 1; + + skl_program_plane_arm(plane, crtc_state, plane_state, color_plane); } static bool intel_format_is_p01x(u32 format) @@ -1232,7 +1224,7 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, return 0; if (rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180) && - is_ccs_modifier(fb->modifier)) { + intel_fb_is_ccs_modifier(fb->modifier)) { drm_dbg_kms(&dev_priv->drm, "RC support only with 0/180 degree rotation (%x)\n", rotation); @@ -1284,13 +1276,8 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state, /* Y-tiling is not supported in IF-ID Interlace mode */ if (crtc_state->hw.enable && crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE && - (fb->modifier == I915_FORMAT_MOD_Y_TILED || - fb->modifier == I915_FORMAT_MOD_Yf_TILED || - fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || - fb->modifier == 
I915_FORMAT_MOD_Yf_TILED_CCS || - fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS || - fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS || - fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC)) { + fb->modifier != DRM_FORMAT_MOD_LINEAR && + fb->modifier != I915_FORMAT_MOD_X_TILED) { drm_dbg_kms(&dev_priv->drm, "Y/Yf tiling not supported in IF-ID mode\n"); return -EINVAL; @@ -1487,7 +1474,7 @@ int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state, if (fb->modifier == I915_FORMAT_MOD_X_TILED) { int cpp = fb->format->cpp[0]; - while ((*x + w) * cpp > plane_state->view.color_plane[0].stride) { + while ((*x + w) * cpp > plane_state->view.color_plane[0].mapping_stride) { if (*offset == 0) { drm_dbg_kms(&dev_priv->drm, "Unable to find suitable display surface offset due to X-tiling\n"); @@ -1536,7 +1523,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state) * CCS AUX surface doesn't have its own x/y offsets, we must make sure * they match with the main surface x/y offsets. */ - if (is_ccs_modifier(fb->modifier)) { + if (intel_fb_is_ccs_modifier(fb->modifier)) { while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset, aux_plane)) { if (offset == 0) @@ -1600,7 +1587,7 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, uv_plane); - if (is_ccs_modifier(fb->modifier)) { + if (intel_fb_is_ccs_modifier(fb->modifier)) { int ccs_plane = main_to_ccs_plane(fb, uv_plane); u32 aux_offset = plane_state->view.color_plane[ccs_plane].offset; u32 alignment = intel_surf_alignment(fb, uv_plane); @@ -1656,8 +1643,7 @@ static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state) int hsub, vsub; int x, y; - if (!is_ccs_plane(fb, ccs_plane) || - is_gen12_ccs_cc_plane(fb, ccs_plane)) + if (!intel_fb_is_ccs_aux_plane(fb, ccs_plane)) continue; intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, fb, @@ -1699,7 +1685,7 @@ static int skl_check_plane_surface(struct intel_plane_state *plane_state) * Handle the AUX surface first since the main surface setup depends on * it. 
*/ - if (is_ccs_modifier(fb->modifier)) { + if (intel_fb_is_ccs_modifier(fb->modifier)) { ret = skl_check_ccs_aux_surface(plane_state); if (ret) return ret; @@ -1737,6 +1723,18 @@ static bool skl_fb_scalable(const struct drm_framebuffer *fb) } } +static bool bo_has_valid_encryption(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + + return intel_pxp_key_check(&i915->gt.pxp, obj, false) == 0; +} + +static bool pxp_is_borked(struct drm_i915_gem_object *obj) +{ + return i915_gem_object_is_protected(obj) && !bo_has_valid_encryption(obj); +} + static int skl_plane_check(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { @@ -1781,6 +1779,11 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state, if (ret) return ret; + if (DISPLAY_VER(dev_priv) >= 11) { + plane_state->decrypt = bo_has_valid_encryption(intel_fb_obj(fb)); + plane_state->force_black = pxp_is_borked(intel_fb_obj(fb)); + } + /* HW only has 8 bits pixel precision, disable plane if invisible */ if (!(plane_state->hw.alpha >> 8)) plane_state->uapi.visible = false; @@ -1870,49 +1873,20 @@ static const u32 *icl_get_plane_formats(struct drm_i915_private *dev_priv, } } -static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv, - enum pipe pipe, enum plane_id plane_id) -{ - if (plane_id == PLANE_CURSOR) - return false; - - if (DISPLAY_VER(dev_priv) >= 11) - return true; - - if (IS_GEMINILAKE(dev_priv)) - return pipe != PIPE_C; - - return pipe != PIPE_C && - (plane_id == PLANE_PRIMARY || - plane_id == PLANE_SPRITE0); -} - static bool skl_plane_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { struct intel_plane *plane = to_intel_plane(_plane); - switch (modifier) { - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - case I915_FORMAT_MOD_Y_TILED: - case I915_FORMAT_MOD_Yf_TILED: - break; - case I915_FORMAT_MOD_Y_TILED_CCS: - case I915_FORMAT_MOD_Yf_TILED_CCS: - if (!plane->has_ccs) - return false; - break; - default: + if (!intel_fb_plane_supports_modifier(plane, modifier)) return false; - } switch (format) { case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XBGR8888: case DRM_FORMAT_ARGB8888: case DRM_FORMAT_ABGR8888: - if (is_ccs_modifier(modifier)) + if (intel_fb_is_ccs_modifier(modifier)) return true; fallthrough; case DRM_FORMAT_RGB565: @@ -1953,52 +1927,20 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane, } } -static bool gen12_plane_supports_mc_ccs(struct drm_i915_private *dev_priv, - enum plane_id plane_id) -{ - /* Wa_14010477008:tgl[a0..c0],rkl[all],dg1[all] */ - if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv) || - IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_D0)) - return false; - - /* Wa_22011186057 */ - if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) - return false; - - return plane_id < PLANE_SPRITE4; -} - static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { - struct drm_i915_private *dev_priv = to_i915(_plane->dev); struct intel_plane *plane = to_intel_plane(_plane); - switch (modifier) { - case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: - if (!gen12_plane_supports_mc_ccs(dev_priv, plane->id)) - return false; - fallthrough; - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - case I915_FORMAT_MOD_Y_TILED: - break; - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: - case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: - /* Wa_22011186057 */ - if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) - return false; - break; - default: + if 
(!intel_fb_plane_supports_modifier(plane, modifier)) return false; - } switch (format) { case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XBGR8888: case DRM_FORMAT_ARGB8888: case DRM_FORMAT_ABGR8888: - if (is_ccs_modifier(modifier)) + if (intel_fb_is_ccs_modifier(modifier)) return true; fallthrough; case DRM_FORMAT_YUYV: @@ -2010,7 +1952,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_P010: case DRM_FORMAT_P012: case DRM_FORMAT_P016: - if (modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS) + if (intel_fb_is_mc_ccs_modifier(modifier)) return true; fallthrough; case DRM_FORMAT_RGB565: @@ -2039,18 +1981,6 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, } } -static const u64 *gen12_get_plane_modifiers(struct drm_i915_private *dev_priv, - enum plane_id plane_id) -{ - /* Wa_22011186057 */ - if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) - return adlp_step_a_plane_format_modifiers; - else if (gen12_plane_supports_mc_ccs(dev_priv, plane_id)) - return gen12_plane_format_modifiers_mc_ccs; - else - return gen12_plane_format_modifiers_rc_ccs; -} - static const struct drm_plane_funcs skl_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, @@ -2091,6 +2021,64 @@ skl_plane_disable_flip_done(struct intel_plane *plane) spin_unlock_irq(&i915->irq_lock); } +static bool skl_plane_has_rc_ccs(struct drm_i915_private *i915, + enum pipe pipe, enum plane_id plane_id) +{ + /* Wa_22011186057 */ + if (IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) + return false; + + if (DISPLAY_VER(i915) >= 11) + return true; + + if (IS_GEMINILAKE(i915)) + return pipe != PIPE_C; + + return pipe != PIPE_C && + (plane_id == PLANE_PRIMARY || + plane_id == PLANE_SPRITE0); +} + +static bool gen12_plane_has_mc_ccs(struct drm_i915_private *i915, + enum plane_id plane_id) +{ + if (DISPLAY_VER(i915) < 12) + return false; + + /* Wa_14010477008:tgl[a0..c0],rkl[all],dg1[all] */ + if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || + IS_TGL_DISPLAY_STEP(i915, STEP_A0, STEP_D0)) + return false; + + /* Wa_22011186057 */ + if (IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0)) + return false; + + return plane_id < PLANE_SPRITE4; +} + +static u8 skl_get_plane_caps(struct drm_i915_private *i915, + enum pipe pipe, enum plane_id plane_id) +{ + u8 caps = INTEL_PLANE_CAP_TILING_X; + + if (DISPLAY_VER(i915) < 13 || IS_ALDERLAKE_P(i915)) + caps |= INTEL_PLANE_CAP_TILING_Y; + if (DISPLAY_VER(i915) < 12) + caps |= INTEL_PLANE_CAP_TILING_Yf; + + if (skl_plane_has_rc_ccs(i915, pipe, plane_id)) { + caps |= INTEL_PLANE_CAP_CCS_RC; + if (DISPLAY_VER(i915) >= 12) + caps |= INTEL_PLANE_CAP_CCS_RC_CC; + } + + if (gen12_plane_has_mc_ccs(i915, plane_id)) + caps |= INTEL_PLANE_CAP_CCS_MC; + + return caps; +} + struct intel_plane * skl_universal_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id) @@ -2113,12 +2101,10 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, plane->id = plane_id; plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane_id); - plane->has_fbc = skl_plane_has_fbc(dev_priv, pipe, plane_id); - if (plane->has_fbc) { - struct intel_fbc *fbc = &dev_priv->fbc; - - fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; - } + if (skl_plane_has_fbc(dev_priv, pipe, plane_id)) + plane->fbc = &dev_priv->fbc; + if (plane->fbc) + plane->fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; if (DISPLAY_VER(dev_priv) >= 11) { plane->min_width = icl_plane_min_width; @@ -2136,8 +2122,9 @@ 
skl_universal_plane_create(struct drm_i915_private *dev_priv, } plane->max_stride = skl_plane_max_stride; - plane->update_plane = skl_update_plane; - plane->disable_plane = skl_disable_plane; + plane->update_noarm = skl_plane_update_noarm; + plane->update_arm = skl_plane_update_arm; + plane->disable_arm = skl_plane_disable_arm; plane->get_hw_state = skl_plane_get_hw_state; plane->check_plane = skl_plane_check; @@ -2159,29 +2146,28 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, formats = skl_get_plane_formats(dev_priv, pipe, plane_id, &num_formats); - plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id); - if (DISPLAY_VER(dev_priv) >= 12) { - modifiers = gen12_get_plane_modifiers(dev_priv, plane_id); + if (DISPLAY_VER(dev_priv) >= 12) plane_funcs = &gen12_plane_funcs; - } else { - if (plane->has_ccs) - modifiers = skl_plane_format_modifiers_ccs; - else - modifiers = skl_plane_format_modifiers_noccs; + else plane_funcs = &skl_plane_funcs; - } if (plane_id == PLANE_PRIMARY) plane_type = DRM_PLANE_TYPE_PRIMARY; else plane_type = DRM_PLANE_TYPE_OVERLAY; + modifiers = intel_fb_plane_get_modifiers(dev_priv, + skl_get_plane_caps(dev_priv, pipe, plane_id)); + ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, 0, plane_funcs, formats, num_formats, modifiers, plane_type, "plane %d%c", plane_id + 1, pipe_name(pipe)); + + kfree(modifiers); + if (ret) goto fail; diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index 07584695514b..20141f33ed64 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -38,9 +38,12 @@ #include "intel_de.h" #include "intel_display_types.h" #include "intel_dsi.h" +#include "intel_dsi_vbt.h" #include "intel_fifo_underrun.h" #include "intel_panel.h" #include "skl_scaler.h" +#include "vlv_dsi.h" +#include "vlv_dsi_pll.h" #include "vlv_sideband.h" /* return pixels in terms of txbyteclkhs */ @@ -1258,7 +1261,9 @@ static void intel_dsi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 pclk; + drm_dbg_kms(&dev_priv->drm, "\n"); pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI); @@ -1270,6 +1275,9 @@ static void intel_dsi_get_config(struct intel_encoder *encoder, pclk = vlv_dsi_get_pclk(encoder, pipe_config); } + if (intel_dsi->dual_link) + pclk *= 2; + if (pclk) { pipe_config->hw.adjusted_mode.crtc_clock = pclk; pipe_config->port_clock = pclk; diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.h b/drivers/gpu/drm/i915/display/vlv_dsi.h new file mode 100644 index 000000000000..0c2b279df9d4 --- /dev/null +++ b/drivers/gpu/drm/i915/display/vlv_dsi.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __VLV_DSI_H__ +#define __VLV_DSI_H__ + +#include <linux/types.h> + +enum port; +struct drm_i915_private; +struct intel_dsi; + +void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port); +enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt); +void vlv_dsi_init(struct drm_i915_private *dev_priv); + +#endif /* __VLV_DSI_H__ */ diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c index 5413b52ab6ba..1b81797dd02e 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c @@ -31,6 +31,7 @@ #include "intel_de.h" #include 
"intel_display_types.h" #include "intel_dsi.h" +#include "vlv_dsi_pll.h" #include "vlv_sideband.h" static const u16 lfsr_converts[] = { diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.h b/drivers/gpu/drm/i915/display/vlv_dsi_pll.h new file mode 100644 index 000000000000..ab9291ad1e79 --- /dev/null +++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __VLV_DSI_PLL_H__ +#define __VLV_DSI_PLL_H__ + +#include <linux/types.h> + +enum port; +struct drm_i915_private; +struct intel_crtc_state; +struct intel_encoder; + +int vlv_dsi_pll_compute(struct intel_encoder *encoder, + struct intel_crtc_state *config); +void vlv_dsi_pll_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *config); +void vlv_dsi_pll_disable(struct intel_encoder *encoder); +u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, + struct intel_crtc_state *config); +void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); + +bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv); +int bxt_dsi_pll_compute(struct intel_encoder *encoder, + struct intel_crtc_state *config); +void bxt_dsi_pll_enable(struct intel_encoder *encoder, + const struct intel_crtc_state *config); +void bxt_dsi_pll_disable(struct intel_encoder *encoder); +u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, + struct intel_crtc_state *config); +void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); + +void assert_dsi_pll_enabled(struct drm_i915_private *i915); +void assert_dsi_pll_disabled(struct drm_i915_private *i915); + +#endif /* __VLV_DSI_PLL_H__ */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c index f0435c6feb68..8a248003dfae 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c @@ -69,10 +69,16 @@ static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj) bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, unsigned int flags) { + struct drm_i915_private *i915 = to_i915(obj->base.dev); struct clflush *clflush; assert_object_held(obj); + if (IS_DGFX(i915)) { + WARN_ON_ONCE(obj->cache_dirty); + return false; + } + /* * Stolen memory is always coherent with the GPU as it is explicitly * marked as wc by the system, or the system is cache-coherent. @@ -105,16 +111,24 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, if (clflush) { i915_sw_fence_await_reservation(&clflush->base.chain, obj->base.resv, NULL, true, - i915_fence_timeout(to_i915(obj->base.dev)), + i915_fence_timeout(i915), I915_FENCE_GFP); dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma); dma_fence_work_commit(&clflush->base); + /* + * We must have successfully populated the pages(since we are + * holding a pin on the pages as per the flush worker) to reach + * this point, which must mean we have already done the required + * flush-on-acquire, hence resetting cache_dirty here should be + * safe. 
+ */ + obj->cache_dirty = false; } else if (obj->mm.pages) { __do_clflush(obj); + obj->cache_dirty = false; } else { GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU); } - obj->cache_dirty = false; return true; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index fb33d0322960..347dab952e90 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -479,7 +479,7 @@ set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data) if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915) && !IS_ROCKETLAKE(i915) && !IS_ALDERLAKE_S(i915)) { drm_dbg(&i915->drm, - "Bonding on gen12+ aside from TGL, RKL, and ADL_S not supported\n"); + "Bonding not supported on this platform\n"); return -ENODEV; } @@ -1001,7 +1001,7 @@ static void free_engines_rcu(struct rcu_head *rcu) free_engines(engines); } -static int __i915_sw_fence_call +static int engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { struct i915_gem_engines *engines = diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index e8a58c997170..1b526039a60d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -248,8 +248,19 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj) if (IS_ERR(pages)) return PTR_ERR(pages); - /* XXX: consider doing a vmap flush or something */ - if (!HAS_LLC(i915) || i915_gem_object_can_bypass_llc(obj)) + /* + * DG1 is special here since it still snoops transactions even with + * CACHE_NONE. This is not the case with other HAS_SNOOP platforms. We + * might need to revisit this as we add new discrete platforms. + * + * XXX: Consider doing a vmap flush or something, where possible. + * Currently we just do a heavy handed wbinvd_on_all_cpus() here since + * the underlying sg_table might not even point to struct pages, so we + * can't just call drm_clflush_sg or similar, like we do elsewhere in + * the driver. + */ + if (i915_gem_object_can_bypass_llc(obj) || + (!HAS_LLC(i915) && !IS_DG1(i915))) wbinvd_on_all_cpus(); sg_page_sizes = i915_sg_dma_sizes(pages->sgl); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c index b684a62bf3b0..26532c07d467 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c @@ -18,10 +18,32 @@ static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj) { + struct drm_i915_private *i915 = to_i915(obj->base.dev); + + if (IS_DGFX(i915)) + return false; + return !(obj->cache_level == I915_CACHE_NONE || obj->cache_level == I915_CACHE_WT); } +bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + + if (obj->cache_dirty) + return false; + + if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)) + return true; + + if (IS_DGFX(i915)) + return false; + + /* Currently in use by HW (display engine)? Keep flushed. 
*/ + return i915_gem_object_is_framebuffer(obj); +} + static void flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains) { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 4d7da07442f2..60ee60f7bb09 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -29,6 +29,7 @@ #include "i915_gem_ioctls.h" #include "i915_trace.h" #include "i915_user_extensions.h" +#include "i915_vma_snapshot.h" struct eb_vma { struct i915_vma *vma; @@ -307,11 +308,15 @@ struct i915_execbuffer { struct eb_fence *fences; unsigned long num_fences; +#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) + struct i915_capture_list *capture_lists[MAX_ENGINE_INSTANCE + 1]; +#endif }; static int eb_parse(struct i915_execbuffer *eb); static int eb_pin_engine(struct i915_execbuffer *eb, bool throttle); static void eb_unpin_engine(struct i915_execbuffer *eb); +static void eb_capture_release(struct i915_execbuffer *eb); static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb) { @@ -990,7 +995,7 @@ static int eb_validate_vmas(struct i915_execbuffer *eb) } if (!(ev->flags & EXEC_OBJECT_WRITE)) { - err = dma_resv_reserve_shared(vma->resv, 1); + err = dma_resv_reserve_shared(vma->obj->base.resv, 1); if (err) return err; } @@ -1043,6 +1048,7 @@ static void eb_release_vmas(struct i915_execbuffer *eb, bool final) i915_vma_put(vma); } + eb_capture_release(eb); eb_unpin_engine(eb); } @@ -1880,36 +1886,113 @@ eb_find_first_request_added(struct i915_execbuffer *eb) return NULL; } -static int eb_move_to_gpu(struct i915_execbuffer *eb) +#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) + +/* Stage with GFP_KERNEL allocations before we enter the signaling critical path */ +static void eb_capture_stage(struct i915_execbuffer *eb) { const unsigned int count = eb->buffer_count; - unsigned int i = count; - int err = 0, j; + unsigned int i = count, j; + struct i915_vma_snapshot *vsnap; while (i--) { struct eb_vma *ev = &eb->vma[i]; struct i915_vma *vma = ev->vma; unsigned int flags = ev->flags; - struct drm_i915_gem_object *obj = vma->obj; - assert_vma_held(vma); + if (!(flags & EXEC_OBJECT_CAPTURE)) + continue; - if (flags & EXEC_OBJECT_CAPTURE) { + vsnap = i915_vma_snapshot_alloc(GFP_KERNEL); + if (!vsnap) + continue; + + i915_vma_snapshot_init(vsnap, vma, "user"); + for_each_batch_create_order(eb, j) { struct i915_capture_list *capture; - for_each_batch_create_order(eb, j) { - if (!eb->requests[j]) - break; + capture = kmalloc(sizeof(*capture), GFP_KERNEL); + if (!capture) + continue; - capture = kmalloc(sizeof(*capture), GFP_KERNEL); - if (capture) { - capture->next = - eb->requests[j]->capture_list; - capture->vma = vma; - eb->requests[j]->capture_list = capture; - } - } + capture->next = eb->capture_lists[j]; + capture->vma_snapshot = i915_vma_snapshot_get(vsnap); + eb->capture_lists[j] = capture; + } + i915_vma_snapshot_put(vsnap); + } +} + +/* Commit once we're in the critical path */ +static void eb_capture_commit(struct i915_execbuffer *eb) +{ + unsigned int j; + + for_each_batch_create_order(eb, j) { + struct i915_request *rq = eb->requests[j]; + + if (!rq) + break; + + rq->capture_list = eb->capture_lists[j]; + eb->capture_lists[j] = NULL; + } +} + +/* + * Release anything that didn't get committed due to errors. + * The capture_list will otherwise be freed at request retire. 
+ */ +static void eb_capture_release(struct i915_execbuffer *eb) +{ + unsigned int j; + + for_each_batch_create_order(eb, j) { + if (eb->capture_lists[j]) { + i915_request_free_capture_list(eb->capture_lists[j]); + eb->capture_lists[j] = NULL; } + } +} + +static void eb_capture_list_clear(struct i915_execbuffer *eb) +{ + memset(eb->capture_lists, 0, sizeof(eb->capture_lists)); +} + +#else + +static void eb_capture_stage(struct i915_execbuffer *eb) +{ +} + +static void eb_capture_commit(struct i915_execbuffer *eb) +{ +} + +static void eb_capture_release(struct i915_execbuffer *eb) +{ +} + +static void eb_capture_list_clear(struct i915_execbuffer *eb) +{ +} + +#endif + +static int eb_move_to_gpu(struct i915_execbuffer *eb) +{ + const unsigned int count = eb->buffer_count; + unsigned int i = count; + int err = 0, j; + + while (i--) { + struct eb_vma *ev = &eb->vma[i]; + struct i915_vma *vma = ev->vma; + unsigned int flags = ev->flags; + struct drm_i915_gem_object *obj = vma->obj; + + assert_vma_held(vma); /* * If the GPU is not _reading_ through the CPU cache, we need @@ -1990,6 +2073,8 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb) /* Unconditionally flush any chipset caches (for streaming writes). */ intel_gt_chipset_flush(eb->gt); + eb_capture_commit(eb); + return 0; err_skip: @@ -2164,7 +2249,7 @@ static int eb_parse(struct i915_execbuffer *eb) goto err_trampoline; } - err = dma_resv_reserve_shared(shadow->resv, 1); + err = dma_resv_reserve_shared(shadow->obj->base.resv, 1); if (err) goto err_trampoline; @@ -3114,7 +3199,7 @@ eb_requests_create(struct i915_execbuffer *eb, struct dma_fence *in_fence, /* Allocate a request for this batch buffer nice and early. */ eb->requests[i] = i915_request_create(eb_find_context(eb, i)); if (IS_ERR(eb->requests[i])) { - out_fence = ERR_PTR(PTR_ERR(eb->requests[i])); + out_fence = ERR_CAST(eb->requests[i]); eb->requests[i] = NULL; return out_fence; } @@ -3132,13 +3217,14 @@ eb_requests_create(struct i915_execbuffer *eb, struct dma_fence *in_fence, } /* - * Whilst this request exists, batch_obj will be on the - * active_list, and so will hold the active reference. Only when - * this request is retired will the batch_obj be moved onto - * the inactive_list and lose its active reference. Hence we do - * not need to explicitly hold another reference here. + * Not really on stack, but we don't want to call + * kfree on the batch_snapshot when we put it, so use the + * _onstack interface. 
*/ - eb->requests[i]->batch = eb->batches[i]->vma; + if (eb->batches[i]->vma) + i915_vma_snapshot_init_onstack(&eb->requests[i]->batch_snapshot, + eb->batches[i]->vma, + "batch"); if (eb->batch_pool) { GEM_BUG_ON(intel_context_is_parallel(eb->context)); intel_gt_buffer_pool_mark_active(eb->batch_pool, @@ -3187,6 +3273,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, eb.fences = NULL; eb.num_fences = 0; + eb_capture_list_clear(&eb); + memset(eb.requests, 0, sizeof(struct i915_request *) * ARRAY_SIZE(eb.requests)); eb.composite_fence = NULL; @@ -3273,10 +3361,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, } ww_acquire_done(&eb.ww.ctx); + eb_capture_stage(&eb); out_fence = eb_requests_create(&eb, in_fence, out_fence_fd); if (IS_ERR(out_fence)) { err = PTR_ERR(out_fence); + out_fence = NULL; if (eb.requests[0]) goto err_request; else diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c index a57a6b7013c2..c5150a1ee3d2 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c @@ -145,24 +145,10 @@ static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = { .put_pages = i915_gem_object_put_pages_internal, }; -/** - * i915_gem_object_create_internal: create an object with volatile pages - * @i915: the i915 device - * @size: the size in bytes of backing storage to allocate for the object - * - * Creates a new object that wraps some internal memory for private use. - * This object is not backed by swappable storage, and as such its contents - * are volatile and only valid whilst pinned. If the object is reaped by the - * shrinker, its pages and data will be discarded. Equally, it is not a full - * GEM object and so not valid for access from userspace. This makes it useful - * for hardware interfaces like ringbuffers (which are pinned from the time - * the request is written to the time the hardware stops accessing it), but - * not for contexts (which need to be preserved when not active for later - * reuse). Note that it is not cleared upon allocation. - */ struct drm_i915_gem_object * -i915_gem_object_create_internal(struct drm_i915_private *i915, - phys_addr_t size) +__i915_gem_object_create_internal(struct drm_i915_private *i915, + const struct drm_i915_gem_object_ops *ops, + phys_addr_t size) { static struct lock_class_key lock_class; struct drm_i915_gem_object *obj; @@ -179,7 +165,7 @@ i915_gem_object_create_internal(struct drm_i915_private *i915, return ERR_PTR(-ENOMEM); drm_gem_private_object_init(&i915->drm, &obj->base, size); - i915_gem_object_init(obj, &i915_gem_object_internal_ops, &lock_class, 0); + i915_gem_object_init(obj, ops, &lock_class, 0); obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE; /* @@ -199,3 +185,25 @@ i915_gem_object_create_internal(struct drm_i915_private *i915, return obj; } + +/** + * i915_gem_object_create_internal: create an object with volatile pages + * @i915: the i915 device + * @size: the size in bytes of backing storage to allocate for the object + * + * Creates a new object that wraps some internal memory for private use. + * This object is not backed by swappable storage, and as such its contents + * are volatile and only valid whilst pinned. If the object is reaped by the + * shrinker, its pages and data will be discarded. Equally, it is not a full + * GEM object and so not valid for access from userspace. 
This makes it useful + * for hardware interfaces like ringbuffers (which are pinned from the time + * the request is written to the time the hardware stops accessing it), but + * not for contexts (which need to be preserved when not active for later + * reuse). Note that it is not cleared upon allocation. + */ +struct drm_i915_gem_object * +i915_gem_object_create_internal(struct drm_i915_private *i915, + phys_addr_t size) +{ + return __i915_gem_object_create_internal(i915, &i915_gem_object_internal_ops, size); +} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 1e426a42a36c..5fac9b560b73 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -31,6 +31,7 @@ #include "i915_gem_context.h" #include "i915_gem_mman.h" #include "i915_gem_object.h" +#include "i915_gem_ttm.h" #include "i915_memcpy.h" #include "i915_trace.h" @@ -91,7 +92,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, } /** - * i915_gem_object_fini - Clean up a GEM object initialization + * __i915_gem_object_fini - Clean up a GEM object initialization * @obj: The gem object to cleanup * * This function cleans up gem object fields that are set up by @@ -107,25 +108,29 @@ void __i915_gem_object_fini(struct drm_i915_gem_object *obj) } /** - * Mark up the object's coherency levels for a given cache_level + * i915_gem_object_set_cache_coherency - Mark up the object's coherency levels + * for a given cache_level * @obj: #drm_i915_gem_object * @cache_level: cache level */ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj, unsigned int cache_level) { + struct drm_i915_private *i915 = to_i915(obj->base.dev); + obj->cache_level = cache_level; if (cache_level != I915_CACHE_NONE) obj->cache_coherent = (I915_BO_CACHE_COHERENT_FOR_READ | I915_BO_CACHE_COHERENT_FOR_WRITE); - else if (HAS_LLC(to_i915(obj->base.dev))) + else if (HAS_LLC(i915)) obj->cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ; else obj->cache_coherent = 0; obj->cache_dirty = - !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE); + !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE) && + !IS_DGFX(i915); } bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj) @@ -364,15 +369,6 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj) atomic_inc(&i915->mm.free_count); /* - * This serializes freeing with the shrinker. Since the free - * is delayed, first by RCU then by the workqueue, we want the - * shrinker to be able to free pages of unreferenced objects, - * or else we may oom whilst there are plenty of deferred - * freed objects. - */ - i915_gem_object_make_unshrinkable(obj); - - /* * Since we require blocking on struct_mutex to unbind the freed * object from the GPU before releasing resources back to the * system, we can not do that directly from the RCU callback (which may @@ -456,7 +452,7 @@ i915_gem_object_read_from_page_iomap(struct drm_i915_gem_object *obj, u64 offset * from can't cross a page boundary. The caller must ensure that @obj pages * are pinned and that @obj is synced wrt. any related writes. * - * Returns 0 on success or -ENODEV if the type of @obj's backing store is + * Return: %0 on success or -ENODEV if the type of @obj's backing store is * unsupported. 
*/ int i915_gem_object_read_from_page(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size) @@ -732,6 +728,57 @@ static const struct drm_gem_object_funcs i915_gem_object_funcs = { .export = i915_gem_prime_export, }; +/** + * i915_gem_object_get_moving_fence - Get the object's moving fence if any + * @obj: The object whose moving fence to get. + * + * A non-signaled moving fence means that there is an async operation + * pending on the object that needs to be waited on before setting up + * any GPU- or CPU PTEs to the object's pages. + * + * Return: A refcounted pointer to the object's moving fence if any, + * NULL otherwise. + */ +struct dma_fence * +i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj) +{ + return dma_fence_get(i915_gem_to_ttm(obj)->moving); +} + +/** + * i915_gem_object_wait_moving_fence - Wait for the object's moving fence if any + * @obj: The object whose moving fence to wait for. + * @intr: Whether to wait interruptible. + * + * If the moving fence signaled without an error, it is detached from the + * object and put. + * + * Return: 0 if successful, -ERESTARTSYS if the wait was interrupted, + * negative error code if the async operation represented by the + * moving fence failed. + */ +int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj, + bool intr) +{ + struct dma_fence *fence = i915_gem_to_ttm(obj)->moving; + int ret; + + assert_object_held(obj); + if (!fence) + return 0; + + ret = dma_fence_wait(fence, intr); + if (ret) + return ret; + + if (fence->error) + return fence->error; + + i915_gem_to_ttm(obj)->moving = NULL; + dma_fence_put(fence); + return 0; +} + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/huge_gem_object.c" #include "selftests/huge_pages.c" diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index 59201801cec5..66f20b803b01 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -93,7 +93,6 @@ void i915_gem_flush_free_objects(struct drm_i915_private *i915); struct sg_table * __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj); -void i915_gem_object_truncate(struct drm_i915_gem_object *obj); /** * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle @@ -296,6 +295,12 @@ i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj) } static inline bool +i915_gem_object_has_self_managed_shrink_list(const struct drm_i915_gem_object *obj) +{ + return i915_gem_object_type_has(obj, I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST); +} + +static inline bool i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj) { return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY); @@ -449,7 +454,7 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) } int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj); -void i915_gem_object_truncate(struct drm_i915_gem_object *obj); +int i915_gem_object_truncate(struct drm_i915_gem_object *obj); void i915_gem_object_writeback(struct drm_i915_gem_object *obj); /** @@ -512,11 +517,18 @@ i915_gem_object_finish_access(struct drm_i915_gem_object *obj) i915_gem_object_unpin_pages(obj); } +struct dma_fence * +i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj); + +int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj, + bool intr); + void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj, unsigned int cache_level); bool i915_gem_object_can_bypass_llc(struct 
drm_i915_gem_object *obj); void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj); void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj); +bool i915_gem_cpu_write_needs_clflush(struct drm_i915_gem_object *obj); int __must_check i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write); @@ -533,25 +545,15 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj); void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj); +void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj); +void __i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj); void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj); -static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj) -{ - if (obj->cache_dirty) - return false; - - if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)) - return true; - - /* Currently in use by HW (display engine)? Keep flushed. */ - return i915_gem_object_is_framebuffer(obj); -} - static inline void __start_cpu_write(struct drm_i915_gem_object *obj) { obj->read_domains = I915_GEM_DOMAIN_CPU; obj->write_domain = I915_GEM_DOMAIN_CPU; - if (cpu_write_needs_clflush(obj)) + if (i915_gem_cpu_write_needs_clflush(obj)) obj->cache_dirty = true; } @@ -613,6 +615,14 @@ int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj, bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj, enum intel_memory_type type); +int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st, + size_t size, struct intel_memory_region *mr, + struct address_space *mapping, + unsigned int max_segment); +void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping, + bool dirty, bool backup); +void __shmem_writeback(size_t size, struct address_space *mapping); + #ifdef CONFIG_MMU_NOTIFIER static inline bool i915_gem_object_is_userptr(struct drm_i915_gem_object *obj) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index da85169006d4..f9f7e44099fe 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -34,9 +34,11 @@ struct i915_lut_handle { struct drm_i915_gem_object_ops { unsigned int flags; -#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1) -#define I915_GEM_OBJECT_IS_PROXY BIT(2) -#define I915_GEM_OBJECT_NO_MMAP BIT(3) +#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1) +/* Skip the shrinker management in set_pages/unset_pages */ +#define I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST BIT(2) +#define I915_GEM_OBJECT_IS_PROXY BIT(3) +#define I915_GEM_OBJECT_NO_MMAP BIT(4) /* Interface between the GEM object and its backing storage. * get_pages() is called once prior to the use of the associated set @@ -54,8 +56,11 @@ struct drm_i915_gem_object_ops { int (*get_pages)(struct drm_i915_gem_object *obj); void (*put_pages)(struct drm_i915_gem_object *obj, struct sg_table *pages); - void (*truncate)(struct drm_i915_gem_object *obj); + int (*truncate)(struct drm_i915_gem_object *obj); void (*writeback)(struct drm_i915_gem_object *obj); + int (*shrinker_release_pages)(struct drm_i915_gem_object *obj, + bool no_gpu_wait, + bool should_writeback); int (*pread)(struct drm_i915_gem_object *obj, const struct drm_i915_gem_pread *arg); @@ -486,9 +491,37 @@ struct drm_i915_gem_object { * instead go through the pin/unpin interfaces. 
*/ atomic_t pages_pin_count; + + /** + * @shrink_pin: Prevents the pages from being made visible to + * the shrinker, while the shrink_pin is non-zero. Most users + * should pretty much never have to care about this, outside of + * some special use cases. + * + * By default most objects will start out as visible to the + * shrinker(if I915_GEM_OBJECT_IS_SHRINKABLE) as soon as the + * backing pages are attached to the object, like in + * __i915_gem_object_set_pages(). They will then be removed the + * shrinker list once the pages are released. + * + * The @shrink_pin is incremented by calling + * i915_gem_object_make_unshrinkable(), which will also remove + * the object from the shrinker list, if the pin count was zero. + * + * Callers will then typically call + * i915_gem_object_make_shrinkable() or + * i915_gem_object_make_purgeable() to decrement the pin count, + * and make the pages visible again. + */ atomic_t shrink_pin; /** + * @ttm_shrinkable: True when the object is using shmem pages + * underneath. Protected by the object lock. + */ + bool ttm_shrinkable; + + /** * Priority list of potential placements for this object. */ struct intel_memory_region **placements; @@ -512,6 +545,7 @@ struct drm_i915_gem_object { */ struct list_head region_link; + struct i915_refct_sgt *rsgt; struct sg_table *pages; void *mapping; @@ -547,7 +581,7 @@ struct drm_i915_gem_object { struct i915_gem_object_page_iter get_dma_page; /** - * Element within i915->mm.unbound_list or i915->mm.bound_list, + * Element within i915->mm.shrink_list or i915->mm.purge_list, * locked by i915->mm.obj_lock. */ struct list_head link; @@ -565,7 +599,7 @@ struct drm_i915_gem_object { } mm; struct { - struct sg_table *cached_io_st; + struct i915_refct_sgt *cached_io_rsgt; struct i915_gem_object_page_iter get_io_page; struct drm_i915_gem_object *backup; bool created:1; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 8eb1c3a6fc9c..49c6e55c68ce 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -26,6 +26,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, /* Make the pages coherent with the GPU (flushing any swapin). 
*/ if (obj->cache_dirty) { + WARN_ON_ONCE(IS_DGFX(i915)); obj->write_domain = 0; if (i915_gem_object_has_struct_page(obj)) drm_clflush_sg(pages); @@ -68,7 +69,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, shrinkable = false; } - if (shrinkable) { + if (shrinkable && !i915_gem_object_has_self_managed_shrink_list(obj)) { struct list_head *list; unsigned long flags; @@ -158,11 +159,13 @@ retry: } /* Immediately discard the backing storage */ -void i915_gem_object_truncate(struct drm_i915_gem_object *obj) +int i915_gem_object_truncate(struct drm_i915_gem_object *obj) { drm_gem_free_mmap_offset(&obj->base); if (obj->ops->truncate) - obj->ops->truncate(obj); + return obj->ops->truncate(obj); + + return 0; } /* Try to discard unwanted pages */ @@ -208,7 +211,8 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj) if (i915_gem_object_is_volatile(obj)) obj->mm.madv = I915_MADV_WILLNEED; - i915_gem_object_make_unshrinkable(obj); + if (!i915_gem_object_has_self_managed_shrink_list(obj)) + i915_gem_object_make_unshrinkable(obj); if (obj->mm.mapping) { unmap_object(obj, page_mask_bits(obj->mm.mapping)); @@ -414,6 +418,12 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, } if (!ptr) { + err = i915_gem_object_wait_moving_fence(obj, true); + if (err) { + ptr = ERR_PTR(err); + goto err_unpin; + } + if (GEM_WARN_ON(type == I915_MAP_WC && !static_cpu_has(X86_FEATURE_PAT))) ptr = ERR_PTR(-ENODEV); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c index a016ccec36f3..a4350227e9ae 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_region.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c @@ -11,7 +11,7 @@ void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj, struct intel_memory_region *mem) { - obj->mm.region = intel_memory_region_get(mem); + obj->mm.region = mem; mutex_lock(&mem->objects.lock); list_add(&obj->mm.region_link, &mem->objects.list); @@ -25,8 +25,6 @@ void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj) mutex_lock(&mem->objects.lock); list_del(&obj->mm.region_link); mutex_unlock(&mem->objects.lock); - - intel_memory_region_put(mem); } struct drm_i915_gem_object * diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index d77da59fae04..cc9fe258fba7 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -25,62 +25,67 @@ static void check_release_pagevec(struct pagevec *pvec) cond_resched(); } -static int shmem_get_pages(struct drm_i915_gem_object *obj) +void shmem_sg_free_table(struct sg_table *st, struct address_space *mapping, + bool dirty, bool backup) { - struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct intel_memory_region *mem = obj->mm.region; - const unsigned long page_count = obj->base.size / PAGE_SIZE; + struct sgt_iter sgt_iter; + struct pagevec pvec; + struct page *page; + + mapping_clear_unevictable(mapping); + + pagevec_init(&pvec); + for_each_sgt_page(page, sgt_iter, st) { + if (dirty) + set_page_dirty(page); + + if (backup) + mark_page_accessed(page); + + if (!pagevec_add(&pvec, page)) + check_release_pagevec(&pvec); + } + if (pagevec_count(&pvec)) + check_release_pagevec(&pvec); + + sg_free_table(st); +} + +int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st, + size_t size, struct intel_memory_region *mr, + struct address_space *mapping, + unsigned int max_segment) +{ + const unsigned long page_count = size / 
PAGE_SIZE; unsigned long i; - struct address_space *mapping; - struct sg_table *st; struct scatterlist *sg; - struct sgt_iter sgt_iter; struct page *page; unsigned long last_pfn = 0; /* suppress gcc warning */ - unsigned int max_segment = i915_sg_segment_size(); - unsigned int sg_page_sizes; gfp_t noreclaim; int ret; /* - * Assert that the object is not currently in any GPU domain. As it - * wasn't in the GTT, there shouldn't be any way it could have been in - * a GPU cache - */ - GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); - GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); - - /* * If there's no chance of allocating enough pages for the whole * object, bail early. */ - if (obj->base.size > resource_size(&mem->region)) + if (size > resource_size(&mr->region)) return -ENOMEM; - st = kmalloc(sizeof(*st), GFP_KERNEL); - if (!st) + if (sg_alloc_table(st, page_count, GFP_KERNEL)) return -ENOMEM; -rebuild_st: - if (sg_alloc_table(st, page_count, GFP_KERNEL)) { - kfree(st); - return -ENOMEM; - } - /* * Get the list of pages out of our struct file. They'll be pinned * at this point until we release them. * * Fail silently without starting the shrinker */ - mapping = obj->base.filp->f_mapping; mapping_set_unevictable(mapping); noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM); noreclaim |= __GFP_NORETRY | __GFP_NOWARN; sg = st->sgl; st->nents = 0; - sg_page_sizes = 0; for (i = 0; i < page_count; i++) { const unsigned int shrink[] = { I915_SHRINK_BOUND | I915_SHRINK_UNBOUND, @@ -135,10 +140,9 @@ rebuild_st: if (!i || sg->length >= max_segment || page_to_pfn(page) != last_pfn + 1) { - if (i) { - sg_page_sizes |= sg->length; + if (i) sg = sg_next(sg); - } + st->nents++; sg_set_page(sg, page, PAGE_SIZE, 0); } else { @@ -149,14 +153,67 @@ rebuild_st: /* Check that the i965g/gm workaround works. */ GEM_BUG_ON(gfp & __GFP_DMA32 && last_pfn >= 0x00100000UL); } - if (sg) { /* loop terminated early; short sg table */ - sg_page_sizes |= sg->length; + if (sg) /* loop terminated early; short sg table */ sg_mark_end(sg); - } /* Trim unused sg entries to avoid wasting memory. */ i915_sg_trim(st); + return 0; +err_sg: + sg_mark_end(sg); + if (sg != st->sgl) { + shmem_sg_free_table(st, mapping, false, false); + } else { + mapping_clear_unevictable(mapping); + sg_free_table(st); + } + + /* + * shmemfs first checks if there is enough memory to allocate the page + * and reports ENOSPC should there be insufficient, along with the usual + * ENOMEM for a genuine allocation failure. + * + * We use ENOSPC in our driver to mean that we have run out of aperture + * space and so want to translate the error from shmemfs back to our + * usual understanding of ENOMEM. + */ + if (ret == -ENOSPC) + ret = -ENOMEM; + + return ret; +} + +static int shmem_get_pages(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + struct intel_memory_region *mem = obj->mm.region; + struct address_space *mapping = obj->base.filp->f_mapping; + const unsigned long page_count = obj->base.size / PAGE_SIZE; + unsigned int max_segment = i915_sg_segment_size(); + struct sg_table *st; + struct sgt_iter sgt_iter; + struct page *page; + int ret; + + /* + * Assert that the object is not currently in any GPU domain. 
As it + * wasn't in the GTT, there shouldn't be any way it could have been in + * a GPU cache + */ + GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); + GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); + +rebuild_st: + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return -ENOMEM; + + ret = shmem_sg_alloc_table(i915, st, obj->base.size, mem, mapping, + max_segment); + if (ret) + goto err_st; + ret = i915_gem_gtt_prepare_pages(obj, st); if (ret) { /* @@ -168,6 +225,7 @@ rebuild_st: for_each_sgt_page(page, sgt_iter, st) put_page(page); sg_free_table(st); + kfree(st); max_segment = PAGE_SIZE; goto rebuild_st; @@ -185,28 +243,12 @@ rebuild_st: if (i915_gem_object_can_bypass_llc(obj)) obj->cache_dirty = true; - __i915_gem_object_set_pages(obj, st, sg_page_sizes); + __i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl)); return 0; -err_sg: - sg_mark_end(sg); err_pages: - mapping_clear_unevictable(mapping); - if (sg != st->sgl) { - struct pagevec pvec; - - pagevec_init(&pvec); - for_each_sgt_page(page, sgt_iter, st) { - if (!pagevec_add(&pvec, page)) - check_release_pagevec(&pvec); - } - if (pagevec_count(&pvec)) - check_release_pagevec(&pvec); - } - sg_free_table(st); - kfree(st); - + shmem_sg_free_table(st, mapping, false, false); /* * shmemfs first checks if there is enough memory to allocate the page * and reports ENOSPC should there be insufficient, along with the usual @@ -216,13 +258,16 @@ err_pages: * space and so want to translate the error from shmemfs back to our * usual understanding of ENOMEM. */ +err_st: if (ret == -ENOSPC) ret = -ENOMEM; + kfree(st); + return ret; } -static void +static int shmem_truncate(struct drm_i915_gem_object *obj) { /* @@ -234,12 +279,12 @@ shmem_truncate(struct drm_i915_gem_object *obj) shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1); obj->mm.madv = __I915_MADV_PURGED; obj->mm.pages = ERR_PTR(-EFAULT); + + return 0; } -static void -shmem_writeback(struct drm_i915_gem_object *obj) +void __shmem_writeback(size_t size, struct address_space *mapping) { - struct address_space *mapping; struct writeback_control wbc = { .sync_mode = WB_SYNC_NONE, .nr_to_write = SWAP_CLUSTER_MAX, @@ -255,10 +300,9 @@ shmem_writeback(struct drm_i915_gem_object *obj) * instead of invoking writeback so they are aged and paged out * as normal. 
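Editor's illustrative sketch (not part of the patch): the intended pairing of the newly exported shmem helpers, roughly mirroring how the TTM shmem backend later in this patch uses them; st, size, mr, mapping and dev are assumed to be provided by the caller.

	err = shmem_sg_alloc_table(i915, st, size, mr, mapping,
				   i915_sg_segment_size());
	if (err)
		return err;

	/* Map for DMA; on failure release the shmem pages again. */
	err = dma_map_sgtable(dev, st, DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
	if (err) {
		shmem_sg_free_table(st, mapping, false, false);
		return err;
	}
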
*/ - mapping = obj->base.filp->f_mapping; /* Begin writeback on each dirty page */ - for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) { + for (i = 0; i < size >> PAGE_SHIFT; i++) { struct page *page; page = find_lock_page(mapping, i); @@ -281,6 +325,12 @@ put: } } +static void +shmem_writeback(struct drm_i915_gem_object *obj) +{ + __shmem_writeback(obj->base.size, obj->base.filp->f_mapping); +} + void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages, @@ -313,11 +363,6 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages) { - struct sgt_iter sgt_iter; - struct pagevec pvec; - struct page *page; - - GEM_WARN_ON(IS_DGFX(to_i915(obj->base.dev))); __i915_gem_object_release_shmem(obj, pages, true); i915_gem_gtt_finish_pages(obj, pages); @@ -325,25 +370,10 @@ void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_ if (i915_gem_object_needs_bit17_swizzle(obj)) i915_gem_object_save_bit_17_swizzle(obj, pages); - mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping); - - pagevec_init(&pvec); - for_each_sgt_page(page, sgt_iter, pages) { - if (obj->mm.dirty) - set_page_dirty(page); - - if (obj->mm.madv == I915_MADV_WILLNEED) - mark_page_accessed(page); - - if (!pagevec_add(&pvec, page)) - check_release_pagevec(&pvec); - } - if (pagevec_count(&pvec)) - check_release_pagevec(&pvec); - obj->mm.dirty = false; - - sg_free_table(pages); + shmem_sg_free_table(pages, file_inode(obj->base.filp)->i_mapping, + obj->mm.dirty, obj->mm.madv == I915_MADV_WILLNEED); kfree(pages); + obj->mm.dirty = false; } static void @@ -634,9 +664,10 @@ static int init_shmem(struct intel_memory_region *mem) return 0; /* Don't error, we can simply fallback to the kernel mnt */ } -static void release_shmem(struct intel_memory_region *mem) +static int release_shmem(struct intel_memory_region *mem) { i915_gemfs_fini(mem->i915); + return 0; } static const struct intel_memory_region_ops shmem_region_ops = { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index af3eb7fd951d..157a9765f483 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -55,19 +55,25 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj, return false; } -static void try_to_writeback(struct drm_i915_gem_object *obj, - unsigned int flags) +static int try_to_writeback(struct drm_i915_gem_object *obj, unsigned int flags) { + if (obj->ops->shrinker_release_pages) + return obj->ops->shrinker_release_pages(obj, + !(flags & I915_SHRINK_ACTIVE), + flags & I915_SHRINK_WRITEBACK); + switch (obj->mm.madv) { case I915_MADV_DONTNEED: i915_gem_object_truncate(obj); - return; + return 0; case __I915_MADV_PURGED: - return; + return 0; } if (flags & I915_SHRINK_WRITEBACK) i915_gem_object_writeback(obj); + + return 0; } /** @@ -221,8 +227,8 @@ i915_gem_shrink(struct i915_gem_ww_ctx *ww, } if (!__i915_gem_object_put_pages(obj)) { - try_to_writeback(obj, shrink); - count += obj->base.size >> PAGE_SHIFT; + if (!try_to_writeback(obj, shrink)) + count += obj->base.size >> PAGE_SHIFT; } if (!ww) i915_gem_object_unlock(obj); @@ -455,6 +461,16 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915, #define obj_to_i915(obj__) to_i915((obj__)->base.dev) +/** + * i915_gem_object_make_unshrinkable - Hide the object from the shrinker. 
By + * default all object types that support shrinking(see IS_SHRINKABLE), will also + * make the object visible to the shrinker after allocating the system memory + * pages. + * @obj: The GEM object. + * + * This is typically used for special kernel internal objects that can't be + * easily processed by the shrinker, like if they are perma-pinned. + */ void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = obj_to_i915(obj); @@ -479,13 +495,12 @@ void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj) spin_unlock_irqrestore(&i915->mm.obj_lock, flags); } -static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj, - struct list_head *head) +static void ___i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj, + struct list_head *head) { struct drm_i915_private *i915 = obj_to_i915(obj); unsigned long flags; - GEM_BUG_ON(!i915_gem_object_has_pages(obj)); if (!i915_gem_object_is_shrinkable(obj)) return; @@ -505,14 +520,67 @@ static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj, spin_unlock_irqrestore(&i915->mm.obj_lock, flags); } +/** + * __i915_gem_object_make_shrinkable - Move the object to the tail of the + * shrinkable list. Objects on this list might be swapped out. Used with + * WILLNEED objects. + * @obj: The GEM object. + * + * DO NOT USE. This is intended to be called on very special objects that don't + * yet have mm.pages, but are guaranteed to have potentially reclaimable pages + * underneath. + */ +void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj) +{ + ___i915_gem_object_make_shrinkable(obj, + &obj_to_i915(obj)->mm.shrink_list); +} + +/** + * __i915_gem_object_make_purgeable - Move the object to the tail of the + * purgeable list. Objects on this list might be swapped out. Used with + * DONTNEED objects. + * @obj: The GEM object. + * + * DO NOT USE. This is intended to be called on very special objects that don't + * yet have mm.pages, but are guaranteed to have potentially reclaimable pages + * underneath. + */ +void __i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj) +{ + ___i915_gem_object_make_shrinkable(obj, + &obj_to_i915(obj)->mm.purge_list); +} + +/** + * i915_gem_object_make_shrinkable - Move the object to the tail of the + * shrinkable list. Objects on this list might be swapped out. Used with + * WILLNEED objects. + * @obj: The GEM object. + * + * MUST only be called on objects which have backing pages. + * + * MUST be balanced with previous call to i915_gem_object_make_unshrinkable(). + */ void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj) { - __i915_gem_object_make_shrinkable(obj, - &obj_to_i915(obj)->mm.shrink_list); + GEM_BUG_ON(!i915_gem_object_has_pages(obj)); + __i915_gem_object_make_shrinkable(obj); } +/** + * i915_gem_object_make_purgeable - Move the object to the tail of the purgeable + * list. Used with DONTNEED objects. Unlike with shrinkable objects, the + * shrinker will attempt to discard the backing pages, instead of trying to swap + * them out. + * @obj: The GEM object. + * + * MUST only be called on objects which have backing pages. + * + * MUST be balanced with previous call to i915_gem_object_make_unshrinkable(). 
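Editor's illustrative sketch (not part of the patch): how the balanced pin/unpin pairing described above is expected to look at a call site; the madv check merely selects between the shrinkable and purgeable lists.

	/* Hide the object from the shrinker while it is perma-pinned. */
	i915_gem_object_make_unshrinkable(obj);

	/* ... object is used while hidden from the shrinker ... */

	/* Balance the pin and make the backing pages visible again. */
	if (obj->mm.madv == I915_MADV_WILLNEED)
		i915_gem_object_make_shrinkable(obj);
	else
		i915_gem_object_make_purgeable(obj);
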
+ */ void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj) { - __i915_gem_object_make_shrinkable(obj, - &obj_to_i915(obj)->mm.purge_list); + GEM_BUG_ON(!i915_gem_object_has_pages(obj)); + __i915_gem_object_make_purgeable(obj); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index ddd37ccb1362..bce03d74a0b4 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -399,7 +399,7 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem) return 0; } - if (intel_vtd_active() && GRAPHICS_VER(i915) < 8) { + if (intel_vtd_active(i915) && GRAPHICS_VER(i915) < 8) { drm_notice(&i915->drm, "%s, disabling use of stolen memory\n", "DMAR active"); @@ -720,9 +720,10 @@ static int init_stolen_smem(struct intel_memory_region *mem) return i915_gem_init_stolen(mem); } -static void release_stolen_smem(struct intel_memory_region *mem) +static int release_stolen_smem(struct intel_memory_region *mem) { i915_gem_cleanup_stolen(mem->i915); + return 0; } static const struct intel_memory_region_ops i915_region_stolen_smem_ops = { @@ -759,10 +760,11 @@ err_fini: return err; } -static void release_stolen_lmem(struct intel_memory_region *mem) +static int release_stolen_lmem(struct intel_memory_region *mem) { io_mapping_fini(&mem->iomap); i915_gem_cleanup_stolen(mem->i915); + return 0; } static const struct intel_memory_region_ops i915_region_stolen_lmem_ops = { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index 74a1ffd0d7dd..218a9b3037c7 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -14,13 +14,9 @@ #include "gem/i915_gem_object.h" #include "gem/i915_gem_region.h" #include "gem/i915_gem_ttm.h" +#include "gem/i915_gem_ttm_move.h" #include "gem/i915_gem_ttm_pm.h" - -#include "gt/intel_engine_pm.h" -#include "gt/intel_gt.h" -#include "gt/intel_migrate.h" - #define I915_TTM_PRIO_PURGE 0 #define I915_TTM_PRIO_NO_PAGES 1 #define I915_TTM_PRIO_HAS_PAGES 2 @@ -34,7 +30,9 @@ * struct i915_ttm_tt - TTM page vector with additional private information * @ttm: The base TTM page vector. * @dev: The struct device used for dma mapping and unmapping. - * @cached_st: The cached scatter-gather table. + * @cached_rsgt: The cached scatter-gather table. + * @is_shmem: Set if using shmem. + * @filp: The shmem file, if using shmem backend. * * Note that DMA may be going on right up to the point where the page- * vector is unpopulated in delayed destroy. Hence keep the @@ -45,7 +43,10 @@ struct i915_ttm_tt { struct ttm_tt ttm; struct device *dev; - struct sg_table *cached_st; + struct i915_refct_sgt cached_rsgt; + + bool is_shmem; + struct file *filp; }; static const struct ttm_place sys_placement_flags = { @@ -103,37 +104,15 @@ static int i915_ttm_err_to_gem(int err) return err; } -static bool gpu_binds_iomem(struct ttm_resource *mem) -{ - return mem->mem_type != TTM_PL_SYSTEM; -} - -static bool cpu_maps_iomem(struct ttm_resource *mem) -{ - /* Once / if we support GGTT, this is also false for cached ttm_tts */ - return mem->mem_type != TTM_PL_SYSTEM; -} - -static enum i915_cache_level -i915_ttm_cache_level(struct drm_i915_private *i915, struct ttm_resource *res, - struct ttm_tt *ttm) -{ - return ((HAS_LLC(i915) || HAS_SNOOP(i915)) && !gpu_binds_iomem(res) && - ttm->caching == ttm_cached) ? 
I915_CACHE_LLC : - I915_CACHE_NONE; -} - -static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj); - static enum ttm_caching i915_ttm_select_tt_caching(const struct drm_i915_gem_object *obj) { /* - * Objects only allowed in system get cached cpu-mappings. - * Other objects get WC mapping for now. Even if in system. + * Objects only allowed in system get cached cpu-mappings, or when + * evicting lmem-only buffers to system for swapping. Other objects get + * WC mapping for now. Even if in system. */ - if (obj->mm.region->type == INTEL_MEMORY_SYSTEM && - obj->mm.n_placements <= 1) + if (obj->mm.n_placements <= 1) return ttm_cached; return ttm_write_combined; @@ -179,15 +158,103 @@ i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj, placement->busy_placement = busy; } +static int i915_ttm_tt_shmem_populate(struct ttm_device *bdev, + struct ttm_tt *ttm, + struct ttm_operation_ctx *ctx) +{ + struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev); + struct intel_memory_region *mr = i915->mm.regions[INTEL_MEMORY_SYSTEM]; + struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); + const unsigned int max_segment = i915_sg_segment_size(); + const size_t size = ttm->num_pages << PAGE_SHIFT; + struct file *filp = i915_tt->filp; + struct sgt_iter sgt_iter; + struct sg_table *st; + struct page *page; + unsigned long i; + int err; + + if (!filp) { + struct address_space *mapping; + gfp_t mask; + + filp = shmem_file_setup("i915-shmem-tt", size, VM_NORESERVE); + if (IS_ERR(filp)) + return PTR_ERR(filp); + + mask = GFP_HIGHUSER | __GFP_RECLAIMABLE; + + mapping = filp->f_mapping; + mapping_set_gfp_mask(mapping, mask); + GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM)); + + i915_tt->filp = filp; + } + + st = &i915_tt->cached_rsgt.table; + err = shmem_sg_alloc_table(i915, st, size, mr, filp->f_mapping, + max_segment); + if (err) + return err; + + err = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, + DMA_ATTR_SKIP_CPU_SYNC); + if (err) + goto err_free_st; + + i = 0; + for_each_sgt_page(page, sgt_iter, st) + ttm->pages[i++] = page; + + if (ttm->page_flags & TTM_TT_FLAG_SWAPPED) + ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED; + + return 0; + +err_free_st: + shmem_sg_free_table(st, filp->f_mapping, false, false); + + return err; +} + +static void i915_ttm_tt_shmem_unpopulate(struct ttm_tt *ttm) +{ + struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); + bool backup = ttm->page_flags & TTM_TT_FLAG_SWAPPED; + struct sg_table *st = &i915_tt->cached_rsgt.table; + + shmem_sg_free_table(st, file_inode(i915_tt->filp)->i_mapping, + backup, backup); +} + +static void i915_ttm_tt_release(struct kref *ref) +{ + struct i915_ttm_tt *i915_tt = + container_of(ref, typeof(*i915_tt), cached_rsgt.kref); + struct sg_table *st = &i915_tt->cached_rsgt.table; + + GEM_WARN_ON(st->sgl); + + kfree(i915_tt); +} + +static const struct i915_refct_sgt_ops tt_rsgt_ops = { + .release = i915_ttm_tt_release +}; + static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags) { struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, bo->resource->mem_type); struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); + enum ttm_caching caching; struct i915_ttm_tt *i915_tt; int ret; + if (!obj) + return NULL; + i915_tt = kzalloc(sizeof(*i915_tt), GFP_KERNEL); if (!i915_tt) return NULL; @@ -196,38 +263,66 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo, man->use_tt) page_flags |= TTM_TT_FLAG_ZERO_ALLOC; - ret = 
ttm_tt_init(&i915_tt->ttm, bo, page_flags, - i915_ttm_select_tt_caching(obj)); - if (ret) { - kfree(i915_tt); - return NULL; + caching = i915_ttm_select_tt_caching(obj); + if (i915_gem_object_is_shrinkable(obj) && caching == ttm_cached) { + page_flags |= TTM_TT_FLAG_EXTERNAL | + TTM_TT_FLAG_EXTERNAL_MAPPABLE; + i915_tt->is_shmem = true; } + ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags, caching); + if (ret) + goto err_free; + + __i915_refct_sgt_init(&i915_tt->cached_rsgt, bo->base.size, + &tt_rsgt_ops); + i915_tt->dev = obj->base.dev->dev; return &i915_tt->ttm; + +err_free: + kfree(i915_tt); + return NULL; +} + +static int i915_ttm_tt_populate(struct ttm_device *bdev, + struct ttm_tt *ttm, + struct ttm_operation_ctx *ctx) +{ + struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); + + if (i915_tt->is_shmem) + return i915_ttm_tt_shmem_populate(bdev, ttm, ctx); + + return ttm_pool_alloc(&bdev->pool, ttm, ctx); } static void i915_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm) { struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); + struct sg_table *st = &i915_tt->cached_rsgt.table; - if (i915_tt->cached_st) { - dma_unmap_sgtable(i915_tt->dev, i915_tt->cached_st, - DMA_BIDIRECTIONAL, 0); - sg_free_table(i915_tt->cached_st); - kfree(i915_tt->cached_st); - i915_tt->cached_st = NULL; + if (st->sgl) + dma_unmap_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0); + + if (i915_tt->is_shmem) { + i915_ttm_tt_shmem_unpopulate(ttm); + } else { + sg_free_table(st); + ttm_pool_free(&bdev->pool, ttm); } - ttm_pool_free(&bdev->pool, ttm); } static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm) { struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); + if (i915_tt->filp) + fput(i915_tt->filp); + ttm_tt_fini(ttm); - kfree(i915_tt); + i915_refct_sgt_put(&i915_tt->cached_rsgt); } static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo, @@ -235,6 +330,17 @@ static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo, { struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); + if (!obj) + return false; + + /* + * EXTERNAL objects should never be swapped out by TTM, instead we need + * to handle that ourselves. TTM will already skip such objects for us, + * but we would like to avoid grabbing locks for no good reason. + */ + if (bo->ttm && bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL) + return false; + /* Will do for now. Our pinned objects are still on TTM's LRU lists */ return i915_gem_object_evictable(obj); } @@ -245,28 +351,19 @@ static void i915_ttm_evict_flags(struct ttm_buffer_object *bo, *placement = i915_sys_placement; } -static int i915_ttm_move_notify(struct ttm_buffer_object *bo) -{ - struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); - int ret; - - ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE); - if (ret) - return ret; - - ret = __i915_gem_object_put_pages(obj); - if (ret) - return ret; - - return 0; -} - -static void i915_ttm_free_cached_io_st(struct drm_i915_gem_object *obj) +/** + * i915_ttm_free_cached_io_rsgt - Free object cached LMEM information + * @obj: The GEM object + * This function frees any LMEM-related information that is cached on + * the object. 
For example the radix tree for fast page lookup and the + * cached refcounted sg-table + */ +void i915_ttm_free_cached_io_rsgt(struct drm_i915_gem_object *obj) { struct radix_tree_iter iter; void __rcu **slot; - if (!obj->ttm.cached_io_st) + if (!obj->ttm.cached_io_rsgt) return; rcu_read_lock(); @@ -274,93 +371,106 @@ static void i915_ttm_free_cached_io_st(struct drm_i915_gem_object *obj) radix_tree_delete(&obj->ttm.get_io_page.radix, iter.index); rcu_read_unlock(); - sg_free_table(obj->ttm.cached_io_st); - kfree(obj->ttm.cached_io_st); - obj->ttm.cached_io_st = NULL; + i915_refct_sgt_put(obj->ttm.cached_io_rsgt); + obj->ttm.cached_io_rsgt = NULL; } -static void -i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj) +/** + * i915_ttm_purge - Clear an object of its memory + * @obj: The object + * + * This function is called to clear an object of it's memory when it is + * marked as not needed anymore. + * + * Return: 0 on success, negative error code on failure. + */ +int i915_ttm_purge(struct drm_i915_gem_object *obj) { struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); + struct i915_ttm_tt *i915_tt = + container_of(bo->ttm, typeof(*i915_tt), ttm); + struct ttm_operation_ctx ctx = { + .interruptible = true, + .no_wait_gpu = false, + }; + struct ttm_placement place = {}; + int ret; - if (cpu_maps_iomem(bo->resource) || bo->ttm->caching != ttm_cached) { - obj->write_domain = I915_GEM_DOMAIN_WC; - obj->read_domains = I915_GEM_DOMAIN_WC; - } else { - obj->write_domain = I915_GEM_DOMAIN_CPU; - obj->read_domains = I915_GEM_DOMAIN_CPU; - } -} + if (obj->mm.madv == __I915_MADV_PURGED) + return 0; -static void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj) -{ - struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); - unsigned int cache_level; - unsigned int i; + ret = ttm_bo_validate(bo, &place, &ctx); + if (ret) + return ret; - /* - * If object was moved to an allowable region, update the object - * region to consider it migrated. Note that if it's currently not - * in an allowable region, it's evicted and we don't update the - * object region. - */ - if (intel_region_to_ttm_type(obj->mm.region) != bo->resource->mem_type) { - for (i = 0; i < obj->mm.n_placements; ++i) { - struct intel_memory_region *mr = obj->mm.placements[i]; - - if (intel_region_to_ttm_type(mr) == bo->resource->mem_type && - mr != obj->mm.region) { - i915_gem_object_release_memory_region(obj); - i915_gem_object_init_memory_region(obj, mr); - break; - } - } + if (bo->ttm && i915_tt->filp) { + /* + * The below fput(which eventually calls shmem_truncate) might + * be delayed by worker, so when directly called to purge the + * pages(like by the shrinker) we should try to be more + * aggressive and release the pages immediately. + */ + shmem_truncate_range(file_inode(i915_tt->filp), + 0, (loff_t)-1); + fput(fetch_and_zero(&i915_tt->filp)); } - obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM); - - obj->mem_flags |= cpu_maps_iomem(bo->resource) ? 
I915_BO_FLAG_IOMEM : - I915_BO_FLAG_STRUCT_PAGE; + obj->write_domain = 0; + obj->read_domains = 0; + i915_ttm_adjust_gem_after_move(obj); + i915_ttm_free_cached_io_rsgt(obj); + obj->mm.madv = __I915_MADV_PURGED; - cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource, - bo->ttm); - i915_gem_object_set_cache_coherency(obj, cache_level); + return 0; } -static void i915_ttm_purge(struct drm_i915_gem_object *obj) +static int i915_ttm_shrinker_release_pages(struct drm_i915_gem_object *obj, + bool no_wait_gpu, + bool should_writeback) { struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); + struct i915_ttm_tt *i915_tt = + container_of(bo->ttm, typeof(*i915_tt), ttm); struct ttm_operation_ctx ctx = { .interruptible = true, - .no_wait_gpu = false, + .no_wait_gpu = no_wait_gpu, }; struct ttm_placement place = {}; int ret; - if (obj->mm.madv == __I915_MADV_PURGED) - return; + if (!bo->ttm || bo->resource->mem_type != TTM_PL_SYSTEM) + return 0; + + GEM_BUG_ON(!i915_tt->is_shmem); + + if (!i915_tt->filp) + return 0; + + ret = ttm_bo_wait_ctx(bo, &ctx); + if (ret) + return ret; - /* TTM's purge interface. Note that we might be reentering. */ + switch (obj->mm.madv) { + case I915_MADV_DONTNEED: + return i915_ttm_purge(obj); + case __I915_MADV_PURGED: + return 0; + } + + if (bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED) + return 0; + + bo->ttm->page_flags |= TTM_TT_FLAG_SWAPPED; ret = ttm_bo_validate(bo, &place, &ctx); - if (!ret) { - obj->write_domain = 0; - obj->read_domains = 0; - i915_ttm_adjust_gem_after_move(obj); - i915_ttm_free_cached_io_st(obj); - obj->mm.madv = __I915_MADV_PURGED; + if (ret) { + bo->ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED; + return ret; } -} -static void i915_ttm_swap_notify(struct ttm_buffer_object *bo) -{ - struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); - int ret = i915_ttm_move_notify(bo); + if (should_writeback) + __shmem_writeback(obj->base.size, i915_tt->filp->f_mapping); - GEM_WARN_ON(ret); - GEM_WARN_ON(obj->ttm.cached_io_st); - if (!ret && obj->mm.madv != I915_MADV_WILLNEED) - i915_ttm_purge(obj); + return 0; } static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo) @@ -369,232 +479,101 @@ static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo) if (likely(obj)) { __i915_gem_object_pages_fini(obj); - i915_ttm_free_cached_io_st(obj); + i915_ttm_free_cached_io_rsgt(obj); } } -static struct intel_memory_region * -i915_ttm_region(struct ttm_device *bdev, int ttm_mem_type) -{ - struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev); - - /* There's some room for optimization here... 
*/ - GEM_BUG_ON(ttm_mem_type != I915_PL_SYSTEM && - ttm_mem_type < I915_PL_LMEM0); - if (ttm_mem_type == I915_PL_SYSTEM) - return intel_memory_region_lookup(i915, INTEL_MEMORY_SYSTEM, - 0); - - return intel_memory_region_lookup(i915, INTEL_MEMORY_LOCAL, - ttm_mem_type - I915_PL_LMEM0); -} - -static struct sg_table *i915_ttm_tt_get_st(struct ttm_tt *ttm) +static struct i915_refct_sgt *i915_ttm_tt_get_st(struct ttm_tt *ttm) { struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); struct sg_table *st; int ret; - if (i915_tt->cached_st) - return i915_tt->cached_st; - - st = kzalloc(sizeof(*st), GFP_KERNEL); - if (!st) - return ERR_PTR(-ENOMEM); + if (i915_tt->cached_rsgt.table.sgl) + return i915_refct_sgt_get(&i915_tt->cached_rsgt); + st = &i915_tt->cached_rsgt.table; ret = sg_alloc_table_from_pages_segment(st, ttm->pages, ttm->num_pages, 0, (unsigned long)ttm->num_pages << PAGE_SHIFT, i915_sg_segment_size(), GFP_KERNEL); if (ret) { - kfree(st); + st->sgl = NULL; return ERR_PTR(ret); } ret = dma_map_sgtable(i915_tt->dev, st, DMA_BIDIRECTIONAL, 0); if (ret) { sg_free_table(st); - kfree(st); return ERR_PTR(ret); } - i915_tt->cached_st = st; - return st; + return i915_refct_sgt_get(&i915_tt->cached_rsgt); } -static struct sg_table * +/** + * i915_ttm_resource_get_st - Get a refcounted sg-table pointing to the + * resource memory + * @obj: The GEM object used for sg-table caching + * @res: The struct ttm_resource for which an sg-table is requested. + * + * This function returns a refcounted sg-table representing the memory + * pointed to by @res. If @res is the object's current resource it may also + * cache the sg_table on the object or attempt to access an already cached + * sg-table. The refcounted sg-table needs to be put when no-longer in use. + * + * Return: A valid pointer to a struct i915_refct_sgt or error pointer on + * failure. + */ +struct i915_refct_sgt * i915_ttm_resource_get_st(struct drm_i915_gem_object *obj, struct ttm_resource *res) { struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); - if (!gpu_binds_iomem(res)) + if (!i915_ttm_gtt_binds_lmem(res)) return i915_ttm_tt_get_st(bo->ttm); /* * If CPU mapping differs, we need to add the ttm_tt pages to * the resulting st. Might make sense for GGTT. 
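Editor's illustrative sketch (not part of the patch): consuming the refcounted sg-table returned by i915_ttm_resource_get_st(); per the kernel-doc above, the reference must be dropped with i915_refct_sgt_put() once the table is no longer needed.

	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
	struct i915_refct_sgt *rsgt;

	rsgt = i915_ttm_resource_get_st(obj, bo->resource);
	if (IS_ERR(rsgt))
		return PTR_ERR(rsgt);

	/* ... walk rsgt->table, e.g. to set up GPU or CPU PTEs ... */

	i915_refct_sgt_put(rsgt);
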
*/ - GEM_WARN_ON(!cpu_maps_iomem(res)); - return intel_region_ttm_resource_to_st(obj->mm.region, res); -} - -static int i915_ttm_accel_move(struct ttm_buffer_object *bo, - bool clear, - struct ttm_resource *dst_mem, - struct ttm_tt *dst_ttm, - struct sg_table *dst_st) -{ - struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915), - bdev); - struct ttm_resource_manager *src_man = - ttm_manager_type(bo->bdev, bo->resource->mem_type); - struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); - struct sg_table *src_st; - struct i915_request *rq; - struct ttm_tt *src_ttm = bo->ttm; - enum i915_cache_level src_level, dst_level; - int ret; - - if (!i915->gt.migrate.context || intel_gt_is_wedged(&i915->gt)) - return -EINVAL; + GEM_WARN_ON(!i915_ttm_cpu_maps_iomem(res)); + if (bo->resource == res) { + if (!obj->ttm.cached_io_rsgt) { + struct i915_refct_sgt *rsgt; - dst_level = i915_ttm_cache_level(i915, dst_mem, dst_ttm); - if (clear) { - if (bo->type == ttm_bo_type_kernel) - return -EINVAL; + rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region, + res); + if (IS_ERR(rsgt)) + return rsgt; - intel_engine_pm_get(i915->gt.migrate.context->engine); - ret = intel_context_migrate_clear(i915->gt.migrate.context, NULL, - dst_st->sgl, dst_level, - gpu_binds_iomem(dst_mem), - 0, &rq); - - if (!ret && rq) { - i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT); - i915_request_put(rq); - } - intel_engine_pm_put(i915->gt.migrate.context->engine); - } else { - src_st = src_man->use_tt ? i915_ttm_tt_get_st(src_ttm) : - obj->ttm.cached_io_st; - - src_level = i915_ttm_cache_level(i915, bo->resource, src_ttm); - intel_engine_pm_get(i915->gt.migrate.context->engine); - ret = intel_context_migrate_copy(i915->gt.migrate.context, - NULL, src_st->sgl, src_level, - gpu_binds_iomem(bo->resource), - dst_st->sgl, dst_level, - gpu_binds_iomem(dst_mem), - &rq); - if (!ret && rq) { - i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT); - i915_request_put(rq); + obj->ttm.cached_io_rsgt = rsgt; } - intel_engine_pm_put(i915->gt.migrate.context->engine); + return i915_refct_sgt_get(obj->ttm.cached_io_rsgt); } - return ret; -} - -static void __i915_ttm_move(struct ttm_buffer_object *bo, bool clear, - struct ttm_resource *dst_mem, - struct ttm_tt *dst_ttm, - struct sg_table *dst_st, - bool allow_accel) -{ - int ret = -EINVAL; - - if (allow_accel) - ret = i915_ttm_accel_move(bo, clear, dst_mem, dst_ttm, dst_st); - if (ret) { - struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); - struct intel_memory_region *dst_reg, *src_reg; - union { - struct ttm_kmap_iter_tt tt; - struct ttm_kmap_iter_iomap io; - } _dst_iter, _src_iter; - struct ttm_kmap_iter *dst_iter, *src_iter; - - dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type); - src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type); - GEM_BUG_ON(!dst_reg || !src_reg); - - dst_iter = !cpu_maps_iomem(dst_mem) ? - ttm_kmap_iter_tt_init(&_dst_iter.tt, dst_ttm) : - ttm_kmap_iter_iomap_init(&_dst_iter.io, &dst_reg->iomap, - dst_st, dst_reg->region.start); - - src_iter = !cpu_maps_iomem(bo->resource) ? 
- ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm) : - ttm_kmap_iter_iomap_init(&_src_iter.io, &src_reg->iomap, - obj->ttm.cached_io_st, - src_reg->region.start); - - ttm_move_memcpy(clear, dst_mem->num_pages, dst_iter, src_iter); - } + return intel_region_ttm_resource_to_rsgt(obj->mm.region, res); } -static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict, - struct ttm_operation_ctx *ctx, - struct ttm_resource *dst_mem, - struct ttm_place *hop) +static void i915_ttm_swap_notify(struct ttm_buffer_object *bo) { struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); - struct ttm_resource_manager *dst_man = - ttm_manager_type(bo->bdev, dst_mem->mem_type); - struct ttm_tt *ttm = bo->ttm; - struct sg_table *dst_st; - bool clear; int ret; - /* Sync for now. We could do the actual copy async. */ - ret = ttm_bo_wait_ctx(bo, ctx); - if (ret) - return ret; + if (!obj) + return; ret = i915_ttm_move_notify(bo); - if (ret) - return ret; - - if (obj->mm.madv != I915_MADV_WILLNEED) { + GEM_WARN_ON(ret); + GEM_WARN_ON(obj->ttm.cached_io_rsgt); + if (!ret && obj->mm.madv != I915_MADV_WILLNEED) i915_ttm_purge(obj); - ttm_resource_free(bo, &dst_mem); - return 0; - } - - /* Populate ttm with pages if needed. Typically system memory. */ - if (ttm && (dst_man->use_tt || (ttm->page_flags & TTM_TT_FLAG_SWAPPED))) { - ret = ttm_tt_populate(bo->bdev, ttm, ctx); - if (ret) - return ret; - } - - dst_st = i915_ttm_resource_get_st(obj, dst_mem); - if (IS_ERR(dst_st)) - return PTR_ERR(dst_st); - - clear = !cpu_maps_iomem(bo->resource) && (!ttm || !ttm_tt_is_populated(ttm)); - if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC))) - __i915_ttm_move(bo, clear, dst_mem, bo->ttm, dst_st, true); - - ttm_bo_move_sync_cleanup(bo, dst_mem); - i915_ttm_adjust_domains_after_move(obj); - i915_ttm_free_cached_io_st(obj); - - if (gpu_binds_iomem(dst_mem) || cpu_maps_iomem(dst_mem)) { - obj->ttm.cached_io_st = dst_st; - obj->ttm.get_io_page.sg_pos = dst_st->sgl; - obj->ttm.get_io_page.sg_idx = 0; - } - - i915_ttm_adjust_gem_after_move(obj); - return 0; } static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem) { - if (!cpu_maps_iomem(mem)) + if (!i915_ttm_cpu_maps_iomem(mem)) return 0; mem->bus.caching = ttm_write_combined; @@ -607,19 +586,26 @@ static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo, unsigned long page_offset) { struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); - unsigned long base = obj->mm.region->iomap.base - obj->mm.region->region.start; struct scatterlist *sg; + unsigned long base; unsigned int ofs; + GEM_BUG_ON(!obj); GEM_WARN_ON(bo->ttm); + base = obj->mm.region->iomap.base - obj->mm.region->region.start; sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true); return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs; } +/* + * All callbacks need to take care not to downcast a struct ttm_buffer_object + * without checking its subclass, since it might be a TTM ghost object. 
+ */ static struct ttm_device_funcs i915_ttm_bo_driver = { .ttm_tt_create = i915_ttm_tt_create, + .ttm_tt_populate = i915_ttm_tt_populate, .ttm_tt_unpopulate = i915_ttm_tt_unpopulate, .ttm_tt_destroy = i915_ttm_tt_destroy, .eviction_valuable = i915_ttm_eviction_valuable, @@ -649,7 +635,6 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj, .interruptible = true, .no_wait_gpu = false, }; - struct sg_table *st; int real_num_busy; int ret; @@ -676,7 +661,6 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj, return i915_ttm_err_to_gem(ret); } - i915_ttm_adjust_lru(obj); if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) { ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx); if (ret) @@ -687,14 +671,19 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj, } if (!i915_gem_object_has_pages(obj)) { - /* Object either has a page vector or is an iomem object */ - st = bo->ttm ? i915_ttm_tt_get_st(bo->ttm) : obj->ttm.cached_io_st; - if (IS_ERR(st)) - return PTR_ERR(st); + struct i915_refct_sgt *rsgt = + i915_ttm_resource_get_st(obj, bo->resource); + + if (IS_ERR(rsgt)) + return PTR_ERR(rsgt); - __i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl)); + GEM_BUG_ON(obj->mm.rsgt); + obj->mm.rsgt = rsgt; + __i915_gem_object_set_pages(obj, &rsgt->table, + i915_sg_dma_sizes(rsgt->table.sgl)); } + i915_ttm_adjust_lru(obj); return ret; } @@ -766,12 +755,21 @@ static void i915_ttm_put_pages(struct drm_i915_gem_object *obj, * and shrinkers will move it out if needed. */ - i915_ttm_adjust_lru(obj); + if (obj->mm.rsgt) + i915_refct_sgt_put(fetch_and_zero(&obj->mm.rsgt)); } -static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj) +/** + * i915_ttm_adjust_lru - Adjust an object's position on relevant LRU lists. + * @obj: The object + */ +void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj) { struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); + struct i915_ttm_tt *i915_tt = + container_of(bo->ttm, typeof(*i915_tt), ttm); + bool shrinkable = + bo->ttm && i915_tt->filp && ttm_tt_is_populated(bo->ttm); /* * Don't manipulate the TTM LRUs while in TTM bo destruction. @@ -781,10 +779,53 @@ static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj) return; /* + * We skip managing the shrinker LRU in set_pages() and just manage + * everything here. This does at least solve the issue with having + * temporary shmem mappings(like with evicted lmem) not being visible to + * the shrinker. Only our shmem objects are shrinkable, everything else + * we keep as unshrinkable. + * + * To make sure everything plays nice we keep an extra shrink pin in TTM + * if the underlying pages are not currently shrinkable. Once we release + * our pin, like when the pages are moved to shmem, the pages will then + * be added to the shrinker LRU, assuming the caller isn't also holding + * a pin. + * + * TODO: consider maybe also bumping the shrinker list here when we have + * already unpinned it, which should give us something more like an LRU. + * + * TODO: There is a small window of opportunity for this function to + * get called from eviction after we've dropped the last GEM refcount, + * but before the TTM deleted flag is set on the object. Avoid + * adjusting the shrinker list in such cases, since the object is + * not available to the shrinker anyway due to its zero refcount. + * To fix this properly we should move to a TTM shrinker LRU list for + * these objects. 
+ */ + if (kref_get_unless_zero(&obj->base.refcount)) { + if (shrinkable != obj->mm.ttm_shrinkable) { + if (shrinkable) { + if (obj->mm.madv == I915_MADV_WILLNEED) + __i915_gem_object_make_shrinkable(obj); + else + __i915_gem_object_make_purgeable(obj); + } else { + i915_gem_object_make_unshrinkable(obj); + } + + obj->mm.ttm_shrinkable = shrinkable; + } + i915_gem_object_put(obj); + } + + /* * Put on the correct LRU list depending on the MADV status */ spin_lock(&bo->bdev->lru_lock); - if (obj->mm.madv != I915_MADV_WILLNEED) { + if (shrinkable) { + /* Try to keep shmem_tt from being considered for shrinking. */ + bo->priority = TTM_MAX_BO_PRIORITY - 1; + } else if (obj->mm.madv != I915_MADV_WILLNEED) { bo->priority = I915_TTM_PRIO_PURGE; } else if (!i915_gem_object_has_pages(obj)) { if (bo->priority < I915_TTM_PRIO_HAS_PAGES) @@ -823,15 +864,39 @@ static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj) static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) { struct vm_area_struct *area = vmf->vma; - struct drm_i915_gem_object *obj = - i915_ttm_to_gem(area->vm_private_data); + struct ttm_buffer_object *bo = area->vm_private_data; + struct drm_device *dev = bo->base.dev; + struct drm_i915_gem_object *obj; + vm_fault_t ret; + int idx; + + obj = i915_ttm_to_gem(bo); + if (!obj) + return VM_FAULT_SIGBUS; /* Sanity check that we allow writing into this object */ if (unlikely(i915_gem_object_is_readonly(obj) && area->vm_flags & VM_WRITE)) return VM_FAULT_SIGBUS; - return ttm_bo_vm_fault(vmf); + ret = ttm_bo_vm_reserve(bo, vmf); + if (ret) + return ret; + + if (drm_dev_enter(dev, &idx)) { + ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, + TTM_BO_VM_NUM_PREFAULT); + drm_dev_exit(idx); + } else { + ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot); + } + if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) + return ret; + + i915_ttm_adjust_lru(obj); + + dma_resv_unlock(bo->base.resv); + return ret; } static int @@ -882,13 +947,18 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj) static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = { .name = "i915_gem_object_ttm", + .flags = I915_GEM_OBJECT_IS_SHRINKABLE | + I915_GEM_OBJECT_SELF_MANAGED_SHRINK_LIST, .get_pages = i915_ttm_get_pages, .put_pages = i915_ttm_put_pages, .truncate = i915_ttm_purge, + .shrinker_release_pages = i915_ttm_shrinker_release_pages, + .adjust_lru = i915_ttm_adjust_lru, .delayed_free = i915_ttm_delayed_free, .migrate = i915_ttm_migrate, + .mmap_offset = i915_ttm_mmap_offset, .mmap_ops = &vm_ops_ttm, }; @@ -901,6 +971,18 @@ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo) mutex_destroy(&obj->ttm.get_io_page.lock); if (obj->ttm.created) { + /* + * We freely manage the shrinker LRU outide of the mm.pages life + * cycle. As a result when destroying the object we should be + * extra paranoid and ensure we remove it from the LRU, before + * we free the object. + * + * Touching the ttm_shrinkable outside of the object lock here + * should be safe now that the last GEM object ref was dropped. + */ + if (obj->mm.ttm_shrinkable) + i915_gem_object_make_unshrinkable(obj); + i915_ttm_backup_free(obj); /* This releases all gem object bindings to the backend. */ @@ -940,10 +1022,9 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem, i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags); /* Don't put on a region list until we're either locked or fully initialized. 
*/ - obj->mm.region = intel_memory_region_get(mem); + obj->mm.region = mem; INIT_LIST_HEAD(&obj->mm.region_link); - i915_gem_object_make_unshrinkable(obj); INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN); mutex_init(&obj->ttm.get_io_page.lock); bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device : @@ -955,6 +1036,14 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem, GEM_BUG_ON(page_size && obj->mm.n_placements); /* + * Keep an extra shrink pin to prevent the object from being made + * shrinkable too early. If the ttm_tt is ever allocated in shmem, we + * drop the pin. The TTM backend manages the shrinker LRU itself, + * outside of the normal mm.pages life cycle. + */ + i915_gem_object_make_unshrinkable(obj); + + /* * If this function fails, it will call the destructor, but * our caller still owns the object. So no freeing in the * destructor until obj->ttm.created is true. @@ -980,6 +1069,7 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem, static const struct intel_memory_region_ops ttm_system_region_ops = { .init_object = __i915_gem_ttm_object_init, + .release = intel_region_ttm_fini, }; struct intel_memory_region * @@ -999,50 +1089,3 @@ i915_gem_ttm_system_setup(struct drm_i915_private *i915, intel_memory_region_set_name(mr, "system-ttm"); return mr; } - -/** - * i915_gem_obj_copy_ttm - Copy the contents of one ttm-based gem object to - * another - * @dst: The destination object - * @src: The source object - * @allow_accel: Allow using the blitter. Otherwise TTM memcpy is used. - * @intr: Whether to perform waits interruptible: - * - * Note: The caller is responsible for assuring that the underlying - * TTM objects are populated if needed and locked. - * - * Return: Zero on success. Negative error code on error. If @intr == true, - * then it may return -ERESTARTSYS or -EINTR. - */ -int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst, - struct drm_i915_gem_object *src, - bool allow_accel, bool intr) -{ - struct ttm_buffer_object *dst_bo = i915_gem_to_ttm(dst); - struct ttm_buffer_object *src_bo = i915_gem_to_ttm(src); - struct ttm_operation_ctx ctx = { - .interruptible = intr, - }; - struct sg_table *dst_st; - int ret; - - assert_object_held(dst); - assert_object_held(src); - - /* - * Sync for now. This will change with async moves. - */ - ret = ttm_bo_wait_ctx(dst_bo, &ctx); - if (!ret) - ret = ttm_bo_wait_ctx(src_bo, &ctx); - if (ret) - return ret; - - dst_st = gpu_binds_iomem(dst_bo->resource) ? 
- dst->ttm.cached_io_st : i915_ttm_tt_get_st(dst_bo->ttm); - - __i915_ttm_move(src_bo, false, dst_bo->resource, dst_bo->ttm, - dst_st, allow_accel); - - return 0; -} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h index 0b7291dd897c..9d698ad00853 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h @@ -5,6 +5,8 @@ #ifndef _I915_GEM_TTM_H_ #define _I915_GEM_TTM_H_ +#include <drm/ttm/ttm_placement.h> + #include "gem/i915_gem_object_types.h" /** @@ -35,7 +37,7 @@ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo); static inline struct drm_i915_gem_object * i915_ttm_to_gem(struct ttm_buffer_object *bo) { - if (GEM_WARN_ON(bo->destroy != i915_ttm_bo_destroy)) + if (bo->destroy != i915_ttm_bo_destroy) return NULL; return container_of(bo, struct drm_i915_gem_object, __do_not_access); @@ -47,10 +49,6 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem, resource_size_t page_size, unsigned int flags); -int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst, - struct drm_i915_gem_object *src, - bool allow_accel, bool intr); - /* Internal I915 TTM declarations and definitions below. */ #define I915_PL_LMEM0 TTM_PL_PRIV @@ -60,4 +58,37 @@ int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst, struct ttm_placement *i915_ttm_sys_placement(void); +void i915_ttm_free_cached_io_rsgt(struct drm_i915_gem_object *obj); + +struct i915_refct_sgt * +i915_ttm_resource_get_st(struct drm_i915_gem_object *obj, + struct ttm_resource *res); + +void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj); + +int i915_ttm_purge(struct drm_i915_gem_object *obj); + +/** + * i915_ttm_gtt_binds_lmem - Should the memory be viewed as LMEM by the GTT? + * @mem: struct ttm_resource representing the memory. + * + * Return: true if memory should be viewed as LMEM for GTT binding purposes, + * false otherwise. + */ +static inline bool i915_ttm_gtt_binds_lmem(struct ttm_resource *mem) +{ + return mem->mem_type != I915_PL_SYSTEM; +} + +/** + * i915_ttm_cpu_maps_iomem - Should the memory be viewed as IOMEM by the CPU? + * @mem: struct ttm_resource representing the memory. + * + * Return: true if memory should be viewed as IOMEM for CPU mapping purposes. + */ +static inline bool i915_ttm_cpu_maps_iomem(struct ttm_resource *mem) +{ + /* Once / if we support GGTT, this is also false for cached ttm_tts */ + return mem->mem_type != I915_PL_SYSTEM; +} #endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c new file mode 100644 index 000000000000..80df9f592407 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c @@ -0,0 +1,874 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +#include <linux/dma-fence-array.h> + +#include <drm/ttm/ttm_bo_driver.h> + +#include "i915_drv.h" +#include "intel_memory_region.h" +#include "intel_region_ttm.h" + +#include "gem/i915_gem_object.h" +#include "gem/i915_gem_region.h" +#include "gem/i915_gem_ttm.h" +#include "gem/i915_gem_ttm_move.h" + +#include "gt/intel_engine_pm.h" +#include "gt/intel_gt.h" +#include "gt/intel_migrate.h" + +/** + * DOC: Selftest failure modes for failsafe migration: + * + * For fail_gpu_migration, the gpu blit scheduled is always a clear blit + * rather than a copy blit, and then we force the failure paths as if + * the blit fence returned an error. + * + * For fail_work_allocation we fail the kmalloc of the async worker, we + * sync the gpu blit. 
If it then fails, or fail_gpu_migration is set to
+ * true, then a memcpy operation is performed sync.
+ */
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+static bool fail_gpu_migration;
+static bool fail_work_allocation;
+
+void i915_ttm_migrate_set_failure_modes(bool gpu_migration,
+					bool work_allocation)
+{
+	fail_gpu_migration = gpu_migration;
+	fail_work_allocation = work_allocation;
+}
+#endif
+
+/**
+ * DOC: Set of utilities to dynamically collect dependencies and
+ * eventually coalesce them into a single fence which is fed into
+ * the GT migration code, since it only accepts a single dependency
+ * fence.
+ * The single fence returned from these utilities is, in the case of
+ * dependencies from multiple fence contexts, a struct dma_fence_array,
+ * since the i915 request code can break that up and await the individual
+ * fences.
+ *
+ * Once we can do async unbinding, this is also needed to coalesce
+ * the migration fence with the unbind fences.
+ *
+ * While collecting the individual dependencies, we store the refcounted
+ * struct dma_fence pointers in a realloc-managed pointer array, since
+ * that can be easily fed into a dma_fence_array. Other options are
+ * available, like for example an xarray for similarity with drm/sched.
+ * Can be changed easily if needed.
+ *
+ * A struct i915_deps needs to be initialized using i915_deps_init().
+ * If i915_deps_add_dependency() or i915_deps_add_resv() return an
+ * error code they will internally call i915_deps_fini(), which frees
+ * all internal references and allocations. After a call to
+ * i915_deps_to_fence(), or i915_deps_sync(), the struct should similarly
+ * be viewed as uninitialized.
+ *
+ * We might want to break this out into a separate file as a utility.
+ */
+
+#define I915_DEPS_MIN_ALLOC_CHUNK 8U
+
+/**
+ * struct i915_deps - Collect dependencies into a single dma-fence
+ * @single: Storage for pointer if the collection is a single fence.
+ * @fences: Allocated array of fence pointers if more than a single fence;
+ * otherwise points to the address of @single.
+ * @num_deps: Current number of dependency fences.
+ * @fences_size: Size of the @fences array in number of pointers.
+ * @gfp: Allocation mode.
+ */ +struct i915_deps { + struct dma_fence *single; + struct dma_fence **fences; + unsigned int num_deps; + unsigned int fences_size; + gfp_t gfp; +}; + +static void i915_deps_reset_fences(struct i915_deps *deps) +{ + if (deps->fences != &deps->single) + kfree(deps->fences); + deps->num_deps = 0; + deps->fences_size = 1; + deps->fences = &deps->single; +} + +static void i915_deps_init(struct i915_deps *deps, gfp_t gfp) +{ + deps->fences = NULL; + deps->gfp = gfp; + i915_deps_reset_fences(deps); +} + +static void i915_deps_fini(struct i915_deps *deps) +{ + unsigned int i; + + for (i = 0; i < deps->num_deps; ++i) + dma_fence_put(deps->fences[i]); + + if (deps->fences != &deps->single) + kfree(deps->fences); +} + +static int i915_deps_grow(struct i915_deps *deps, struct dma_fence *fence, + const struct ttm_operation_ctx *ctx) +{ + int ret; + + if (deps->num_deps >= deps->fences_size) { + unsigned int new_size = 2 * deps->fences_size; + struct dma_fence **new_fences; + + new_size = max(new_size, I915_DEPS_MIN_ALLOC_CHUNK); + new_fences = kmalloc_array(new_size, sizeof(*new_fences), deps->gfp); + if (!new_fences) + goto sync; + + memcpy(new_fences, deps->fences, + deps->fences_size * sizeof(*new_fences)); + swap(new_fences, deps->fences); + if (new_fences != &deps->single) + kfree(new_fences); + deps->fences_size = new_size; + } + deps->fences[deps->num_deps++] = dma_fence_get(fence); + return 0; + +sync: + if (ctx->no_wait_gpu && !dma_fence_is_signaled(fence)) { + ret = -EBUSY; + goto unref; + } + + ret = dma_fence_wait(fence, ctx->interruptible); + if (ret) + goto unref; + + ret = fence->error; + if (ret) + goto unref; + + return 0; + +unref: + i915_deps_fini(deps); + return ret; +} + +static int i915_deps_sync(struct i915_deps *deps, + const struct ttm_operation_ctx *ctx) +{ + struct dma_fence **fences = deps->fences; + unsigned int i; + int ret = 0; + + for (i = 0; i < deps->num_deps; ++i, ++fences) { + if (ctx->no_wait_gpu && !dma_fence_is_signaled(*fences)) { + ret = -EBUSY; + break; + } + + ret = dma_fence_wait(*fences, ctx->interruptible); + if (!ret) + ret = (*fences)->error; + if (ret) + break; + } + + i915_deps_fini(deps); + return ret; +} + +static int i915_deps_add_dependency(struct i915_deps *deps, + struct dma_fence *fence, + const struct ttm_operation_ctx *ctx) +{ + unsigned int i; + int ret; + + if (!fence) + return 0; + + if (dma_fence_is_signaled(fence)) { + ret = fence->error; + if (ret) + i915_deps_fini(deps); + return ret; + } + + for (i = 0; i < deps->num_deps; ++i) { + struct dma_fence *entry = deps->fences[i]; + + if (!entry->context || entry->context != fence->context) + continue; + + if (dma_fence_is_later(fence, entry)) { + dma_fence_put(entry); + deps->fences[i] = dma_fence_get(fence); + } + + return 0; + } + + return i915_deps_grow(deps, fence, ctx); +} + +static struct dma_fence *i915_deps_to_fence(struct i915_deps *deps, + const struct ttm_operation_ctx *ctx) +{ + struct dma_fence_array *array; + + if (deps->num_deps == 0) + return NULL; + + if (deps->num_deps == 1) { + deps->num_deps = 0; + return deps->fences[0]; + } + + /* + * TODO: Alter the allocation mode here to not try too hard to + * make things async. 
+ */ + array = dma_fence_array_create(deps->num_deps, deps->fences, 0, 0, + false); + if (!array) + return ERR_PTR(i915_deps_sync(deps, ctx)); + + deps->fences = NULL; + i915_deps_reset_fences(deps); + + return &array->base; +} + +static int i915_deps_add_resv(struct i915_deps *deps, struct dma_resv *resv, + bool all, const bool no_excl, + const struct ttm_operation_ctx *ctx) +{ + struct dma_resv_iter iter; + struct dma_fence *fence; + + dma_resv_assert_held(resv); + dma_resv_for_each_fence(&iter, resv, all, fence) { + int ret; + + if (no_excl && dma_resv_iter_is_exclusive(&iter)) + continue; + + ret = i915_deps_add_dependency(deps, fence, ctx); + if (ret) + return ret; + } + + return 0; +} + +static enum i915_cache_level +i915_ttm_cache_level(struct drm_i915_private *i915, struct ttm_resource *res, + struct ttm_tt *ttm) +{ + return ((HAS_LLC(i915) || HAS_SNOOP(i915)) && + !i915_ttm_gtt_binds_lmem(res) && + ttm->caching == ttm_cached) ? I915_CACHE_LLC : + I915_CACHE_NONE; +} + +static struct intel_memory_region * +i915_ttm_region(struct ttm_device *bdev, int ttm_mem_type) +{ + struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev); + + /* There's some room for optimization here... */ + GEM_BUG_ON(ttm_mem_type != I915_PL_SYSTEM && + ttm_mem_type < I915_PL_LMEM0); + if (ttm_mem_type == I915_PL_SYSTEM) + return intel_memory_region_lookup(i915, INTEL_MEMORY_SYSTEM, + 0); + + return intel_memory_region_lookup(i915, INTEL_MEMORY_LOCAL, + ttm_mem_type - I915_PL_LMEM0); +} + +/** + * i915_ttm_adjust_domains_after_move - Adjust the GEM domains after a + * TTM move + * @obj: The gem object + */ +void i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj) +{ + struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); + + if (i915_ttm_cpu_maps_iomem(bo->resource) || bo->ttm->caching != ttm_cached) { + obj->write_domain = I915_GEM_DOMAIN_WC; + obj->read_domains = I915_GEM_DOMAIN_WC; + } else { + obj->write_domain = I915_GEM_DOMAIN_CPU; + obj->read_domains = I915_GEM_DOMAIN_CPU; + } +} + +/** + * i915_ttm_adjust_gem_after_move - Adjust the GEM state after a TTM move + * @obj: The gem object + * + * Adjusts the GEM object's region, mem_flags and cache coherency after a + * TTM move. + */ +void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj) +{ + struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); + unsigned int cache_level; + unsigned int i; + + /* + * If object was moved to an allowable region, update the object + * region to consider it migrated. Note that if it's currently not + * in an allowable region, it's evicted and we don't update the + * object region. + */ + if (intel_region_to_ttm_type(obj->mm.region) != bo->resource->mem_type) { + for (i = 0; i < obj->mm.n_placements; ++i) { + struct intel_memory_region *mr = obj->mm.placements[i]; + + if (intel_region_to_ttm_type(mr) == bo->resource->mem_type && + mr != obj->mm.region) { + i915_gem_object_release_memory_region(obj); + i915_gem_object_init_memory_region(obj, mr); + break; + } + } + } + + obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM); + + obj->mem_flags |= i915_ttm_cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM : + I915_BO_FLAG_STRUCT_PAGE; + + cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource, + bo->ttm); + i915_gem_object_set_cache_coherency(obj, cache_level); +} + +/** + * i915_ttm_move_notify - Prepare an object for move + * @bo: The ttm buffer object. 
+ * + * This function prepares an object for move by removing all GPU bindings, + * removing all CPU mapings and finally releasing the pages sg-table. + * + * Return: 0 if successful, negative error code on error. + */ +int i915_ttm_move_notify(struct ttm_buffer_object *bo) +{ + struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); + int ret; + + ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE); + if (ret) + return ret; + + ret = __i915_gem_object_put_pages(obj); + if (ret) + return ret; + + return 0; +} + +static struct dma_fence *i915_ttm_accel_move(struct ttm_buffer_object *bo, + bool clear, + struct ttm_resource *dst_mem, + struct ttm_tt *dst_ttm, + struct sg_table *dst_st, + struct dma_fence *dep) +{ + struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915), + bdev); + struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); + struct i915_request *rq; + struct ttm_tt *src_ttm = bo->ttm; + enum i915_cache_level src_level, dst_level; + int ret; + + if (!i915->gt.migrate.context || intel_gt_is_wedged(&i915->gt)) + return ERR_PTR(-EINVAL); + + /* With fail_gpu_migration, we always perform a GPU clear. */ + if (I915_SELFTEST_ONLY(fail_gpu_migration)) + clear = true; + + dst_level = i915_ttm_cache_level(i915, dst_mem, dst_ttm); + if (clear) { + if (bo->type == ttm_bo_type_kernel && + !I915_SELFTEST_ONLY(fail_gpu_migration)) + return ERR_PTR(-EINVAL); + + intel_engine_pm_get(i915->gt.migrate.context->engine); + ret = intel_context_migrate_clear(i915->gt.migrate.context, dep, + dst_st->sgl, dst_level, + i915_ttm_gtt_binds_lmem(dst_mem), + 0, &rq); + } else { + struct i915_refct_sgt *src_rsgt = + i915_ttm_resource_get_st(obj, bo->resource); + + if (IS_ERR(src_rsgt)) + return ERR_CAST(src_rsgt); + + src_level = i915_ttm_cache_level(i915, bo->resource, src_ttm); + intel_engine_pm_get(i915->gt.migrate.context->engine); + ret = intel_context_migrate_copy(i915->gt.migrate.context, + dep, src_rsgt->table.sgl, + src_level, + i915_ttm_gtt_binds_lmem(bo->resource), + dst_st->sgl, dst_level, + i915_ttm_gtt_binds_lmem(dst_mem), + &rq); + + i915_refct_sgt_put(src_rsgt); + } + + intel_engine_pm_put(i915->gt.migrate.context->engine); + + if (ret && rq) { + i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT); + i915_request_put(rq); + } + + return ret ? ERR_PTR(ret) : &rq->fence; +} + +/** + * struct i915_ttm_memcpy_arg - argument for the bo memcpy functionality. + * @_dst_iter: Storage space for the destination kmap iterator. + * @_src_iter: Storage space for the source kmap iterator. + * @dst_iter: Pointer to the destination kmap iterator. + * @src_iter: Pointer to the source kmap iterator. + * @clear: Whether to clear instead of copy. + * @src_rsgt: Refcounted scatter-gather list of source memory. + * @dst_rsgt: Refcounted scatter-gather list of destination memory. + */ +struct i915_ttm_memcpy_arg { + union { + struct ttm_kmap_iter_tt tt; + struct ttm_kmap_iter_iomap io; + } _dst_iter, + _src_iter; + struct ttm_kmap_iter *dst_iter; + struct ttm_kmap_iter *src_iter; + unsigned long num_pages; + bool clear; + struct i915_refct_sgt *src_rsgt; + struct i915_refct_sgt *dst_rsgt; +}; + +/** + * struct i915_ttm_memcpy_work - Async memcpy worker under a dma-fence. + * @fence: The dma-fence. + * @work: The work struct use for the memcpy work. + * @lock: The fence lock. Not used to protect anything else ATM. + * @irq_work: Low latency worker to signal the fence since it can't be done + * from the callback for lockdep reasons. + * @cb: Callback for the accelerated migration fence. 
+ * @arg: The argument for the memcpy functionality.
+ */
+struct i915_ttm_memcpy_work {
+	struct dma_fence fence;
+	struct work_struct work;
+	/* The fence lock */
+	spinlock_t lock;
+	struct irq_work irq_work;
+	struct dma_fence_cb cb;
+	struct i915_ttm_memcpy_arg arg;
+};
+
+static void i915_ttm_move_memcpy(struct i915_ttm_memcpy_arg *arg)
+{
+	ttm_move_memcpy(arg->clear, arg->num_pages,
+			arg->dst_iter, arg->src_iter);
+}
+
+static void i915_ttm_memcpy_init(struct i915_ttm_memcpy_arg *arg,
+				 struct ttm_buffer_object *bo, bool clear,
+				 struct ttm_resource *dst_mem,
+				 struct ttm_tt *dst_ttm,
+				 struct i915_refct_sgt *dst_rsgt)
+{
+	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+	struct intel_memory_region *dst_reg, *src_reg;
+
+	dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type);
+	src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type);
+	GEM_BUG_ON(!dst_reg || !src_reg);
+
+	arg->dst_iter = !i915_ttm_cpu_maps_iomem(dst_mem) ?
+		ttm_kmap_iter_tt_init(&arg->_dst_iter.tt, dst_ttm) :
+		ttm_kmap_iter_iomap_init(&arg->_dst_iter.io, &dst_reg->iomap,
+					 &dst_rsgt->table, dst_reg->region.start);
+
+	arg->src_iter = !i915_ttm_cpu_maps_iomem(bo->resource) ?
+		ttm_kmap_iter_tt_init(&arg->_src_iter.tt, bo->ttm) :
+		ttm_kmap_iter_iomap_init(&arg->_src_iter.io, &src_reg->iomap,
+					 &obj->ttm.cached_io_rsgt->table,
+					 src_reg->region.start);
+	arg->clear = clear;
+	arg->num_pages = bo->base.size >> PAGE_SHIFT;
+
+	arg->dst_rsgt = i915_refct_sgt_get(dst_rsgt);
+	arg->src_rsgt = clear ? NULL :
+		i915_ttm_resource_get_st(obj, bo->resource);
+}
+
+static void i915_ttm_memcpy_release(struct i915_ttm_memcpy_arg *arg)
+{
+	i915_refct_sgt_put(arg->src_rsgt);
+	i915_refct_sgt_put(arg->dst_rsgt);
+}
+
+static void __memcpy_work(struct work_struct *work)
+{
+	struct i915_ttm_memcpy_work *copy_work =
+		container_of(work, typeof(*copy_work), work);
+	struct i915_ttm_memcpy_arg *arg = &copy_work->arg;
+	bool cookie = dma_fence_begin_signalling();
+
+	i915_ttm_move_memcpy(arg);
+	dma_fence_end_signalling(cookie);
+
+	dma_fence_signal(&copy_work->fence);
+
+	i915_ttm_memcpy_release(arg);
+	dma_fence_put(&copy_work->fence);
+}
+
+static void __memcpy_irq_work(struct irq_work *irq_work)
+{
+	struct i915_ttm_memcpy_work *copy_work =
+		container_of(irq_work, typeof(*copy_work), irq_work);
+	struct i915_ttm_memcpy_arg *arg = &copy_work->arg;
+
+	dma_fence_signal(&copy_work->fence);
+	i915_ttm_memcpy_release(arg);
+	dma_fence_put(&copy_work->fence);
+}
+
+static void __memcpy_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+	struct i915_ttm_memcpy_work *copy_work =
+		container_of(cb, typeof(*copy_work), cb);
+
+	if (unlikely(fence->error || I915_SELFTEST_ONLY(fail_gpu_migration))) {
+		INIT_WORK(&copy_work->work, __memcpy_work);
+		queue_work(system_unbound_wq, &copy_work->work);
+	} else {
+		init_irq_work(&copy_work->irq_work, __memcpy_irq_work);
+		irq_work_queue(&copy_work->irq_work);
+	}
+}
+
+static const char *get_driver_name(struct dma_fence *fence)
+{
+	return "i915_ttm_memcpy_work";
+}
+
+static const char *get_timeline_name(struct dma_fence *fence)
+{
+	return "unbound";
+}
+
+static const struct dma_fence_ops dma_fence_memcpy_ops = {
+	.get_driver_name = get_driver_name,
+	.get_timeline_name = get_timeline_name,
+};
+
+static struct dma_fence *
+i915_ttm_memcpy_work_arm(struct i915_ttm_memcpy_work *work,
+			 struct dma_fence *dep)
+{
+	int ret;
+
+	spin_lock_init(&work->lock);
+	dma_fence_init(&work->fence, &dma_fence_memcpy_ops, &work->lock, 0, 0);
+	dma_fence_get(&work->fence);
+	ret = dma_fence_add_callback(dep, &work->cb, __memcpy_cb);
+	if (ret) {
+		if (ret != -ENOENT)
+			dma_fence_wait(dep, false);
+
+		return ERR_PTR(I915_SELFTEST_ONLY(fail_gpu_migration) ? -EINVAL :
+			       dep->error);
+	}
+
+	return &work->fence;
+}
+
+static struct dma_fence *
+__i915_ttm_move(struct ttm_buffer_object *bo, bool clear,
+		struct ttm_resource *dst_mem, struct ttm_tt *dst_ttm,
+		struct i915_refct_sgt *dst_rsgt, bool allow_accel,
+		struct dma_fence *move_dep)
+{
+	struct i915_ttm_memcpy_work *copy_work = NULL;
+	struct i915_ttm_memcpy_arg _arg, *arg = &_arg;
+	struct dma_fence *fence = ERR_PTR(-EINVAL);
+
+	if (allow_accel) {
+		fence = i915_ttm_accel_move(bo, clear, dst_mem, dst_ttm,
+					    &dst_rsgt->table, move_dep);
+
+		/*
+		 * We only need to intercept the error when moving to lmem.
+		 * When moving to system, TTM or shmem will provide us with
+		 * cleared pages.
+		 */
+		if (!IS_ERR(fence) && !i915_ttm_gtt_binds_lmem(dst_mem) &&
+		    !I915_SELFTEST_ONLY(fail_gpu_migration ||
+					fail_work_allocation))
+			goto out;
+	}
+
+	/* If we've scheduled gpu migration. Try to arm error intercept. */
+	if (!IS_ERR(fence)) {
+		struct dma_fence *dep = fence;
+
+		if (!I915_SELFTEST_ONLY(fail_work_allocation))
+			copy_work = kzalloc(sizeof(*copy_work), GFP_KERNEL);
+
+		if (copy_work) {
+			arg = &copy_work->arg;
+			i915_ttm_memcpy_init(arg, bo, clear, dst_mem, dst_ttm,
+					     dst_rsgt);
+			fence = i915_ttm_memcpy_work_arm(copy_work, dep);
+		} else {
+			dma_fence_wait(dep, false);
+			fence = ERR_PTR(I915_SELFTEST_ONLY(fail_gpu_migration) ?
+					-EINVAL : fence->error);
+		}
+		dma_fence_put(dep);
+
+		if (!IS_ERR(fence))
+			goto out;
+	} else if (move_dep) {
+		int err = dma_fence_wait(move_dep, true);
+
+		if (err)
+			return ERR_PTR(err);
+	}
+
+	/* Error intercept failed or no accelerated migration to start with */
+	if (!copy_work)
+		i915_ttm_memcpy_init(arg, bo, clear, dst_mem, dst_ttm,
+				     dst_rsgt);
+	i915_ttm_move_memcpy(arg);
+	i915_ttm_memcpy_release(arg);
+	kfree(copy_work);
+
+	return NULL;
+out:
+	if (!fence && copy_work) {
+		i915_ttm_memcpy_release(arg);
+		kfree(copy_work);
+	}
+
+	return fence;
+}
+
+static struct dma_fence *prev_fence(struct ttm_buffer_object *bo,
+				    struct ttm_operation_ctx *ctx)
+{
+	struct i915_deps deps;
+	int ret;
+
+	/*
+	 * Instead of trying hard with GFP_KERNEL to allocate memory,
+	 * the dependency collection will just sync if it doesn't
+	 * succeed.
+	 */
+	i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
+	ret = i915_deps_add_dependency(&deps, bo->moving, ctx);
+	if (!ret)
+		/*
+		 * TODO: Only await excl fence here, and shared fences before
+		 * signaling the migration fence.
+		 */
+		ret = i915_deps_add_resv(&deps, bo->base.resv, true, false, ctx);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return i915_deps_to_fence(&deps, ctx);
+}
+
+/**
+ * i915_ttm_move - The TTM move callback used by i915.
+ * @bo: The buffer object.
+ * @evict: Whether this is an eviction.
+ * @dst_mem: The destination ttm resource.
+ * @hop: If we need multihop, what temporary memory type to move to.
+ *
+ * Return: 0 if successful, negative error code otherwise.
+ */ +int i915_ttm_move(struct ttm_buffer_object *bo, bool evict, + struct ttm_operation_ctx *ctx, + struct ttm_resource *dst_mem, + struct ttm_place *hop) +{ + struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo); + struct ttm_resource_manager *dst_man = + ttm_manager_type(bo->bdev, dst_mem->mem_type); + struct dma_fence *migration_fence = NULL; + struct ttm_tt *ttm = bo->ttm; + struct i915_refct_sgt *dst_rsgt; + bool clear; + int ret; + + if (GEM_WARN_ON(!obj)) { + ttm_bo_move_null(bo, dst_mem); + return 0; + } + + ret = i915_ttm_move_notify(bo); + if (ret) + return ret; + + if (obj->mm.madv != I915_MADV_WILLNEED) { + i915_ttm_purge(obj); + ttm_resource_free(bo, &dst_mem); + return 0; + } + + /* Populate ttm with pages if needed. Typically system memory. */ + if (ttm && (dst_man->use_tt || (ttm->page_flags & TTM_TT_FLAG_SWAPPED))) { + ret = ttm_tt_populate(bo->bdev, ttm, ctx); + if (ret) + return ret; + } + + dst_rsgt = i915_ttm_resource_get_st(obj, dst_mem); + if (IS_ERR(dst_rsgt)) + return PTR_ERR(dst_rsgt); + + clear = !i915_ttm_cpu_maps_iomem(bo->resource) && (!ttm || !ttm_tt_is_populated(ttm)); + if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC))) { + struct dma_fence *dep = prev_fence(bo, ctx); + + if (IS_ERR(dep)) { + i915_refct_sgt_put(dst_rsgt); + return PTR_ERR(dep); + } + + migration_fence = __i915_ttm_move(bo, clear, dst_mem, bo->ttm, + dst_rsgt, true, dep); + dma_fence_put(dep); + } + + /* We can possibly get an -ERESTARTSYS here */ + if (IS_ERR(migration_fence)) { + i915_refct_sgt_put(dst_rsgt); + return PTR_ERR(migration_fence); + } + + if (migration_fence) { + ret = ttm_bo_move_accel_cleanup(bo, migration_fence, evict, + true, dst_mem); + if (ret) { + dma_fence_wait(migration_fence, false); + ttm_bo_move_sync_cleanup(bo, dst_mem); + } + dma_fence_put(migration_fence); + } else { + ttm_bo_move_sync_cleanup(bo, dst_mem); + } + + i915_ttm_adjust_domains_after_move(obj); + i915_ttm_free_cached_io_rsgt(obj); + + if (i915_ttm_gtt_binds_lmem(dst_mem) || i915_ttm_cpu_maps_iomem(dst_mem)) { + obj->ttm.cached_io_rsgt = dst_rsgt; + obj->ttm.get_io_page.sg_pos = dst_rsgt->table.sgl; + obj->ttm.get_io_page.sg_idx = 0; + } else { + i915_refct_sgt_put(dst_rsgt); + } + + i915_ttm_adjust_lru(obj); + i915_ttm_adjust_gem_after_move(obj); + return 0; +} + +/** + * i915_gem_obj_copy_ttm - Copy the contents of one ttm-based gem object to + * another + * @dst: The destination object + * @src: The source object + * @allow_accel: Allow using the blitter. Otherwise TTM memcpy is used. + * @intr: Whether to perform waits interruptible: + * + * Note: The caller is responsible for assuring that the underlying + * TTM objects are populated if needed and locked. + * + * Return: Zero on success. Negative error code on error. If @intr == true, + * then it may return -ERESTARTSYS or -EINTR. + */ +int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst, + struct drm_i915_gem_object *src, + bool allow_accel, bool intr) +{ + struct ttm_buffer_object *dst_bo = i915_gem_to_ttm(dst); + struct ttm_buffer_object *src_bo = i915_gem_to_ttm(src); + struct ttm_operation_ctx ctx = { + .interruptible = intr, + }; + struct i915_refct_sgt *dst_rsgt; + struct dma_fence *copy_fence, *dep_fence; + struct i915_deps deps; + int ret, shared_err; + + assert_object_held(dst); + assert_object_held(src); + i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN); + + /* + * We plan to add a shared fence only for the source. 
If that + * fails, we await all source fences before commencing + * the copy instead of only the exclusive. + */ + shared_err = dma_resv_reserve_shared(src_bo->base.resv, 1); + ret = i915_deps_add_resv(&deps, dst_bo->base.resv, true, false, &ctx); + if (!ret) + ret = i915_deps_add_resv(&deps, src_bo->base.resv, + !!shared_err, false, &ctx); + if (ret) + return ret; + + dep_fence = i915_deps_to_fence(&deps, &ctx); + if (IS_ERR(dep_fence)) + return PTR_ERR(dep_fence); + + dst_rsgt = i915_ttm_resource_get_st(dst, dst_bo->resource); + copy_fence = __i915_ttm_move(src_bo, false, dst_bo->resource, + dst_bo->ttm, dst_rsgt, allow_accel, + dep_fence); + + i915_refct_sgt_put(dst_rsgt); + if (IS_ERR_OR_NULL(copy_fence)) + return PTR_ERR_OR_ZERO(copy_fence); + + dma_resv_add_excl_fence(dst_bo->base.resv, copy_fence); + + /* If we failed to reserve a shared slot, add an exclusive fence */ + if (shared_err) + dma_resv_add_excl_fence(src_bo->base.resv, copy_fence); + else + dma_resv_add_shared_fence(src_bo->base.resv, copy_fence); + + dma_fence_put(copy_fence); + + return 0; +} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.h new file mode 100644 index 000000000000..d2e7f149e05c --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ +#ifndef _I915_GEM_TTM_MOVE_H_ +#define _I915_GEM_TTM_MOVE_H_ + +#include <linux/types.h> + +#include "i915_selftest.h" + +struct ttm_buffer_object; +struct ttm_operation_ctx; +struct ttm_place; +struct ttm_resource; +struct ttm_tt; + +struct drm_i915_gem_object; +struct i915_refct_sgt; + +int i915_ttm_move_notify(struct ttm_buffer_object *bo); + +I915_SELFTEST_DECLARE(void i915_ttm_migrate_set_failure_modes(bool gpu_migration, + bool work_allocation)); + +int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst, + struct drm_i915_gem_object *src, + bool allow_accel, bool intr); + +/* Internal I915 TTM declarations and definitions below. 
*/ + +int i915_ttm_move(struct ttm_buffer_object *bo, bool evict, + struct ttm_operation_ctx *ctx, + struct ttm_resource *dst_mem, + struct ttm_place *hop); + +void i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj); + +void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj); + +#endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c index 3b6d14b5c604..9aad84059d56 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c @@ -12,6 +12,7 @@ #include "gem/i915_gem_region.h" #include "gem/i915_gem_ttm.h" +#include "gem/i915_gem_ttm_move.h" #include "gem/i915_gem_ttm_pm.h" /** @@ -79,6 +80,7 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply, err = i915_gem_obj_copy_ttm(backup, obj, pm_apply->allow_gpu, false); GEM_WARN_ON(err); + ttm_bo_wait_ctx(backup_bo, &ctx); obj->ttm.backup = backup; return 0; @@ -169,6 +171,7 @@ static int i915_ttm_restore(struct i915_gem_apply_to_region *apply, err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu, false); GEM_WARN_ON(err); + ttm_bo_wait_ctx(backup_bo, &ctx); obj->ttm.backup = NULL; err = 0; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c index cd149aa99364..dab3d30c09a0 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c @@ -254,6 +254,6 @@ int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj, unsigned int flags) { might_sleep(); - /* NOP for now. */ - return 0; + + return i915_gem_object_wait_moving_fence(obj, !!(flags & I915_WAIT_INTERRUPTIBLE)); } diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c index dbdbdc344d87..7271fbf813fa 100644 --- a/drivers/gpu/drm/i915/gem/i915_gemfs.c +++ b/drivers/gpu/drm/i915/gem/i915_gemfs.c @@ -12,6 +12,7 @@ int i915_gemfs_init(struct drm_i915_private *i915) { + char huge_opt[] = "huge=within_size"; /* r/w */ struct file_system_type *type; struct vfsmount *gemfs; char *opts; @@ -31,10 +32,8 @@ int i915_gemfs_init(struct drm_i915_private *i915) */ opts = NULL; - if (intel_vtd_active()) { + if (intel_vtd_active(i915)) { if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { - static char huge_opt[] = "huge=within_size"; /* r/w */ - opts = huge_opt; drm_info(&i915->drm, "Transparent Hugepage mode '%s'\n", diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index b2003133deaf..c69c7d45aabc 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -22,6 +22,22 @@ #include "selftests/mock_region.h" #include "selftests/i915_random.h" +static struct i915_gem_context *hugepage_ctx(struct drm_i915_private *i915, + struct file *file) +{ + struct i915_gem_context *ctx = live_context(i915, file); + struct i915_address_space *vm; + + if (IS_ERR(ctx)) + return ctx; + + vm = ctx->vm; + if (vm) + WRITE_ONCE(vm->scrub_64K, true); + + return ctx; +} + static const unsigned int page_sizes[] = { I915_GTT_PAGE_SIZE_2M, I915_GTT_PAGE_SIZE_64K, @@ -552,7 +568,7 @@ out_unpin: out_put: i915_gem_object_put(obj); out_region: - intel_memory_region_put(mem); + intel_memory_region_destroy(mem); return err; } @@ -959,6 +975,8 @@ static int igt_mock_ppgtt_64K(void *arg) __i915_gem_object_put_pages(obj); i915_gem_object_unlock(obj); i915_gem_object_put(obj); + + i915_gem_drain_freed_objects(i915); } } @@ -1080,10 +1098,6 @@ static int 
__igt_write_huge(struct intel_context *ce, if (IS_ERR(vma)) return PTR_ERR(vma); - err = i915_vma_unbind(vma); - if (err) - return err; - err = i915_vma_pin(vma, size, 0, flags | offset); if (err) { /* @@ -1117,7 +1131,7 @@ out_vma_unpin: return err; } -static int igt_write_huge(struct i915_gem_context *ctx, +static int igt_write_huge(struct drm_i915_private *i915, struct drm_i915_gem_object *obj) { struct i915_gem_engines *engines; @@ -1127,6 +1141,8 @@ static int igt_write_huge(struct i915_gem_context *ctx, IGT_TIMEOUT(end_time); unsigned int max_page_size; unsigned int count; + struct i915_gem_context *ctx; + struct file *file; u64 max; u64 num; u64 size; @@ -1134,6 +1150,16 @@ static int igt_write_huge(struct i915_gem_context *ctx, int i, n; int err = 0; + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + ctx = hugepage_ctx(i915, file); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out; + } + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); size = obj->base.size; @@ -1153,7 +1179,7 @@ static int igt_write_huge(struct i915_gem_context *ctx, } i915_gem_context_unlock_engines(ctx); if (!n) - return 0; + goto out; /* * To keep things interesting when alternating between engines in our @@ -1215,6 +1241,8 @@ static int igt_write_huge(struct i915_gem_context *ctx, kfree(order); +out: + fput(file); return err; } @@ -1277,8 +1305,7 @@ static u32 igt_random_size(struct rnd_state *prng, static int igt_ppgtt_smoke_huge(void *arg) { - struct i915_gem_context *ctx = arg; - struct drm_i915_private *i915 = ctx->i915; + struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj; I915_RND_STATE(prng); struct { @@ -1302,6 +1329,7 @@ static int igt_ppgtt_smoke_huge(void *arg) u32 min = backends[i].min; u32 max = backends[i].max; u32 size = max; + try_again: size = igt_random_size(&prng, min, rounddown_pow_of_two(size)); @@ -1336,7 +1364,7 @@ try_again: goto out_unpin; } - err = igt_write_huge(ctx, obj); + err = igt_write_huge(i915, obj); if (err) { pr_err("%s write-huge failed with size=%u, i=%d\n", __func__, size, i); @@ -1363,8 +1391,7 @@ out_put: static int igt_ppgtt_sanity_check(void *arg) { - struct i915_gem_context *ctx = arg; - struct drm_i915_private *i915 = ctx->i915; + struct drm_i915_private *i915 = arg; unsigned int supported = INTEL_INFO(i915)->page_sizes; struct { igt_create_fn fn; @@ -1431,7 +1458,7 @@ static int igt_ppgtt_sanity_check(void *arg) if (pages) obj->mm.page_sizes.sg = pages; - err = igt_write_huge(ctx, obj); + err = igt_write_huge(i915, obj); i915_gem_object_lock(obj, NULL); i915_gem_object_unpin_pages(obj); @@ -1458,15 +1485,27 @@ out: static int igt_tmpfs_fallback(void *arg) { - struct i915_gem_context *ctx = arg; - struct drm_i915_private *i915 = ctx->i915; + struct drm_i915_private *i915 = arg; + struct i915_address_space *vm; + struct i915_gem_context *ctx; struct vfsmount *gemfs = i915->mm.gemfs; - struct i915_address_space *vm = i915_gem_context_get_eb_vm(ctx); struct drm_i915_gem_object *obj; struct i915_vma *vma; + struct file *file; u32 *vaddr; int err = 0; + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + ctx = hugepage_ctx(i915, file); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out; + } + vm = i915_gem_context_get_eb_vm(ctx); + /* * Make sure that we don't burst into a ball of flames upon falling back * to tmpfs, which we rely on if on the off-chance we encouter a failure @@ -1510,33 +1549,47 @@ out_restore: i915->mm.gemfs = gemfs; i915_vm_put(vm); +out: + fput(file); return err; } static int 
igt_shrink_thp(void *arg) { - struct i915_gem_context *ctx = arg; - struct drm_i915_private *i915 = ctx->i915; - struct i915_address_space *vm = i915_gem_context_get_eb_vm(ctx); + struct drm_i915_private *i915 = arg; + struct i915_address_space *vm; + struct i915_gem_context *ctx; struct drm_i915_gem_object *obj; struct i915_gem_engines_iter it; struct intel_context *ce; struct i915_vma *vma; + struct file *file; unsigned int flags = PIN_USER; unsigned int n; bool should_swap; - int err = 0; + int err; + + if (!igt_can_allocate_thp(i915)) { + pr_info("missing THP support, skipping\n"); + return 0; + } + + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + ctx = hugepage_ctx(i915, file); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out; + } + vm = i915_gem_context_get_eb_vm(ctx); /* * Sanity check shrinking huge-paged object -- make sure nothing blows * up. */ - if (!igt_can_allocate_thp(i915)) { - pr_info("missing THP support, skipping\n"); - goto out_vm; - } - obj = i915_gem_object_create_shmem(i915, SZ_2M); if (IS_ERR(obj)) { err = PTR_ERR(obj); @@ -1626,7 +1679,8 @@ out_put: i915_gem_object_put(obj); out_vm: i915_vm_put(vm); - +out: + fput(file); return err; } @@ -1687,10 +1741,6 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_ppgtt_smoke_huge), SUBTEST(igt_ppgtt_sanity_check), }; - struct i915_gem_context *ctx; - struct i915_address_space *vm; - struct file *file; - int err; if (!HAS_PPGTT(i915)) { pr_info("PPGTT not supported, skipping live-selftests\n"); @@ -1700,23 +1750,5 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915) if (intel_gt_is_wedged(&i915->gt)) return 0; - file = mock_file(i915); - if (IS_ERR(file)) - return PTR_ERR(file); - - ctx = live_context(i915, file); - if (IS_ERR(ctx)) { - err = PTR_ERR(ctx); - goto out_file; - } - - vm = ctx->vm; - if (vm) - WRITE_ONCE(vm->scrub_64K, true); - - err = i915_subtests(tests, ctx); - -out_file: - fput(file); - return err; + return i915_live_subtests(tests, i915); } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index b32f7fed2d9c..21b71568cd5f 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -88,7 +88,7 @@ static int live_nop_switch(void *arg) rq = i915_request_get(this); i915_request_add(this); } - if (i915_request_wait(rq, 0, HZ) < 0) { + if (i915_request_wait(rq, 0, 10 * HZ) < 0) { pr_err("Failed to populated %d contexts\n", nctx); intel_gt_set_wedged(&i915->gt); i915_request_put(rq); diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c index 4a6bb64c3a35..3cc74b0fed06 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c @@ -102,7 +102,7 @@ static int igt_dmabuf_import_same_driver_lmem(void *arg) obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &lmem, 1); if (IS_ERR(obj)) { pr_err("__i915_gem_object_create_user failed with err=%ld\n", - PTR_ERR(dmabuf)); + PTR_ERR(obj)); err = PTR_ERR(obj); goto out_ret; } @@ -158,7 +158,7 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915, regions, num_regions); if (IS_ERR(obj)) { pr_err("__i915_gem_object_create_user failed with err=%ld\n", - PTR_ERR(dmabuf)); + PTR_ERR(obj)); err = PTR_ERR(obj); goto out_ret; } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c 
b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c index 28a700f08b49..4b8e6b098659 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c @@ -4,6 +4,7 @@ */ #include "gt/intel_migrate.h" +#include "gem/i915_gem_ttm_move.h" static int igt_fill_check_buffer(struct drm_i915_gem_object *obj, bool fill) @@ -227,13 +228,34 @@ out_put: return err; } +static int igt_lmem_pages_failsafe_migrate(void *arg) +{ + int fail_gpu, fail_alloc, ret; + + for (fail_gpu = 0; fail_gpu < 2; ++fail_gpu) { + for (fail_alloc = 0; fail_alloc < 2; ++fail_alloc) { + pr_info("Simulated failure modes: gpu: %d, alloc: %d\n", + fail_gpu, fail_alloc); + i915_ttm_migrate_set_failure_modes(fail_gpu, + fail_alloc); + ret = igt_lmem_pages_migrate(arg); + if (ret) + goto out_err; + } + } + +out_err: + i915_ttm_migrate_set_failure_modes(false, false); + return ret; +} + int i915_gem_migrate_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(igt_smem_create_migrate), SUBTEST(igt_lmem_create_migrate), SUBTEST(igt_same_create_migrate), - SUBTEST(igt_lmem_pages_migrate), + SUBTEST(igt_lmem_pages_failsafe_migrate), }; if (!HAS_LMEM(i915)) diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c index 890191f286e3..4a166d25fe60 100644 --- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c @@ -185,7 +185,6 @@ static void gen6_alloc_va_range(struct i915_address_space *vm, pt = stash->pt[0]; __i915_gem_object_pin_pages(pt->base); - i915_gem_object_make_unshrinkable(pt->base); fill32_px(pt, vm->scratch[0]->encode); @@ -262,13 +261,10 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm) { struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); - __i915_vma_put(ppgtt->vma); - gen6_ppgtt_free_pd(ppgtt); free_scratch(vm); mutex_destroy(&ppgtt->flush); - mutex_destroy(&ppgtt->pin_mutex); free_pd(&ppgtt->base.vm, ppgtt->base.pd); } @@ -331,37 +327,6 @@ static const struct i915_vma_ops pd_vma_ops = { .unbind_vma = pd_vma_unbind, }; -static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size) -{ - struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt; - struct i915_vma *vma; - - GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); - GEM_BUG_ON(size > ggtt->vm.total); - - vma = i915_vma_alloc(); - if (!vma) - return ERR_PTR(-ENOMEM); - - i915_active_init(&vma->active, NULL, NULL, 0); - - kref_init(&vma->ref); - mutex_init(&vma->pages_mutex); - vma->vm = i915_vm_get(&ggtt->vm); - vma->ops = &pd_vma_ops; - vma->private = ppgtt; - - vma->size = size; - vma->fence_size = size; - atomic_set(&vma->flags, I915_VMA_GGTT); - vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */ - - INIT_LIST_HEAD(&vma->obj_link); - INIT_LIST_HEAD(&vma->closed_link); - - return vma; -} - int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww) { struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); @@ -378,42 +343,92 @@ int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww) if (atomic_add_unless(&ppgtt->pin_count, 1, 0)) return 0; - if (mutex_lock_interruptible(&ppgtt->pin_mutex)) - return -EINTR; + /* grab the ppgtt resv to pin the object */ + err = i915_vm_lock_objects(&ppgtt->base.vm, ww); + if (err) + return err; /* * PPGTT PDEs reside in the GGTT and consists of 512 entries. The * allocator works in address space sizes, so it's multiplied by page * size. We allocate at the top of the GTT to avoid fragmentation. 
*/ - err = 0; - if (!atomic_read(&ppgtt->pin_count)) + if (!atomic_read(&ppgtt->pin_count)) { err = i915_ggtt_pin(ppgtt->vma, ww, GEN6_PD_ALIGN, PIN_HIGH); + + GEM_BUG_ON(ppgtt->vma->fence); + clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(ppgtt->vma)); + } if (!err) atomic_inc(&ppgtt->pin_count); - mutex_unlock(&ppgtt->pin_mutex); return err; } -void gen6_ppgtt_unpin(struct i915_ppgtt *base) +static int pd_dummy_obj_get_pages(struct drm_i915_gem_object *obj) { - struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); + obj->mm.pages = ZERO_SIZE_PTR; + return 0; +} - GEM_BUG_ON(!atomic_read(&ppgtt->pin_count)); - if (atomic_dec_and_test(&ppgtt->pin_count)) - i915_vma_unpin(ppgtt->vma); +static void pd_dummy_obj_put_pages(struct drm_i915_gem_object *obj, + struct sg_table *pages) +{ } -void gen6_ppgtt_unpin_all(struct i915_ppgtt *base) +static const struct drm_i915_gem_object_ops pd_dummy_obj_ops = { + .name = "pd_dummy_obj", + .get_pages = pd_dummy_obj_get_pages, + .put_pages = pd_dummy_obj_put_pages, +}; + +static struct i915_page_directory * +gen6_alloc_top_pd(struct gen6_ppgtt *ppgtt) { - struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); + struct i915_ggtt * const ggtt = ppgtt->base.vm.gt->ggtt; + struct i915_page_directory *pd; + int err; - if (!atomic_read(&ppgtt->pin_count)) - return; + pd = __alloc_pd(I915_PDES); + if (unlikely(!pd)) + return ERR_PTR(-ENOMEM); - i915_vma_unpin(ppgtt->vma); - atomic_set(&ppgtt->pin_count, 0); + pd->pt.base = __i915_gem_object_create_internal(ppgtt->base.vm.gt->i915, + &pd_dummy_obj_ops, + I915_PDES * SZ_4K); + if (IS_ERR(pd->pt.base)) { + err = PTR_ERR(pd->pt.base); + pd->pt.base = NULL; + goto err_pd; + } + + pd->pt.base->base.resv = i915_vm_resv_get(&ppgtt->base.vm); + pd->pt.base->shares_resv_from = &ppgtt->base.vm; + + ppgtt->vma = i915_vma_instance(pd->pt.base, &ggtt->vm, NULL); + if (IS_ERR(ppgtt->vma)) { + err = PTR_ERR(ppgtt->vma); + ppgtt->vma = NULL; + goto err_pd; + } + + /* The dummy object we create is special, override ops.. 
*/ + ppgtt->vma->ops = &pd_vma_ops; + ppgtt->vma->private = ppgtt; + return pd; + +err_pd: + free_pd(&ppgtt->base.vm, pd); + return ERR_PTR(err); +} + +void gen6_ppgtt_unpin(struct i915_ppgtt *base) +{ + struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); + + GEM_BUG_ON(!atomic_read(&ppgtt->pin_count)); + if (atomic_dec_and_test(&ppgtt->pin_count)) + i915_vma_unpin(ppgtt->vma); } struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt) @@ -427,7 +442,6 @@ struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt) return ERR_PTR(-ENOMEM); mutex_init(&ppgtt->flush); - mutex_init(&ppgtt->pin_mutex); ppgtt_init(&ppgtt->base, gt, 0); ppgtt->base.vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen6_pte_t)); @@ -442,19 +456,13 @@ struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt) ppgtt->base.vm.alloc_pt_dma = alloc_pt_dma; ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode; - ppgtt->base.pd = __alloc_pd(I915_PDES); - if (!ppgtt->base.pd) { - err = -ENOMEM; - goto err_free; - } - err = gen6_ppgtt_init_scratch(ppgtt); if (err) - goto err_pd; + goto err_free; - ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE); - if (IS_ERR(ppgtt->vma)) { - err = PTR_ERR(ppgtt->vma); + ppgtt->base.pd = gen6_alloc_top_pd(ppgtt); + if (IS_ERR(ppgtt->base.pd)) { + err = PTR_ERR(ppgtt->base.pd); goto err_scratch; } @@ -462,10 +470,7 @@ struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt) err_scratch: free_scratch(&ppgtt->base.vm); -err_pd: - free_pd(&ppgtt->base.vm, ppgtt->base.pd); err_free: - mutex_destroy(&ppgtt->pin_mutex); kfree(ppgtt); return ERR_PTR(err); } diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.h b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h index 6a61a5c3a85a..5e5cf2ec3309 100644 --- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.h +++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.h @@ -19,7 +19,6 @@ struct gen6_ppgtt { u32 pp_dir; atomic_t pin_count; - struct mutex pin_mutex; bool scan_for_unused_pt; }; @@ -71,7 +70,6 @@ static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base) int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww); void gen6_ppgtt_unpin(struct i915_ppgtt *base); -void gen6_ppgtt_unpin_all(struct i915_ppgtt *base); void gen6_ppgtt_enable(struct intel_gt *gt); void gen7_ppgtt_enable(struct intel_gt *gt); struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt); diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c index 461844dffd7e..e320610dd0b8 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c @@ -42,7 +42,7 @@ int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode) vf_flush_wa = true; /* WaForGAMHang:kbl */ - if (IS_KBL_GT_STEP(rq->engine->i915, 0, STEP_C0)) + if (IS_KBL_GRAPHICS_STEP(rq->engine->i915, 0, STEP_C0)) dc_flush_wa = true; } diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c index 037a9a6e4889..95c02096a61b 100644 --- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c @@ -18,7 +18,7 @@ static u64 gen8_pde_encode(const dma_addr_t addr, const enum i915_cache_level level) { - u64 pde = addr | _PAGE_PRESENT | _PAGE_RW; + u64 pde = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW; if (level != I915_CACHE_NONE) pde |= PPAT_CACHED_PDE; @@ -32,10 +32,10 @@ static u64 gen8_pte_encode(dma_addr_t addr, enum i915_cache_level level, u32 flags) { - gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW; + gen8_pte_t pte = addr | GEN8_PAGE_PRESENT | GEN8_PAGE_RW; if (unlikely(flags & PTE_READ_ONLY)) - pte &= ~_PAGE_RW; + pte &= 
~GEN8_PAGE_RW; if (flags & PTE_LM) pte |= GEN12_PPGTT_PTE_LM; @@ -301,7 +301,6 @@ static void __gen8_ppgtt_alloc(struct i915_address_space * const vm, pt = stash->pt[!!lvl]; __i915_gem_object_pin_pages(pt->base); - i915_gem_object_make_unshrinkable(pt->base); fill_px(pt, vm->scratch[lvl]->encode); @@ -652,7 +651,7 @@ static int gen8_init_scratch(struct i915_address_space *vm) vm->scratch[0]->encode = gen8_pte_encode(px_dma(vm->scratch[0]), - I915_CACHE_LLC, pte_flags); + I915_CACHE_NONE, pte_flags); for (i = 1; i <= vm->top; i++) { struct drm_i915_gem_object *obj; @@ -668,7 +667,7 @@ static int gen8_init_scratch(struct i915_address_space *vm) } fill_px(obj, vm->scratch[i - 1]->encode); - obj->encode = gen8_pde_encode(px_dma(obj), I915_CACHE_LLC); + obj->encode = gen8_pde_encode(px_dma(obj), I915_CACHE_NONE); vm->scratch[i] = obj; } diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 5634d14052bc..ba083d800a08 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -219,7 +219,7 @@ int __intel_context_do_pin_ww(struct intel_context *ce, */ err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww); - if (!err && ce->ring->vma->obj) + if (!err) err = i915_gem_object_lock(ce->ring->vma->obj, ww); if (!err && ce->state) err = i915_gem_object_lock(ce->state->obj, ww); @@ -228,17 +228,17 @@ int __intel_context_do_pin_ww(struct intel_context *ce, if (err) return err; - err = i915_active_acquire(&ce->active); + err = ce->ops->pre_pin(ce, ww, &vaddr); if (err) goto err_ctx_unpin; - err = ce->ops->pre_pin(ce, ww, &vaddr); + err = i915_active_acquire(&ce->active); if (err) - goto err_release; + goto err_post_unpin; err = mutex_lock_interruptible(&ce->pin_mutex); if (err) - goto err_post_unpin; + goto err_release; intel_engine_pm_might_get(ce->engine); @@ -273,11 +273,11 @@ int __intel_context_do_pin_ww(struct intel_context *ce, err_unlock: mutex_unlock(&ce->pin_mutex); +err_release: + i915_active_release(&ce->active); err_post_unpin: if (!handoff) ce->ops->post_unpin(ce); -err_release: - i915_active_release(&ce->active); err_ctx_unpin: intel_context_post_unpin(ce); @@ -364,7 +364,7 @@ static int __intel_context_active(struct i915_active *active) return 0; } -static int __i915_sw_fence_call +static int sw_fence_dummy_notify(struct i915_sw_fence *sf, enum i915_sw_fence_notify state) { diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index ff6753ccb129..352254e001b4 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -325,6 +325,38 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id, engine->id = id; engine->legacy_idx = INVALID_ENGINE; engine->mask = BIT(id); + if (GRAPHICS_VER(gt->i915) >= 11) { + static const u32 engine_reset_domains[] = { + [RCS0] = GEN11_GRDOM_RENDER, + [BCS0] = GEN11_GRDOM_BLT, + [VCS0] = GEN11_GRDOM_MEDIA, + [VCS1] = GEN11_GRDOM_MEDIA2, + [VCS2] = GEN11_GRDOM_MEDIA3, + [VCS3] = GEN11_GRDOM_MEDIA4, + [VCS4] = GEN11_GRDOM_MEDIA5, + [VCS5] = GEN11_GRDOM_MEDIA6, + [VCS6] = GEN11_GRDOM_MEDIA7, + [VCS7] = GEN11_GRDOM_MEDIA8, + [VECS0] = GEN11_GRDOM_VECS, + [VECS1] = GEN11_GRDOM_VECS2, + [VECS2] = GEN11_GRDOM_VECS3, + [VECS3] = GEN11_GRDOM_VECS4, + }; + GEM_BUG_ON(id >= ARRAY_SIZE(engine_reset_domains) || + !engine_reset_domains[id]); + engine->reset_domain = engine_reset_domains[id]; + } else { + static const u32 engine_reset_domains[] = { + [RCS0] = 
GEN6_GRDOM_RENDER, + [BCS0] = GEN6_GRDOM_BLT, + [VCS0] = GEN6_GRDOM_MEDIA, + [VCS1] = GEN8_GRDOM_MEDIA2, + [VECS0] = GEN6_GRDOM_VECS, + }; + GEM_BUG_ON(id >= ARRAY_SIZE(engine_reset_domains) || + !engine_reset_domains[id]); + engine->reset_domain = engine_reset_domains[id]; + } engine->i915 = i915; engine->gt = gt; engine->uncore = gt->uncore; @@ -363,7 +395,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id, DRIVER_CAPS(i915)->has_logical_contexts = true; ewma__engine_latency_init(&engine->latency); - seqcount_init(&engine->stats.lock); + seqcount_init(&engine->stats.execlists.lock); ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier); @@ -1676,14 +1708,18 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine, static void print_request_ring(struct drm_printer *m, struct i915_request *rq) { + struct i915_vma_snapshot *vsnap = &rq->batch_snapshot; void *ring; int size; + if (!i915_vma_snapshot_present(vsnap)) + vsnap = NULL; + drm_printf(m, "[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]:\n", rq->head, rq->postfix, rq->tail, - rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u, - rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u); + vsnap ? upper_32_bits(vsnap->gtt_offset) : ~0u, + vsnap ? lower_32_bits(vsnap->gtt_offset) : ~0u); size = rq->tail - rq->head; if (rq->tail < rq->head) @@ -1915,22 +1951,6 @@ void intel_engine_dump(struct intel_engine_cs *engine, intel_engine_print_breadcrumbs(engine, m); } -static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine, - ktime_t *now) -{ - ktime_t total = engine->stats.total; - - /* - * If the engine is executing something at the moment - * add it to the total. - */ - *now = ktime_get(); - if (READ_ONCE(engine->stats.active)) - total = ktime_add(total, ktime_sub(*now, engine->stats.start)); - - return total; -} - /** * intel_engine_get_busy_time() - Return current accumulated engine busyness * @engine: engine to report on @@ -1940,15 +1960,7 @@ static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine, */ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now) { - unsigned int seq; - ktime_t total; - - do { - seq = read_seqcount_begin(&engine->stats.lock); - total = __intel_engine_get_busy_time(engine, now); - } while (read_seqcount_retry(&engine->stats.lock, seq)); - - return total; + return engine->busyness(engine, now); } struct intel_context * diff --git a/drivers/gpu/drm/i915/gt/intel_engine_stats.h b/drivers/gpu/drm/i915/gt/intel_engine_stats.h index 24fbdd94351a..8e762d683e50 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_stats.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_stats.h @@ -15,45 +15,46 @@ static inline void intel_engine_context_in(struct intel_engine_cs *engine) { + struct intel_engine_execlists_stats *stats = &engine->stats.execlists; unsigned long flags; - if (engine->stats.active) { - engine->stats.active++; + if (stats->active) { + stats->active++; return; } /* The writer is serialised; but the pmu reader may be from hardirq */ local_irq_save(flags); - write_seqcount_begin(&engine->stats.lock); + write_seqcount_begin(&stats->lock); - engine->stats.start = ktime_get(); - engine->stats.active++; + stats->start = ktime_get(); + stats->active++; - write_seqcount_end(&engine->stats.lock); + write_seqcount_end(&stats->lock); local_irq_restore(flags); - GEM_BUG_ON(!engine->stats.active); + GEM_BUG_ON(!stats->active); } static inline void intel_engine_context_out(struct intel_engine_cs *engine) { + 
struct intel_engine_execlists_stats *stats = &engine->stats.execlists; unsigned long flags; - GEM_BUG_ON(!engine->stats.active); - if (engine->stats.active > 1) { - engine->stats.active--; + GEM_BUG_ON(!stats->active); + if (stats->active > 1) { + stats->active--; return; } local_irq_save(flags); - write_seqcount_begin(&engine->stats.lock); + write_seqcount_begin(&stats->lock); - engine->stats.active--; - engine->stats.total = - ktime_add(engine->stats.total, - ktime_sub(ktime_get(), engine->stats.start)); + stats->active--; + stats->total = ktime_add(stats->total, + ktime_sub(ktime_get(), stats->start)); - write_seqcount_end(&engine->stats.lock); + write_seqcount_end(&stats->lock); local_irq_restore(flags); } diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index e0f773585c29..36365bdbe1ee 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -257,6 +257,55 @@ struct intel_engine_execlists { #define INTEL_ENGINE_CS_MAX_NAME 8 +struct intel_engine_execlists_stats { + /** + * @active: Number of contexts currently scheduled in. + */ + unsigned int active; + + /** + * @lock: Lock protecting the below fields. + */ + seqcount_t lock; + + /** + * @total: Total time this engine was busy. + * + * Accumulated time not counting the most recent block in cases where + * engine is currently busy (active > 0). + */ + ktime_t total; + + /** + * @start: Timestamp of the last idle to active transition. + * + * Idle is defined as active == 0, active is active > 0. + */ + ktime_t start; +}; + +struct intel_engine_guc_stats { + /** + * @running: Active state of the engine when busyness was last sampled. + */ + bool running; + + /** + * @prev_total: Previous value of total runtime clock cycles. + */ + u32 prev_total; + + /** + * @total_gt_clks: Total gt clock cycles this engine was busy. + */ + u64 total_gt_clks; + + /** + * @start_gt_clk: GT clock time of last idle to active transition. + */ + u64 start_gt_clk; +}; + struct intel_engine_cs { struct drm_i915_private *i915; struct intel_gt *gt; @@ -269,6 +318,7 @@ struct intel_engine_cs { unsigned int guc_id; intel_engine_mask_t mask; + u32 reset_domain; /** * @logical_mask: logical mask of engine, reported to user space via * query IOCTL and used to communicate with the GuC in logical space. @@ -439,6 +489,12 @@ struct intel_engine_cs { void (*add_active_request)(struct i915_request *rq); void (*remove_active_request)(struct i915_request *rq); + /* + * Get engine busyness and the time at which the busyness was sampled. + */ + ktime_t (*busyness)(struct intel_engine_cs *engine, + ktime_t *now); + struct intel_engine_execlists execlists; /* @@ -488,30 +544,10 @@ struct intel_engine_cs { u32 (*get_cmd_length_mask)(u32 cmd_header); struct { - /** - * @active: Number of contexts currently scheduled in. - */ - unsigned int active; - - /** - * @lock: Lock protecting the below fields. - */ - seqcount_t lock; - - /** - * @total: Total time this engine was busy. - * - * Accumulated time not counting the most recent block in cases - * where engine is currently busy (active > 0). - */ - ktime_t total; - - /** - * @start: Timestamp of the last idle to active transition. - * - * Idle is defined as active == 0, active is active > 0. - */ - ktime_t start; + union { + struct intel_engine_execlists_stats execlists; + struct intel_engine_guc_stats guc; + }; /** * @rps: Utilisation at last RPS sampling. 
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c index bedb80057046..a69df5e9e77a 100644 --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c @@ -2186,7 +2186,8 @@ struct execlists_capture { static void execlists_capture_work(struct work_struct *work) { struct execlists_capture *cap = container_of(work, typeof(*cap), work); - const gfp_t gfp = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN; + const gfp_t gfp = __GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | + __GFP_NOWARN; struct intel_engine_cs *engine = cap->rq->engine; struct intel_gt_coredump *gt = cap->error->gt; struct intel_engine_capture_vma *vma; @@ -3293,6 +3294,38 @@ static void execlists_release(struct intel_engine_cs *engine) lrc_fini_wa_ctx(engine); } +static ktime_t __execlists_engine_busyness(struct intel_engine_cs *engine, + ktime_t *now) +{ + struct intel_engine_execlists_stats *stats = &engine->stats.execlists; + ktime_t total = stats->total; + + /* + * If the engine is executing something at the moment + * add it to the total. + */ + *now = ktime_get(); + if (READ_ONCE(stats->active)) + total = ktime_add(total, ktime_sub(*now, stats->start)); + + return total; +} + +static ktime_t execlists_engine_busyness(struct intel_engine_cs *engine, + ktime_t *now) +{ + struct intel_engine_execlists_stats *stats = &engine->stats.execlists; + unsigned int seq; + ktime_t total; + + do { + seq = read_seqcount_begin(&stats->lock); + total = __execlists_engine_busyness(engine, now); + } while (read_seqcount_retry(&stats->lock, seq)); + + return total; +} + static void logical_ring_default_vfuncs(struct intel_engine_cs *engine) { @@ -3349,6 +3382,8 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine) engine->emit_bb_start = gen8_emit_bb_start; else engine->emit_bb_start = gen8_emit_bb_start_noarb; + + engine->busyness = execlists_engine_busyness; } static void logical_ring_default_irqs(struct intel_engine_cs *engine) diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index 57c97554393b..cbc6d2b1fd9e 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -3,12 +3,14 @@ * Copyright © 2020 Intel Corporation */ +#include <linux/agp_backend.h> #include <linux/stop_machine.h> #include <asm/set_memory.h> #include <asm/smp.h> #include <drm/i915_drm.h> +#include <drm/intel-gtt.h> #include "gem/i915_gem_lmem.h" @@ -104,7 +106,7 @@ static bool needs_idle_maps(struct drm_i915_private *i915) * Query intel_iommu to see if we need the workaround. Presumably that * was loaded first. */ - if (!intel_vtd_active()) + if (!intel_vtd_active(i915)) return false; if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915)) @@ -116,17 +118,26 @@ static bool needs_idle_maps(struct drm_i915_private *i915) return false; } -void i915_ggtt_suspend(struct i915_ggtt *ggtt) +/** + * i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM + * @vm: The VM to suspend the mappings for + * + * Suspend the memory mappings for all objects mapped to HW via the GGTT or a + * DPT page table. + */ +void i915_ggtt_suspend_vm(struct i915_address_space *vm) { struct i915_vma *vma, *vn; int open; - mutex_lock(&ggtt->vm.mutex); + drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt); + + mutex_lock(&vm->mutex); /* Skip rewriting PTE on VMA unbind. 
*/ - open = atomic_xchg(&ggtt->vm.open, 0); + open = atomic_xchg(&vm->open, 0); - list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) { + list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) { GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); i915_vma_wait_for_bind(vma); @@ -139,11 +150,17 @@ void i915_ggtt_suspend(struct i915_ggtt *ggtt) } } - ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); - ggtt->invalidate(ggtt); - atomic_set(&ggtt->vm.open, open); + vm->clear_range(vm, 0, vm->total); - mutex_unlock(&ggtt->vm.mutex); + atomic_set(&vm->open, open); + + mutex_unlock(&vm->mutex); +} + +void i915_ggtt_suspend(struct i915_ggtt *ggtt) +{ + i915_ggtt_suspend_vm(&ggtt->vm); + ggtt->invalidate(ggtt); intel_gt_check_and_clear_faults(ggtt->vm.gt); } @@ -192,7 +209,7 @@ u64 gen8_ggtt_pte_encode(dma_addr_t addr, enum i915_cache_level level, u32 flags) { - gen8_pte_t pte = addr | _PAGE_PRESENT; + gen8_pte_t pte = addr | GEN8_PAGE_PRESENT; if (flags & PTE_LM) pte |= GEN12_GGTT_PTE_LM; @@ -1216,7 +1233,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915) if (ret) return ret; - if (intel_vtd_active()) + if (intel_vtd_active(i915)) drm_info(&i915->drm, "VT-d active for gfx access\n"); return 0; @@ -1253,37 +1270,59 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt) ggtt->invalidate(ggtt); } -void i915_ggtt_resume(struct i915_ggtt *ggtt) +/** + * i915_ggtt_resume_vm - Restore the memory mappings for a GGTT or DPT VM + * @vm: The VM to restore the mappings for + * + * Restore the memory mappings for all objects mapped to HW via the GGTT or a + * DPT page table. + * + * Returns %true if restoring the mapping for any object that was in a write + * domain before suspend. + */ +bool i915_ggtt_resume_vm(struct i915_address_space *vm) { struct i915_vma *vma; - bool flush = false; + bool write_domain_objs = false; int open; - intel_gt_check_and_clear_faults(ggtt->vm.gt); + drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt); /* First fill our portion of the GTT with scratch pages */ - ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); + vm->clear_range(vm, 0, vm->total); /* Skip rewriting PTE on VMA unbind. */ - open = atomic_xchg(&ggtt->vm.open, 0); + open = atomic_xchg(&vm->open, 0); /* clflush objects bound into the GGTT and rebind them. */ - list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) { + list_for_each_entry(vma, &vm->bound_list, vm_link) { struct drm_i915_gem_object *obj = vma->obj; unsigned int was_bound = atomic_read(&vma->flags) & I915_VMA_BIND_MASK; GEM_BUG_ON(!was_bound); - vma->ops->bind_vma(&ggtt->vm, NULL, vma, + vma->ops->bind_vma(vm, NULL, vma, obj ? 
obj->cache_level : 0, was_bound); if (obj) { /* only used during resume => exclusive access */ - flush |= fetch_and_zero(&obj->write_domain); + write_domain_objs |= fetch_and_zero(&obj->write_domain); obj->read_domains |= I915_GEM_DOMAIN_GTT; } } - atomic_set(&ggtt->vm.open, open); + atomic_set(&vm->open, open); + + return write_domain_objs; +} + +void i915_ggtt_resume(struct i915_ggtt *ggtt) +{ + bool flush; + + intel_gt_check_and_clear_faults(ggtt->vm.gt); + + flush = i915_ggtt_resume_vm(&ggtt->vm); + ggtt->invalidate(ggtt); if (flush) @@ -1388,30 +1427,39 @@ err_st_alloc: } static struct scatterlist * -remap_pages(struct drm_i915_gem_object *obj, - unsigned int offset, unsigned int alignment_pad, - unsigned int width, unsigned int height, - unsigned int src_stride, unsigned int dst_stride, - struct sg_table *st, struct scatterlist *sg) +add_padding_pages(unsigned int count, + struct sg_table *st, struct scatterlist *sg) +{ + st->nents++; + + /* + * The DE ignores the PTEs for the padding tiles, the sg entry + * here is just a convenience to indicate how many padding PTEs + * to insert at this spot. + */ + sg_set_page(sg, NULL, count * I915_GTT_PAGE_SIZE, 0); + sg_dma_address(sg) = 0; + sg_dma_len(sg) = count * I915_GTT_PAGE_SIZE; + sg = sg_next(sg); + + return sg; +} + +static struct scatterlist * +remap_tiled_color_plane_pages(struct drm_i915_gem_object *obj, + unsigned int offset, unsigned int alignment_pad, + unsigned int width, unsigned int height, + unsigned int src_stride, unsigned int dst_stride, + struct sg_table *st, struct scatterlist *sg, + unsigned int *gtt_offset) { unsigned int row; if (!width || !height) return sg; - if (alignment_pad) { - st->nents++; - - /* - * The DE ignores the PTEs for the padding tiles, the sg entry - * here is just a convenience to indicate how many padding PTEs - * to insert at this spot. - */ - sg_set_page(sg, NULL, alignment_pad * 4096, 0); - sg_dma_address(sg) = 0; - sg_dma_len(sg) = alignment_pad * 4096; - sg = sg_next(sg); - } + if (alignment_pad) + sg = add_padding_pages(alignment_pad, st, sg); for (row = 0; row < height; row++) { unsigned int left = width * I915_GTT_PAGE_SIZE; @@ -1448,18 +1496,98 @@ remap_pages(struct drm_i915_gem_object *obj, if (!left) continue; + sg = add_padding_pages(left >> PAGE_SHIFT, st, sg); + } + + *gtt_offset += alignment_pad + dst_stride * height; + + return sg; +} + +static struct scatterlist * +remap_contiguous_pages(struct drm_i915_gem_object *obj, + unsigned int obj_offset, + unsigned int count, + struct sg_table *st, struct scatterlist *sg) +{ + struct scatterlist *iter; + unsigned int offset; + + iter = i915_gem_object_get_sg_dma(obj, obj_offset, &offset); + GEM_BUG_ON(!iter); + + do { + unsigned int len; + + len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT), + count << PAGE_SHIFT); + sg_set_page(sg, NULL, len, 0); + sg_dma_address(sg) = + sg_dma_address(iter) + (offset << PAGE_SHIFT); + sg_dma_len(sg) = len; + st->nents++; + count -= len >> PAGE_SHIFT; + if (count == 0) + return sg; - /* - * The DE ignores the PTEs for the padding tiles, the sg entry - * here is just a conenience to indicate how many padding PTEs - * to insert at this spot. 
- */ - sg_set_page(sg, NULL, left, 0); - sg_dma_address(sg) = 0; - sg_dma_len(sg) = left; - sg = sg_next(sg); - } + sg = __sg_next(sg); + iter = __sg_next(iter); + offset = 0; + } while (1); +} + +static struct scatterlist * +remap_linear_color_plane_pages(struct drm_i915_gem_object *obj, + unsigned int obj_offset, unsigned int alignment_pad, + unsigned int size, + struct sg_table *st, struct scatterlist *sg, + unsigned int *gtt_offset) +{ + if (!size) + return sg; + + if (alignment_pad) + sg = add_padding_pages(alignment_pad, st, sg); + + sg = remap_contiguous_pages(obj, obj_offset, size, st, sg); + sg = sg_next(sg); + + *gtt_offset += alignment_pad + size; + + return sg; +} + +static struct scatterlist * +remap_color_plane_pages(const struct intel_remapped_info *rem_info, + struct drm_i915_gem_object *obj, + int color_plane, + struct sg_table *st, struct scatterlist *sg, + unsigned int *gtt_offset) +{ + unsigned int alignment_pad = 0; + + if (rem_info->plane_alignment) + alignment_pad = ALIGN(*gtt_offset, rem_info->plane_alignment) - *gtt_offset; + + if (rem_info->plane[color_plane].linear) + sg = remap_linear_color_plane_pages(obj, + rem_info->plane[color_plane].offset, + alignment_pad, + rem_info->plane[color_plane].size, + st, sg, + gtt_offset); + + else + sg = remap_tiled_color_plane_pages(obj, + rem_info->plane[color_plane].offset, + alignment_pad, + rem_info->plane[color_plane].width, + rem_info->plane[color_plane].height, + rem_info->plane[color_plane].src_stride, + rem_info->plane[color_plane].dst_stride, + st, sg, + gtt_offset); return sg; } @@ -1488,21 +1616,8 @@ intel_remap_pages(struct intel_remapped_info *rem_info, st->nents = 0; sg = st->sgl; - for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) { - unsigned int alignment_pad = 0; - - if (rem_info->plane_alignment) - alignment_pad = ALIGN(gtt_offset, rem_info->plane_alignment) - gtt_offset; - - sg = remap_pages(obj, - rem_info->plane[i].offset, alignment_pad, - rem_info->plane[i].width, rem_info->plane[i].height, - rem_info->plane[i].src_stride, rem_info->plane[i].dst_stride, - st, sg); - - gtt_offset += alignment_pad + - rem_info->plane[i].dst_stride * rem_info->plane[i].height; - } + for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) + sg = remap_color_plane_pages(rem_info, obj, i, st, sg, >t_offset); i915_sg_trim(st); @@ -1524,9 +1639,8 @@ intel_partial_pages(const struct i915_ggtt_view *view, struct drm_i915_gem_object *obj) { struct sg_table *st; - struct scatterlist *sg, *iter; + struct scatterlist *sg; unsigned int count = view->partial.size; - unsigned int offset; int ret = -ENOMEM; st = kmalloc(sizeof(*st), GFP_KERNEL); @@ -1537,34 +1651,14 @@ intel_partial_pages(const struct i915_ggtt_view *view, if (ret) goto err_sg_alloc; - iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset); - GEM_BUG_ON(!iter); - - sg = st->sgl; st->nents = 0; - do { - unsigned int len; - len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT), - count << PAGE_SHIFT); - sg_set_page(sg, NULL, len, 0); - sg_dma_address(sg) = - sg_dma_address(iter) + (offset << PAGE_SHIFT); - sg_dma_len(sg) = len; + sg = remap_contiguous_pages(obj, view->partial.offset, count, st, st->sgl); - st->nents++; - count -= len >> PAGE_SHIFT; - if (count == 0) { - sg_mark_end(sg); - i915_sg_trim(st); /* Drop any unused tail entries. */ + sg_mark_end(sg); + i915_sg_trim(st); /* Drop any unused tail entries. 
*/ - return st; - } - - sg = __sg_next(sg); - iter = __sg_next(iter); - offset = 0; - } while (1); + return st; err_sg_alloc: kfree(st); diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 1cb1948ac959..f2422d48be32 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -3,6 +3,8 @@ * Copyright © 2019 Intel Corporation */ +#include <drm/intel-gtt.h> + #include "intel_gt_debugfs.h" #include "gem/i915_gem_lmem.h" diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index 524eaf678790..c0fa41e4c803 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -86,6 +86,7 @@ static int __gt_unpark(struct intel_wakeref *wf) intel_rc6_unpark(>->rc6); intel_rps_unpark(>->rps); i915_pmu_gt_unparked(i915); + intel_guc_busyness_unpark(gt); intel_gt_unpark_requests(gt); runtime_begin(gt); @@ -104,6 +105,7 @@ static int __gt_park(struct intel_wakeref *wf) runtime_end(gt); intel_gt_park_requests(gt); + intel_guc_busyness_park(gt); i915_vma_parked(gt); i915_pmu_gt_parked(i915); intel_rps_park(>->rps); @@ -301,7 +303,7 @@ void intel_gt_suspend_prepare(struct intel_gt *gt) user_forcewake(gt, true); wait_for_suspend(gt); - intel_pxp_suspend(>->pxp, false); + intel_pxp_suspend_prepare(>->pxp); } static suspend_state_t pm_suspend_target(void) @@ -326,6 +328,7 @@ void intel_gt_suspend_late(struct intel_gt *gt) GEM_BUG_ON(gt->awake); intel_uc_suspend(>->uc); + intel_pxp_suspend(>->pxp); /* * On disabling the device, we want to turn off HW access to memory @@ -353,7 +356,7 @@ void intel_gt_suspend_late(struct intel_gt *gt) void intel_gt_runtime_suspend(struct intel_gt *gt) { - intel_pxp_suspend(>->pxp, true); + intel_pxp_runtime_suspend(>->pxp); intel_uc_runtime_suspend(>->uc); GT_TRACE(gt, "\n"); @@ -371,7 +374,7 @@ int intel_gt_runtime_resume(struct intel_gt *gt) if (ret) return ret; - intel_pxp_resume(>->pxp); + intel_pxp_runtime_resume(>->pxp); return 0; } diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c index 67d14afa6623..9fee968d57db 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.c +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c @@ -6,6 +6,9 @@ #include <linux/slab.h> /* fault-inject.h is not standalone! 
*/ #include <linux/fault-inject.h> +#include <linux/sched/mm.h> + +#include <drm/drm_cache.h> #include "gem/i915_gem_lmem.h" #include "i915_trace.h" @@ -273,6 +276,7 @@ static void poison_scratch_page(struct drm_i915_gem_object *scratch) val = POISON_FREE; memset(vaddr, val, scratch->base.size); + drm_clflush_virt_range(vaddr, scratch->base.size); } int setup_scratch_page(struct i915_address_space *vm) diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h index bc6750263359..51afe66d00f2 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.h +++ b/drivers/gpu/drm/i915/gt/intel_gtt.h @@ -135,6 +135,9 @@ typedef u64 gen8_pte_t; #define GEN8_PPAT_ELLC_OVERRIDE (0<<2) #define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8)) +#define GEN8_PAGE_PRESENT BIT_ULL(0) +#define GEN8_PAGE_RW BIT_ULL(1) + #define GEN8_PDE_IPS_64K BIT(11) #define GEN8_PDE_PS_2M BIT(7) @@ -544,6 +547,8 @@ int i915_ppgtt_init_hw(struct intel_gt *gt); struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt, unsigned long lmem_pt_obj_flags); +void i915_ggtt_suspend_vm(struct i915_address_space *vm); +bool i915_ggtt_resume_vm(struct i915_address_space *vm); void i915_ggtt_suspend(struct i915_ggtt *gtt); void i915_ggtt_resume(struct i915_ggtt *ggtt); diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 56156cf18c41..b3489599e4de 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1167,6 +1167,11 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs) cs = gen12_emit_cmd_buf_wa(ce, cs); cs = gen12_emit_restore_scratch(ce, cs); + /* Wa_16013000631:dg2 */ + if (IS_DG2_GRAPHICS_STEP(ce->engine->i915, G10, STEP_B0, STEP_C0) || + IS_DG2_G11(ce->engine->i915)) + cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE, 0); + return cs; } diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c index afb1cce9a352..19a01878fee3 100644 --- a/drivers/gpu/drm/i915/gt/intel_migrate.c +++ b/drivers/gpu/drm/i915/gt/intel_migrate.c @@ -13,7 +13,6 @@ struct insert_pte_data { u64 offset; - bool is_lmem; }; #define CHUNK_SZ SZ_8M /* ~1ms at 8GiB/s preemption delay */ @@ -40,7 +39,7 @@ static void insert_pte(struct i915_address_space *vm, struct insert_pte_data *d = data; vm->insert_page(vm, px_dma(pt), d->offset, I915_CACHE_NONE, - d->is_lmem ? PTE_LM : 0); + i915_gem_object_is_lmem(pt->base) ? 
PTE_LM : 0); d->offset += PAGE_SIZE; } @@ -134,8 +133,7 @@ static struct i915_address_space *migrate_vm(struct intel_gt *gt) goto err_vm; /* Now allow the GPU to rewrite the PTE via its own ppGTT */ - d.is_lmem = i915_gem_object_is_lmem(vm->vm.scratch[0]); - vm->vm.foreach(&vm->vm, base, base + sz, insert_pte, &d); + vm->vm.foreach(&vm->vm, base, d.offset - base, insert_pte, &d); } return &vm->vm; @@ -281,10 +279,10 @@ static int emit_pte(struct i915_request *rq, GEM_BUG_ON(GRAPHICS_VER(rq->engine->i915) < 8); /* Compute the page directory offset for the target address range */ - offset += (u64)rq->engine->instance << 32; offset >>= 12; offset *= sizeof(u64); offset += 2 * CHUNK_SZ; + offset += (u64)rq->engine->instance << 32; cs = intel_ring_begin(rq, 6); if (IS_ERR(cs)) diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index 15f9ada28a7a..9c253ba593c6 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -424,7 +424,7 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915, table->unused_entries_index = I915_MOCS_PTE; if (IS_DG2(i915)) { - if (IS_DG2_GT_STEP(i915, G10, STEP_A0, STEP_B0)) { + if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) { table->size = ARRAY_SIZE(dg2_mocs_table_g10_ax); table->table = dg2_mocs_table_g10_ax; } else { diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c index 43093dd2d0c9..c3155ee58689 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6.c +++ b/drivers/gpu/drm/i915/gt/intel_rc6.c @@ -117,10 +117,17 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6) GEN6_RC_CTL_RC6_ENABLE | GEN6_RC_CTL_EI_MODE(1); - pg_enable = - GEN9_RENDER_PG_ENABLE | - GEN9_MEDIA_PG_ENABLE | - GEN11_MEDIA_SAMPLER_PG_ENABLE; + /* Wa_16011777198 - Render powergating must remain disabled */ + if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_C0) || + IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0)) + pg_enable = + GEN9_MEDIA_PG_ENABLE | + GEN11_MEDIA_SAMPLER_PG_ENABLE; + else + pg_enable = + GEN9_RENDER_PG_ENABLE | + GEN9_MEDIA_PG_ENABLE | + GEN11_MEDIA_SAMPLER_PG_ENABLE; if (GRAPHICS_VER(gt->i915) >= 12) { for (i = 0; i < I915_MAX_VCS; i++) diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c index afb35d2e5c73..9ea49e0a27c0 100644 --- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c +++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c @@ -66,12 +66,16 @@ static void release_fake_lmem_bar(struct intel_memory_region *mem) DMA_ATTR_FORCE_CONTIGUOUS); } -static void +static int region_lmem_release(struct intel_memory_region *mem) { - intel_region_ttm_fini(mem); + int ret; + + ret = intel_region_ttm_fini(mem); io_mapping_fini(&mem->iomap); release_fake_lmem_bar(mem); + + return ret; } static int @@ -158,7 +162,7 @@ intel_gt_setup_fake_lmem(struct intel_gt *gt) static bool get_legacy_lowmem_region(struct intel_uncore *uncore, u64 *start, u32 *size) { - if (!IS_DG1_GT_STEP(uncore->i915, STEP_A0, STEP_C0)) + if (!IS_DG1_GRAPHICS_STEP(uncore->i915, STEP_A0, STEP_C0)) return false; *start = 0; @@ -231,7 +235,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt) return mem; err_region_put: - intel_memory_region_put(mem); + intel_memory_region_destroy(mem); return ERR_PTR(err); } diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 91200c43951f..63199f0550e6 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ 
b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -297,13 +297,6 @@ static int gen6_reset_engines(struct intel_gt *gt, intel_engine_mask_t engine_mask, unsigned int retry) { - static const u32 hw_engine_mask[] = { - [RCS0] = GEN6_GRDOM_RENDER, - [BCS0] = GEN6_GRDOM_BLT, - [VCS0] = GEN6_GRDOM_MEDIA, - [VCS1] = GEN8_GRDOM_MEDIA2, - [VECS0] = GEN6_GRDOM_VECS, - }; struct intel_engine_cs *engine; u32 hw_mask; @@ -314,8 +307,7 @@ static int gen6_reset_engines(struct intel_gt *gt, hw_mask = 0; for_each_engine_masked(engine, gt, engine_mask, tmp) { - GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask)); - hw_mask |= hw_engine_mask[engine->id]; + hw_mask |= engine->reset_domain; } } @@ -492,22 +484,6 @@ static int gen11_reset_engines(struct intel_gt *gt, intel_engine_mask_t engine_mask, unsigned int retry) { - static const u32 hw_engine_mask[] = { - [RCS0] = GEN11_GRDOM_RENDER, - [BCS0] = GEN11_GRDOM_BLT, - [VCS0] = GEN11_GRDOM_MEDIA, - [VCS1] = GEN11_GRDOM_MEDIA2, - [VCS2] = GEN11_GRDOM_MEDIA3, - [VCS3] = GEN11_GRDOM_MEDIA4, - [VCS4] = GEN11_GRDOM_MEDIA5, - [VCS5] = GEN11_GRDOM_MEDIA6, - [VCS6] = GEN11_GRDOM_MEDIA7, - [VCS7] = GEN11_GRDOM_MEDIA8, - [VECS0] = GEN11_GRDOM_VECS, - [VECS1] = GEN11_GRDOM_VECS2, - [VECS2] = GEN11_GRDOM_VECS3, - [VECS3] = GEN11_GRDOM_VECS4, - }; struct intel_engine_cs *engine; intel_engine_mask_t tmp; u32 reset_mask, unlock_mask = 0; @@ -518,8 +494,7 @@ static int gen11_reset_engines(struct intel_gt *gt, } else { reset_mask = 0; for_each_engine_masked(engine, gt, engine_mask, tmp) { - GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask)); - reset_mask |= hw_engine_mask[engine->id]; + reset_mask |= engine->reset_domain; ret = gen11_lock_sfc(engine, &reset_mask, &unlock_mask); if (ret) goto sfc_unlock; @@ -1367,20 +1342,27 @@ void intel_gt_handle_error(struct intel_gt *gt, /* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */ synchronize_rcu_expedited(); - /* Prevent any other reset-engine attempt. */ - for_each_engine(engine, gt, tmp) { - while (test_and_set_bit(I915_RESET_ENGINE + engine->id, - >->reset.flags)) - wait_on_bit(>->reset.flags, - I915_RESET_ENGINE + engine->id, - TASK_UNINTERRUPTIBLE); + /* + * Prevent any other reset-engine attempt. We don't do this for GuC + * submission the GuC owns the per-engine reset, not the i915. 
+ */ + if (!intel_uc_uses_guc_submission(>->uc)) { + for_each_engine(engine, gt, tmp) { + while (test_and_set_bit(I915_RESET_ENGINE + engine->id, + >->reset.flags)) + wait_on_bit(>->reset.flags, + I915_RESET_ENGINE + engine->id, + TASK_UNINTERRUPTIBLE); + } } intel_gt_reset_global(gt, engine_mask, msg); - for_each_engine(engine, gt, tmp) - clear_bit_unlock(I915_RESET_ENGINE + engine->id, - >->reset.flags); + if (!intel_uc_uses_guc_submission(>->uc)) { + for_each_engine(engine, gt, tmp) + clear_bit_unlock(I915_RESET_ENGINE + engine->id, + >->reset.flags); + } clear_bit_unlock(I915_RESET_BACKOFF, >->reset.flags); smp_mb__after_atomic(); wake_up_all(>->reset.queue); @@ -1441,6 +1423,7 @@ void intel_gt_set_wedged_on_init(struct intel_gt *gt) BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES > I915_WEDGED_ON_INIT); intel_gt_set_wedged(gt); + i915_disable_error_state(gt->i915, -ENODEV); set_bit(I915_WEDGED_ON_INIT, >->reset.flags); /* Wedged on init is non-recoverable */ @@ -1450,6 +1433,7 @@ void intel_gt_set_wedged_on_init(struct intel_gt *gt) void intel_gt_set_wedged_on_fini(struct intel_gt *gt) { intel_gt_set_wedged(gt); + i915_disable_error_state(gt->i915, -ENODEV); set_bit(I915_WEDGED_ON_FINI, >->reset.flags); intel_gt_retire_requests(gt); /* cleanup any wedged requests */ } diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index 586dca1731ce..3e6fac0340ef 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -1357,7 +1357,7 @@ retry: err = i915_gem_object_lock(timeline->hwsp_ggtt->obj, &ww); if (!err && gen7_wa_vma) err = i915_gem_object_lock(gen7_wa_vma->obj, &ww); - if (!err && engine->legacy.ring->vma->obj) + if (!err) err = i915_gem_object_lock(engine->legacy.ring->vma->obj, &ww); if (!err) err = intel_timeline_pin(timeline, &ww); diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index 5e275f8dda8c..07ff7ba7b2b7 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -936,8 +936,70 @@ void intel_rps_park(struct intel_rps *rps) GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq); } +u32 intel_rps_get_boost_frequency(struct intel_rps *rps) +{ + struct intel_guc_slpc *slpc; + + if (rps_uses_slpc(rps)) { + slpc = rps_to_slpc(rps); + + return slpc->boost_freq; + } else { + return intel_gpu_freq(rps, rps->boost_freq); + } +} + +static int rps_set_boost_freq(struct intel_rps *rps, u32 val) +{ + bool boost = false; + + /* Validate against (static) hardware limits */ + val = intel_freq_opcode(rps, val); + if (val < rps->min_freq || val > rps->max_freq) + return -EINVAL; + + mutex_lock(&rps->lock); + if (val != rps->boost_freq) { + rps->boost_freq = val; + boost = atomic_read(&rps->num_waiters); + } + mutex_unlock(&rps->lock); + if (boost) + schedule_work(&rps->work); + + return 0; +} + +int intel_rps_set_boost_frequency(struct intel_rps *rps, u32 freq) +{ + struct intel_guc_slpc *slpc; + + if (rps_uses_slpc(rps)) { + slpc = rps_to_slpc(rps); + + return intel_guc_slpc_set_boost_freq(slpc, freq); + } else { + return rps_set_boost_freq(rps, freq); + } +} + +void intel_rps_dec_waiters(struct intel_rps *rps) +{ + struct intel_guc_slpc *slpc; + + if (rps_uses_slpc(rps)) { + slpc = rps_to_slpc(rps); + + intel_guc_slpc_dec_waiters(slpc); + } else { + atomic_dec(&rps->num_waiters); + } +} + void intel_rps_boost(struct i915_request *rq) { + struct intel_guc_slpc *slpc; + if (i915_request_signaled(rq) || 
i915_request_has_waitboost(rq)) return; @@ -945,6 +1007,16 @@ void intel_rps_boost(struct i915_request *rq) if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) { struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps; + if (rps_uses_slpc(rps)) { + slpc = rps_to_slpc(rps); + + /* Return if old value is non zero */ + if (!atomic_fetch_inc(&slpc->num_waiters)) + schedule_work(&slpc->boost_work); + + return; + } + if (atomic_fetch_inc(&rps->num_waiters)) return; diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h index 11960d64ca82..aee12f37d38a 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.h +++ b/drivers/gpu/drm/i915/gt/intel_rps.h @@ -23,6 +23,9 @@ void intel_rps_disable(struct intel_rps *rps); void intel_rps_park(struct intel_rps *rps); void intel_rps_unpark(struct intel_rps *rps); void intel_rps_boost(struct i915_request *rq); +void intel_rps_dec_waiters(struct intel_rps *rps); +u32 intel_rps_get_boost_frequency(struct intel_rps *rps); +int intel_rps_set_boost_frequency(struct intel_rps *rps, u32 freq); int intel_rps_set(struct intel_rps *rps, u8 val); void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive); diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index e1f362530889..3113266c286e 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -482,7 +482,7 @@ static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine, gen9_ctx_workarounds_init(engine, wal); /* WaToEnableHwFixForPushConstHWBug:kbl */ - if (IS_KBL_GT_STEP(i915, STEP_C0, STEP_FOREVER)) + if (IS_KBL_GRAPHICS_STEP(i915, STEP_C0, STEP_FOREVER)) wa_masked_en(wal, COMMON_SLICE_CHICKEN2, GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); @@ -560,6 +560,22 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine, /* * These settings aren't actually workarounds, but general tuning settings that + * need to be programmed on dg2 platform. + */ +static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) +{ + wa_write_clr_set(wal, GEN11_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK, + REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f)); + wa_add(wal, + FF_MODE2, + FF_MODE2_TDS_TIMER_MASK, + FF_MODE2_TDS_TIMER_128, + 0, false); +} + +/* + * These settings aren't actually workarounds, but general tuning settings that * need to be programmed on several platforms. 
*/ static void gen12_ctx_gt_tuning_init(struct intel_engine_cs *engine, @@ -621,13 +637,6 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine, FF_MODE2_GS_TIMER_MASK, FF_MODE2_GS_TIMER_224, 0, false); - - /* - * Wa_14012131227:dg1 - * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p - */ - wa_masked_en(wal, GEN7_COMMON_SLICE_CHICKEN1, - GEN9_RHWO_OPTIMIZATION_DISABLE); } static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine, @@ -644,6 +653,42 @@ static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine, DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE); } +static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine, + struct i915_wa_list *wal) +{ + dg2_ctx_gt_tuning_init(engine, wal); + + /* Wa_16011186671:dg2_g11 */ + if (IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) { + wa_masked_dis(wal, VFLSKPD, DIS_MULT_MISS_RD_SQUASH); + wa_masked_en(wal, VFLSKPD, DIS_OVER_FETCH_CACHE); + } + + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) { + /* Wa_14010469329:dg2_g10 */ + wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3, + XEHP_DUAL_SIMD8_SEQ_MERGE_DISABLE); + + /* + * Wa_22010465075:dg2_g10 + * Wa_22010613112:dg2_g10 + * Wa_14010698770:dg2_g10 + */ + wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3, + GEN12_DISABLE_CPS_AWARE_COLOR_PIPE); + } + + /* Wa_16013271637:dg2 */ + wa_masked_en(wal, SLICE_COMMON_ECO_CHICKEN1, + MSC_MSAA_REODER_BUF_BYPASS_DISABLE); + + /* Wa_22012532006:dg2 */ + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_C0) || + IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) + wa_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7, + DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA); +} + static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine, struct i915_wa_list *wal) { @@ -730,7 +775,11 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine, if (engine->class != RENDER_CLASS) goto done; - if (IS_DG1(i915)) + if (IS_DG2(i915)) + dg2_ctx_workarounds_init(engine, wal); + else if (IS_XEHPSDV(i915)) + ; /* noop; none at this time */ + else if (IS_DG1(i915)) dg1_ctx_workarounds_init(engine, wal); else if (GRAPHICS_VER(i915) == 12) gen12_ctx_workarounds_init(engine, wal); @@ -878,10 +927,51 @@ hsw_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) } static void +gen9_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal) +{ + const struct sseu_dev_info *sseu = &i915->gt.info.sseu; + unsigned int slice, subslice; + u32 mcr, mcr_mask; + + GEM_BUG_ON(GRAPHICS_VER(i915) != 9); + + /* + * WaProgramMgsrForCorrectSliceSpecificMmioReads:gen9,glk,kbl,cml + * Before any MMIO read into slice/subslice specific registers, MCR + * packet control register needs to be programmed to point to any + * enabled s/ss pair. Otherwise, incorrect values will be returned. + * This means each subsequent MMIO read will be forwarded to an + * specific s/ss combination, but this is OK since these registers + * are consistent across s/ss in almost all cases. In the rare + * occasions, such as INSTDONE, where this value is dependent + * on s/ss combo, the read should be done with read_subslice_reg. 
+ */ + slice = ffs(sseu->slice_mask) - 1; + GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask)); + subslice = ffs(intel_sseu_get_subslices(sseu, slice)); + GEM_BUG_ON(!subslice); + subslice--; + + /* + * We use GEN8_MCR..() macros to calculate the |mcr| value for + * Gen9 to address WaProgramMgsrForCorrectSliceSpecificMmioReads + */ + mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice); + mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK; + + drm_dbg(&i915->drm, "MCR slice:%d/subslice:%d = %x\n", slice, subslice, mcr); + + wa_write_clr_set(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr); +} + +static void gen9_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { struct drm_i915_private *i915 = gt->i915; + /* WaProgramMgsrForCorrectSliceSpecificMmioReads:glk,kbl,cml,gen9 */ + gen9_wa_init_mcr(i915, wal); + /* WaDisableKillLogic:bxt,skl,kbl */ if (!IS_COFFEELAKE(i915) && !IS_COMETLAKE(i915)) wa_write_or(wal, @@ -916,7 +1006,7 @@ skl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); /* WaInPlaceDecompressionHang:skl */ - if (IS_SKL_GT_STEP(gt->i915, STEP_A0, STEP_H0)) + if (IS_SKL_GRAPHICS_STEP(gt->i915, STEP_A0, STEP_H0)) wa_write_or(wal, GEN9_GAMT_ECO_REG_RW_IA, GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); @@ -928,7 +1018,7 @@ kbl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) gen9_gt_workarounds_init(gt, wal); /* WaDisableDynamicCreditSharing:kbl */ - if (IS_KBL_GT_STEP(gt->i915, 0, STEP_C0)) + if (IS_KBL_GRAPHICS_STEP(gt->i915, 0, STEP_C0)) wa_write_or(wal, GAMT_CHKN_BIT_REG, GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING); @@ -1134,9 +1224,18 @@ icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) GAMT_CHKN_BIT_REG, GAMT_CHKN_DISABLE_L3_COH_PIPE); + /* Wa_1407352427:icl,ehl */ + wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, + PSDUNIT_CLKGATE_DIS); + + /* Wa_1406680159:icl,ehl */ + wa_write_or(wal, + SUBSLICE_UNIT_LEVEL_CLKGATE, + GWUNIT_CLKGATE_DIS); + /* Wa_1607087056:icl,ehl,jsl */ if (IS_ICELAKE(i915) || - IS_JSL_EHL_GT_STEP(i915, STEP_A0, STEP_B0)) + IS_JSL_EHL_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) wa_write_or(wal, SLICE_UNIT_LEVEL_CLKGATE, L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS); @@ -1190,19 +1289,19 @@ tgl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) gen12_gt_workarounds_init(gt, wal); /* Wa_1409420604:tgl */ - if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0)) + if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) wa_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE2, CPSSUNIT_CLKGATE_DIS); /* Wa_1607087056:tgl also know as BUG:1409180338 */ - if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0)) + if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) wa_write_or(wal, SLICE_UNIT_LEVEL_CLKGATE, L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS); /* Wa_1408615072:tgl[a0] */ - if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0)) + if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, VSUNIT_CLKGATE_DIS_TGL); } @@ -1215,7 +1314,7 @@ dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) gen12_gt_workarounds_init(gt, wal); /* Wa_1607087056:dg1 */ - if (IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0)) + if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) wa_write_or(wal, SLICE_UNIT_LEVEL_CLKGATE, L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS); @@ -1236,7 +1335,179 @@ dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) static void xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) { + struct drm_i915_private *i915 = gt->i915; 
+ + xehp_init_mcr(gt, wal); + + /* Wa_1409757795:xehpsdv */ + wa_write_or(wal, SCCGCTL94DC, CG3DDISURB); + + /* Wa_18011725039:xehpsdv */ + if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_B0)) { + wa_masked_dis(wal, MLTICTXCTL, TDONRENDER); + wa_write_or(wal, L3SQCREG1_CCS0, FLUSHALLNONCOH); + } + + /* Wa_16011155590:xehpsdv */ + if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) + wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, + TSGUNIT_CLKGATE_DIS); + + /* Wa_14011780169:xehpsdv */ + if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_B0, STEP_FOREVER)) { + wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS | + GAMTLBVDBOX7_CLKGATE_DIS | + GAMTLBVDBOX6_CLKGATE_DIS | + GAMTLBVDBOX5_CLKGATE_DIS | + GAMTLBVDBOX4_CLKGATE_DIS | + GAMTLBVDBOX3_CLKGATE_DIS | + GAMTLBVDBOX2_CLKGATE_DIS | + GAMTLBVDBOX1_CLKGATE_DIS | + GAMTLBVDBOX0_CLKGATE_DIS | + GAMTLBKCR_CLKGATE_DIS | + GAMTLBGUC_CLKGATE_DIS | + GAMTLBBLT_CLKGATE_DIS); + wa_write_or(wal, UNSLCGCTL9444, GAMTLBGFXA0_CLKGATE_DIS | + GAMTLBGFXA1_CLKGATE_DIS | + GAMTLBCOMPA0_CLKGATE_DIS | + GAMTLBCOMPA1_CLKGATE_DIS | + GAMTLBCOMPB0_CLKGATE_DIS | + GAMTLBCOMPB1_CLKGATE_DIS | + GAMTLBCOMPC0_CLKGATE_DIS | + GAMTLBCOMPC1_CLKGATE_DIS | + GAMTLBCOMPD0_CLKGATE_DIS | + GAMTLBCOMPD1_CLKGATE_DIS | + GAMTLBMERT_CLKGATE_DIS | + GAMTLBVEBOX3_CLKGATE_DIS | + GAMTLBVEBOX2_CLKGATE_DIS | + GAMTLBVEBOX1_CLKGATE_DIS | + GAMTLBVEBOX0_CLKGATE_DIS); + } + + /* Wa_14012362059:xehpsdv */ + wa_write_or(wal, GEN12_MERT_MOD_CTRL, FORCE_MISS_FTLB); + + /* Wa_16012725990:xehpsdv */ + if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_FOREVER)) + wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, VFUNIT_CLKGATE_DIS); + + /* Wa_14011060649:xehpsdv */ + wa_14011060649(gt, wal); + + /* Wa_14014368820:xehpsdv */ + wa_write_or(wal, GEN12_GAMCNTRL_CTRL, INVALIDATION_BROADCAST_MODE_DIS | + GLOBAL_INVALIDATION_MODE); +} + +static void +dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal) +{ + struct intel_engine_cs *engine; + int id; + xehp_init_mcr(gt, wal); + + /* Wa_14011060649:dg2 */ + wa_14011060649(gt, wal); + + /* + * Although there are per-engine instances of these registers, + * they technically exist outside the engine itself and are not + * impacted by engine resets. Furthermore, they're part of the + * GuC blacklist so trying to treat them as engine workarounds + * will result in GuC initialization failure and a wedged GPU. 
+ */ + for_each_engine(engine, gt, id) { + if (engine->class != VIDEO_DECODE_CLASS) + continue; + + /* Wa_16010515920:dg2_g10 */ + if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0)) + wa_write_or(wal, VDBOX_CGCTL3F18(engine->mmio_base), + ALNUNIT_CLKGATE_DIS); + } + + if (IS_DG2_G10(gt->i915)) { + /* Wa_22010523718:dg2 */ + wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, + CG3DDISCFEG_CLKGATE_DIS); + + /* Wa_14011006942:dg2 */ + wa_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE, + DSS_ROUTER_CLKGATE_DIS); + } + + if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0)) { + /* Wa_14010680813:dg2_g10 */ + wa_write_or(wal, GEN12_GAMSTLB_CTRL, CONTROL_BLOCK_CLKGATE_DIS | + EGRESS_BLOCK_CLKGATE_DIS | TAG_BLOCK_CLKGATE_DIS); + + /* Wa_14010948348:dg2_g10 */ + wa_write_or(wal, UNSLCGCTL9430, MSQDUNIT_CLKGATE_DIS); + + /* Wa_14011037102:dg2_g10 */ + wa_write_or(wal, UNSLCGCTL9444, LTCDD_CLKGATE_DIS); + + /* Wa_14011371254:dg2_g10 */ + wa_write_or(wal, SLICE_UNIT_LEVEL_CLKGATE, NODEDSS_CLKGATE_DIS); + + /* Wa_14011431319:dg2_g10 */ + wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS | + GAMTLBVDBOX7_CLKGATE_DIS | + GAMTLBVDBOX6_CLKGATE_DIS | + GAMTLBVDBOX5_CLKGATE_DIS | + GAMTLBVDBOX4_CLKGATE_DIS | + GAMTLBVDBOX3_CLKGATE_DIS | + GAMTLBVDBOX2_CLKGATE_DIS | + GAMTLBVDBOX1_CLKGATE_DIS | + GAMTLBVDBOX0_CLKGATE_DIS | + GAMTLBKCR_CLKGATE_DIS | + GAMTLBGUC_CLKGATE_DIS | + GAMTLBBLT_CLKGATE_DIS); + wa_write_or(wal, UNSLCGCTL9444, GAMTLBGFXA0_CLKGATE_DIS | + GAMTLBGFXA1_CLKGATE_DIS | + GAMTLBCOMPA0_CLKGATE_DIS | + GAMTLBCOMPA1_CLKGATE_DIS | + GAMTLBCOMPB0_CLKGATE_DIS | + GAMTLBCOMPB1_CLKGATE_DIS | + GAMTLBCOMPC0_CLKGATE_DIS | + GAMTLBCOMPC1_CLKGATE_DIS | + GAMTLBCOMPD0_CLKGATE_DIS | + GAMTLBCOMPD1_CLKGATE_DIS | + GAMTLBMERT_CLKGATE_DIS | + GAMTLBVEBOX3_CLKGATE_DIS | + GAMTLBVEBOX2_CLKGATE_DIS | + GAMTLBVEBOX1_CLKGATE_DIS | + GAMTLBVEBOX0_CLKGATE_DIS); + + /* Wa_14010569222:dg2_g10 */ + wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, + GAMEDIA_CLKGATE_DIS); + + /* Wa_14011028019:dg2_g10 */ + wa_write_or(wal, SSMCGCTL9530, RTFUNIT_CLKGATE_DIS); + } + + if (IS_DG2_GRAPHICS_STEP(gt->i915, G10, STEP_A0, STEP_B0) || + IS_DG2_GRAPHICS_STEP(gt->i915, G11, STEP_A0, STEP_B0)) { + /* Wa_14012362059:dg2 */ + wa_write_or(wal, GEN12_MERT_MOD_CTRL, FORCE_MISS_FTLB); + } + + /* Wa_1509235366:dg2 */ + wa_write_or(wal, GEN12_GAMCNTRL_CTRL, INVALIDATION_BROADCAST_MODE_DIS | + GLOBAL_INVALIDATION_MODE); + + /* Wa_14014830051:dg2 */ + wa_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN); + + /* + * The following are not actually "workarounds" but rather + * recommended tuning settings documented in the bspec's + * performance guide section. 
+ */ + wa_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS); + wa_write_or(wal, GEN12_SQCM, EN_32B_ACCESS); } static void @@ -1244,7 +1515,9 @@ gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal) { struct drm_i915_private *i915 = gt->i915; - if (IS_XEHPSDV(i915)) + if (IS_DG2(i915)) + dg2_gt_workarounds_init(gt, wal); + else if (IS_XEHPSDV(i915)) xehpsdv_gt_workarounds_init(gt, wal); else if (IS_DG1(i915)) dg1_gt_workarounds_init(gt, wal); @@ -1518,7 +1791,7 @@ static void cfl_whitelist_build(struct intel_engine_cs *engine) RING_FORCE_TO_NONPRIV_RANGE_4); } -static void cml_whitelist_build(struct intel_engine_cs *engine) +static void allow_read_ctx_timestamp(struct intel_engine_cs *engine) { struct i915_wa_list *w = &engine->whitelist; @@ -1526,6 +1799,11 @@ static void cml_whitelist_build(struct intel_engine_cs *engine) whitelist_reg_ext(w, RING_CTX_TIMESTAMP(engine->mmio_base), RING_FORCE_TO_NONPRIV_ACCESS_RD); +} + +static void cml_whitelist_build(struct intel_engine_cs *engine) +{ + allow_read_ctx_timestamp(engine); cfl_whitelist_build(engine); } @@ -1534,6 +1812,8 @@ static void icl_whitelist_build(struct intel_engine_cs *engine) { struct i915_wa_list *w = &engine->whitelist; + allow_read_ctx_timestamp(engine); + switch (engine->class) { case RENDER_CLASS: /* WaAllowUMDToModifyHalfSliceChicken7:icl */ @@ -1569,15 +1849,9 @@ static void icl_whitelist_build(struct intel_engine_cs *engine) /* hucStatus2RegOffset */ whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base), RING_FORCE_TO_NONPRIV_ACCESS_RD); - whitelist_reg_ext(w, - RING_CTX_TIMESTAMP(engine->mmio_base), - RING_FORCE_TO_NONPRIV_ACCESS_RD); break; default: - whitelist_reg_ext(w, - RING_CTX_TIMESTAMP(engine->mmio_base), - RING_FORCE_TO_NONPRIV_ACCESS_RD); break; } } @@ -1586,6 +1860,8 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine) { struct i915_wa_list *w = &engine->whitelist; + allow_read_ctx_timestamp(engine); + switch (engine->class) { case RENDER_CLASS: /* @@ -1602,16 +1878,17 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine) RING_FORCE_TO_NONPRIV_ACCESS_RD | RING_FORCE_TO_NONPRIV_RANGE_4); - /* Wa_1808121037:tgl */ + /* + * Wa_1808121037:tgl + * Wa_14012131227:dg1 + * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p + */ whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1); /* Wa_1806527549:tgl */ whitelist_reg(w, HIZ_CHICKEN); break; default: - whitelist_reg_ext(w, - RING_CTX_TIMESTAMP(engine->mmio_base), - RING_FORCE_TO_NONPRIV_ACCESS_RD); break; } } @@ -1623,13 +1900,46 @@ static void dg1_whitelist_build(struct intel_engine_cs *engine) tgl_whitelist_build(engine); /* GEN:BUG:1409280441:dg1 */ - if (IS_DG1_GT_STEP(engine->i915, STEP_A0, STEP_B0) && + if (IS_DG1_GRAPHICS_STEP(engine->i915, STEP_A0, STEP_B0) && (engine->class == RENDER_CLASS || engine->class == COPY_ENGINE_CLASS)) whitelist_reg_ext(w, RING_ID(engine->mmio_base), RING_FORCE_TO_NONPRIV_ACCESS_RD); } +static void xehpsdv_whitelist_build(struct intel_engine_cs *engine) +{ + allow_read_ctx_timestamp(engine); +} + +static void dg2_whitelist_build(struct intel_engine_cs *engine) +{ + struct i915_wa_list *w = &engine->whitelist; + + allow_read_ctx_timestamp(engine); + + switch (engine->class) { + case RENDER_CLASS: + /* + * Wa_1507100340:dg2_g10 + * + * This covers 4 registers which are next to one another : + * - PS_INVOCATION_COUNT + * - PS_INVOCATION_COUNT_UDW + * - PS_DEPTH_COUNT + * - PS_DEPTH_COUNT_UDW + */ + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) + whitelist_reg_ext(w, PS_INVOCATION_COUNT, + 
RING_FORCE_TO_NONPRIV_ACCESS_RD | + RING_FORCE_TO_NONPRIV_RANGE_4); + + break; + default: + break; + } +} + void intel_engine_init_whitelist(struct intel_engine_cs *engine) { struct drm_i915_private *i915 = engine->i915; @@ -1637,7 +1947,11 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine) wa_init_start(w, "whitelist", engine->name); - if (IS_DG1(i915)) + if (IS_DG2(i915)) + dg2_whitelist_build(engine); + else if (IS_XEHPSDV(i915)) + xehpsdv_whitelist_build(engine); + else if (IS_DG1(i915)) dg1_whitelist_build(engine); else if (GRAPHICS_VER(i915) == 12) tgl_whitelist_build(engine); @@ -1711,13 +2025,119 @@ engine_fake_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) CMD_CCTL_MOCS_OVERRIDE(mocs, mocs)); } } + +static bool needs_wa_1308578152(struct intel_engine_cs *engine) +{ + u64 dss_mask = intel_sseu_get_subslices(&engine->gt->info.sseu, 0); + + return (dss_mask & GENMASK(GEN_DSS_PER_GSLICE - 1, 0)) == 0; +} + static void rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) { struct drm_i915_private *i915 = engine->i915; - if (IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0) || - IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0)) { + if (IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) { + /* Wa_14013392000:dg2_g11 */ + wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_ENABLE_LARGE_GRF_MODE); + + /* Wa_16011620976:dg2_g11 */ + wa_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8); + } + + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0) || + IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) { + /* Wa_14012419201:dg2 */ + wa_masked_en(wal, GEN9_ROW_CHICKEN4, + GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX); + } + + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_C0) || + IS_DG2_G11(engine->i915)) { + /* + * Wa_22012826095:dg2 + * Wa_22013059131:dg2 + */ + wa_write_clr_set(wal, LSC_CHICKEN_BIT_0_UDW, + MAXREQS_PER_BANK, + REG_FIELD_PREP(MAXREQS_PER_BANK, 2)); + + /* Wa_22013059131:dg2 */ + wa_write_or(wal, LSC_CHICKEN_BIT_0, + FORCE_1_SUB_MESSAGE_PER_FRAGMENT); + } + + /* Wa_1308578152:dg2_g10 when first gslice is fused off */ + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_C0) && + needs_wa_1308578152(engine)) { + wa_masked_dis(wal, GEN12_CS_DEBUG_MODE1_CCCSUNIT_BE_COMMON, + GEN12_REPLAY_MODE_GRANULARITY); + } + + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_FOREVER) || + IS_DG2_G11(engine->i915)) { + /* Wa_22013037850:dg2 */ + wa_write_or(wal, LSC_CHICKEN_BIT_0_UDW, + DISABLE_128B_EVICTION_COMMAND_UDW); + + /* Wa_22012856258:dg2 */ + wa_masked_en(wal, GEN7_ROW_CHICKEN2, + GEN12_DISABLE_READ_SUPPRESSION); + + /* + * Wa_22010960976:dg2 + * Wa_14013347512:dg2 + */ + wa_masked_dis(wal, GEN12_HDC_CHICKEN0, + LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK); + } + + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) { + /* + * Wa_1608949956:dg2_g10 + * Wa_14010198302:dg2_g10 + */ + wa_masked_en(wal, GEN8_ROW_CHICKEN, + MDQ_ARBITRATION_MODE | UGM_BACKUP_MODE); + + /* + * Wa_14010918519:dg2_g10 + * + * LSC_CHICKEN_BIT_0 always reads back as 0 is this stepping, + * so ignoring verification. 
+ */ + wa_add(wal, LSC_CHICKEN_BIT_0_UDW, 0, + FORCE_SLM_FENCE_SCOPE_TO_TILE | FORCE_UGM_FENCE_SCOPE_TO_TILE, + 0, false); + } + + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_B0)) { + /* Wa_22010430635:dg2 */ + wa_masked_en(wal, + GEN9_ROW_CHICKEN4, + GEN12_DISABLE_GRF_CLEAR); + + /* Wa_14010648519:dg2 */ + wa_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE); + } + + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_A0, STEP_C0) || + IS_DG2_G11(engine->i915)) { + /* Wa_22012654132:dg2 */ + wa_add(wal, GEN10_CACHE_MODE_SS, 0, + _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC), + 0 /* write-only, so skip validation */, + true); + } + + /* Wa_14013202645:dg2 */ + if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_C0) || + IS_DG2_GRAPHICS_STEP(engine->i915, G11, STEP_A0, STEP_B0)) + wa_write_or(wal, RT_CTRL, DIS_NULL_QUERY); + + if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) || + IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) { /* * Wa_1607138336:tgl[a0],dg1[a0] * Wa_1607063988:tgl[a0],dg1[a0] @@ -1727,7 +2147,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) GEN12_DISABLE_POSH_BUSY_FF_DOP_CG); } - if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0)) { + if (IS_TGL_UY_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)) { /* * Wa_1606679103:tgl * (see also Wa_1606682166:icl) @@ -1762,7 +2182,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) } if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || - IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0) || + IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) { /* Wa_1409804808:tgl,rkl,dg1[a0],adl-s,adl-p */ wa_masked_en(wal, GEN7_ROW_CHICKEN2, @@ -1775,8 +2195,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH); } - - if (IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0) || + if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) { /* * Wa_1607030317:tgl @@ -1859,15 +2278,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS); - /* Wa_1407352427:icl,ehl */ - wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, - PSDUNIT_CLKGATE_DIS); - - /* Wa_1406680159:icl,ehl */ - wa_write_or(wal, - SUBSLICE_UNIT_LEVEL_CLKGATE, - GWUNIT_CLKGATE_DIS); - /* * Wa_1408767742:icl[a2..forever],ehl[all] * Wa_1605460711:icl[a0..c0] @@ -2138,7 +2548,7 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) struct drm_i915_private *i915 = engine->i915; /* WaKBLVECSSemaphoreWaitPoll:kbl */ - if (IS_KBL_GT_STEP(i915, STEP_A0, STEP_F0)) { + if (IS_KBL_GRAPHICS_STEP(i915, STEP_A0, STEP_F0)) { wa_write(wal, RING_SEMA_WAIT_POLL(engine->mmio_base), 1); diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c index 8b89215afe46..bb99fc03f503 100644 --- a/drivers/gpu/drm/i915/gt/mock_engine.c +++ b/drivers/gpu/drm/i915/gt/mock_engine.c @@ -35,9 +35,31 @@ static void mock_timeline_unpin(struct intel_timeline *tl) atomic_dec(&tl->pin_count); } +static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size) +{ + struct i915_address_space *vm = &ggtt->vm; + struct drm_i915_private *i915 = vm->i915; + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + + obj = i915_gem_object_create_internal(i915, size); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + vma = i915_vma_instance(obj, vm, NULL); + if 
(IS_ERR(vma)) + goto err; + + return vma; + +err: + i915_gem_object_put(obj); + return vma; +} + static struct intel_ring *mock_ring(struct intel_engine_cs *engine) { - const unsigned long sz = PAGE_SIZE / 2; + const unsigned long sz = PAGE_SIZE; struct intel_ring *ring; ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL); @@ -50,15 +72,11 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine) ring->vaddr = (void *)(ring + 1); atomic_set(&ring->pin_count, 1); - ring->vma = i915_vma_alloc(); - if (!ring->vma) { + ring->vma = create_ring_vma(engine->gt->ggtt, PAGE_SIZE); + if (IS_ERR(ring->vma)) { kfree(ring); return NULL; } - i915_active_init(&ring->vma->active, NULL, NULL, 0); - __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(ring->vma)); - __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &ring->vma->node.flags); - ring->vma->node.size = sz; intel_ring_update_space(ring); @@ -67,8 +85,7 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine) static void mock_ring_free(struct intel_ring *ring) { - i915_active_fini(&ring->vma->active); - i915_vma_free(ring->vma); + i915_vma_put(ring->vma); kfree(ring); } @@ -125,6 +142,7 @@ static void mock_context_unpin(struct intel_context *ce) static void mock_context_post_unpin(struct intel_context *ce) { + i915_vma_unpin(ce->ring->vma); } static void mock_context_destroy(struct kref *ref) @@ -169,7 +187,7 @@ static int mock_context_alloc(struct intel_context *ce) static int mock_context_pre_pin(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **unused) { - return 0; + return i915_vma_pin_ww(ce->ring->vma, ww, 0, 0, PIN_GLOBAL | PIN_HIGH); } static int mock_context_pin(struct intel_context *ce, void *unused) diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c index 75569666105d..75f6efc9882f 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c @@ -214,6 +214,31 @@ static int live_engine_timestamps(void *arg) return 0; } +static int __spin_until_busier(struct intel_engine_cs *engine, ktime_t busyness) +{ + ktime_t start, unused, dt; + + if (!intel_engine_uses_guc(engine)) + return 0; + + /* + * In GuC mode of submission, the busyness stats may get updated after + * the batch starts running. Poll for a change in busyness and timeout + * after 500 us. 
+ */ + start = ktime_get(); + while (intel_engine_get_busy_time(engine, &unused) == busyness) { + dt = ktime_get() - start; + if (dt > 500000) { + pr_err("active wait timed out %lld\n", dt); + ENGINE_TRACE(engine, "active wait time out %lld\n", dt); + return -ETIME; + } + } + + return 0; +} + static int live_engine_busy_stats(void *arg) { struct intel_gt *gt = arg; @@ -232,6 +257,7 @@ static int live_engine_busy_stats(void *arg) GEM_BUG_ON(intel_gt_pm_is_awake(gt)); for_each_engine(engine, gt, id) { struct i915_request *rq; + ktime_t busyness, dummy; ktime_t de, dt; ktime_t t[2]; @@ -274,16 +300,23 @@ static int live_engine_busy_stats(void *arg) } i915_request_add(rq); + busyness = intel_engine_get_busy_time(engine, &dummy); if (!igt_wait_for_spinner(&spin, rq)) { intel_gt_set_wedged(engine->gt); err = -ETIME; goto end; } + err = __spin_until_busier(engine, busyness); + if (err) { + GEM_TRACE_DUMP(); + goto end; + } + ENGINE_TRACE(engine, "measuring busy time\n"); preempt_disable(); de = intel_engine_get_busy_time(engine, &t[0]); - udelay(100); + mdelay(10); de = ktime_sub(intel_engine_get_busy_time(engine, &t[1]), de); preempt_enable(); dt = ktime_sub(t[1], t[0]); diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index 7e2d99dd012d..e5ad4d5a91c0 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -471,7 +471,8 @@ static int igt_reset_nop_engine(void *arg) count = 0; st_engine_heartbeat_disable(engine); - set_bit(I915_RESET_ENGINE + id, >->reset.flags); + GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id, + >->reset.flags)); do { int i; @@ -528,7 +529,7 @@ static int igt_reset_nop_engine(void *arg) break; } } while (time_before(jiffies, end_time)); - clear_bit(I915_RESET_ENGINE + id, >->reset.flags); + clear_and_wake_up_bit(I915_RESET_ENGINE + id, >->reset.flags); st_engine_heartbeat_enable(engine); pr_info("%s(%s): %d resets\n", __func__, engine->name, count); @@ -582,7 +583,8 @@ static int igt_reset_fail_engine(void *arg) } st_engine_heartbeat_disable(engine); - set_bit(I915_RESET_ENGINE + id, >->reset.flags); + GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id, + >->reset.flags)); force_reset_timeout(engine); err = intel_engine_reset(engine, NULL); @@ -679,7 +681,7 @@ static int igt_reset_fail_engine(void *arg) out: pr_info("%s(%s): %d resets\n", __func__, engine->name, count); skip: - clear_bit(I915_RESET_ENGINE + id, >->reset.flags); + clear_and_wake_up_bit(I915_RESET_ENGINE + id, >->reset.flags); st_engine_heartbeat_enable(engine); intel_context_put(ce); @@ -734,7 +736,8 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active) reset_engine_count = i915_reset_engine_count(global, engine); st_engine_heartbeat_disable(engine); - set_bit(I915_RESET_ENGINE + id, >->reset.flags); + GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id, + >->reset.flags)); count = 0; do { struct i915_request *rq = NULL; @@ -824,7 +827,7 @@ restore: if (err) break; } while (time_before(jiffies, end_time)); - clear_bit(I915_RESET_ENGINE + id, >->reset.flags); + clear_and_wake_up_bit(I915_RESET_ENGINE + id, >->reset.flags); st_engine_heartbeat_enable(engine); pr_info("%s: Completed %lu %s resets\n", engine->name, count, active ? 
"active" : "idle"); @@ -1042,7 +1045,8 @@ static int __igt_reset_engines(struct intel_gt *gt, yield(); /* start all threads before we begin */ st_engine_heartbeat_disable_no_pm(engine); - set_bit(I915_RESET_ENGINE + id, >->reset.flags); + GEM_BUG_ON(test_and_set_bit(I915_RESET_ENGINE + id, + >->reset.flags)); do { struct i915_request *rq = NULL; struct intel_selftest_saved_policy saved; @@ -1165,7 +1169,7 @@ restore: if (err) break; } while (time_before(jiffies, end_time)); - clear_bit(I915_RESET_ENGINE + id, >->reset.flags); + clear_and_wake_up_bit(I915_RESET_ENGINE + id, >->reset.flags); st_engine_heartbeat_enable_no_pm(engine); pr_info("i915_reset_engine(%s:%s): %lu resets\n", diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c index 12ef2837c89b..e21787301bbd 100644 --- a/drivers/gpu/drm/i915/gt/selftest_migrate.c +++ b/drivers/gpu/drm/i915/gt/selftest_migrate.c @@ -49,6 +49,7 @@ static int copy(struct intel_migrate *migrate, if (IS_ERR(src)) return 0; + sz = src->base.size; dst = i915_gem_object_create_internal(i915, sz); if (IS_ERR(dst)) goto err_free_src; diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h index ba10bd374cee..fe5d7d261797 100644 --- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h +++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h @@ -144,6 +144,7 @@ enum intel_guc_action { INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE = 0x4600, INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601, INTEL_GUC_ACTION_RESET_CLIENT = 0x5507, + INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A, INTEL_GUC_ACTION_LIMIT }; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index 31cf9fb48c7e..1cb46098030d 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -138,6 +138,8 @@ struct intel_guc { u32 ads_regset_size; /** @ads_golden_ctxt_size: size of the golden contexts in the ADS */ u32 ads_golden_ctxt_size; + /** @ads_engine_usage_size: size of engine usage in the ADS */ + u32 ads_engine_usage_size; /** @lrc_desc_pool: object allocated to hold the GuC LRC descriptor pool */ struct i915_vma *lrc_desc_pool; @@ -172,6 +174,34 @@ struct intel_guc { /** @send_mutex: used to serialize the intel_guc_send actions */ struct mutex send_mutex; + + /** + * @timestamp: GT timestamp object that stores a copy of the timestamp + * and adjusts it for overflow using a worker. + */ + struct { + /** + * @lock: Lock protecting the below fields and the engine stats. + */ + spinlock_t lock; + + /** + * @gt_stamp: 64 bit extended value of the GT timestamp. + */ + u64 gt_stamp; + + /** + * @ping_delay: Period for polling the GT timestamp for + * overflow. + */ + unsigned long ping_delay; + + /** + * @work: Periodic work to adjust GT timestamp, engine and + * context usage for overflows. 
+ */ + struct delayed_work work; + } timestamp; }; static inline struct intel_guc *log_to_guc(struct intel_guc_log *log) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c index 621c893a009f..1a1edae67e4e 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c @@ -26,6 +26,8 @@ * | guc_policies | * +---------------------------------------+ * | guc_gt_system_info | + * +---------------------------------------+ + * | guc_engine_usage | * +---------------------------------------+ <== static * | guc_mmio_reg[countA] (engine 0.0) | * | guc_mmio_reg[countB] (engine 0.1) | @@ -47,6 +49,7 @@ struct __guc_ads_blob { struct guc_ads ads; struct guc_policies policies; struct guc_gt_system_info system_info; + struct guc_engine_usage engine_usage; /* From here on, location is dynamic! Refer to above diagram. */ struct guc_mmio_reg regset[0]; } __packed; @@ -628,3 +631,21 @@ void intel_guc_ads_reset(struct intel_guc *guc) guc_ads_private_data_reset(guc); } + +u32 intel_guc_engine_usage_offset(struct intel_guc *guc) +{ + struct __guc_ads_blob *blob = guc->ads_blob; + u32 base = intel_guc_ggtt_offset(guc, guc->ads_vma); + u32 offset = base + ptr_offset(blob, engine_usage); + + return offset; +} + +struct guc_engine_usage_record *intel_guc_engine_usage(struct intel_engine_cs *engine) +{ + struct intel_guc *guc = &engine->gt->uc.guc; + struct __guc_ads_blob *blob = guc->ads_blob; + u8 guc_class = engine_class_to_guc_class(engine->class); + + return &blob->engine_usage.engines[guc_class][ilog2(engine->logical_mask)]; +} diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h index 3d85051d57e4..e74c110facff 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h @@ -6,8 +6,11 @@ #ifndef _INTEL_GUC_ADS_H_ #define _INTEL_GUC_ADS_H_ +#include <linux/types.h> + struct intel_guc; struct drm_printer; +struct intel_engine_cs; int intel_guc_ads_create(struct intel_guc *guc); void intel_guc_ads_destroy(struct intel_guc *guc); @@ -15,5 +18,7 @@ void intel_guc_ads_init_late(struct intel_guc *guc); void intel_guc_ads_reset(struct intel_guc *guc); void intel_guc_ads_print_policy_info(struct intel_guc *guc, struct drm_printer *p); +struct guc_engine_usage_record *intel_guc_engine_usage(struct intel_engine_cs *engine); +u32 intel_guc_engine_usage_offset(struct intel_guc *guc); #endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h index 722933e26347..7072e30e99f4 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h @@ -294,6 +294,19 @@ struct guc_ads { u32 reserved[15]; } __packed; +/* Engine usage stats */ +struct guc_engine_usage_record { + u32 current_context_index; + u32 last_switch_in_stamp; + u32 reserved0; + u32 total_runtime; + u32 reserved1[4]; +} __packed; + +struct guc_engine_usage { + struct guc_engine_usage_record engines[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS]; +} __packed; + /* GuC logging structures */ enum guc_log_buffer_type { diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c index 65a3e7fdb2b2..22c1c12369f2 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c @@ -79,29 +79,6 @@ static void slpc_mem_set_disabled(struct slpc_shared_data *data, slpc_mem_set_param(data, enable_id, 0); } -int 
intel_guc_slpc_init(struct intel_guc_slpc *slpc) -{ - struct intel_guc *guc = slpc_to_guc(slpc); - struct drm_i915_private *i915 = slpc_to_i915(slpc); - u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data)); - int err; - - GEM_BUG_ON(slpc->vma); - - err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr); - if (unlikely(err)) { - drm_err(&i915->drm, - "Failed to allocate SLPC struct (err=%pe)\n", - ERR_PTR(err)); - return err; - } - - slpc->max_freq_softlimit = 0; - slpc->min_freq_softlimit = 0; - - return err; -} - static u32 slpc_get_state(struct intel_guc_slpc *slpc) { struct slpc_shared_data *data; @@ -203,6 +180,86 @@ static int slpc_unset_param(struct intel_guc_slpc *slpc, return guc_action_slpc_unset_param(guc, id); } +static int slpc_force_min_freq(struct intel_guc_slpc *slpc, u32 freq) +{ + struct drm_i915_private *i915 = slpc_to_i915(slpc); + struct intel_guc *guc = slpc_to_guc(slpc); + intel_wakeref_t wakeref; + int ret = 0; + + lockdep_assert_held(&slpc->lock); + + if (!intel_guc_is_ready(guc)) + return -ENODEV; + + /* + * This function is a little different as compared to + * intel_guc_slpc_set_min_freq(). Softlimit will not be updated + * here since this is used to temporarily change min freq, + * for example, during a waitboost. Caller is responsible for + * checking bounds. + */ + + with_intel_runtime_pm(&i915->runtime_pm, wakeref) { + ret = slpc_set_param(slpc, + SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ, + freq); + if (ret) + drm_err(&i915->drm, "Unable to force min freq to %u: %d", + freq, ret); + } + + return ret; +} + +static void slpc_boost_work(struct work_struct *work) +{ + struct intel_guc_slpc *slpc = container_of(work, typeof(*slpc), boost_work); + + /* + * Raise min freq to boost. It's possible that + * this is greater than current max. But it will + * certainly be limited by RP0. An error setting + * the min param is not fatal. 
+ */ + mutex_lock(&slpc->lock); + if (atomic_read(&slpc->num_waiters)) { + slpc_force_min_freq(slpc, slpc->boost_freq); + slpc->num_boosts++; + } + mutex_unlock(&slpc->lock); +} + +int intel_guc_slpc_init(struct intel_guc_slpc *slpc) +{ + struct intel_guc *guc = slpc_to_guc(slpc); + struct drm_i915_private *i915 = slpc_to_i915(slpc); + u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data)); + int err; + + GEM_BUG_ON(slpc->vma); + + err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr); + if (unlikely(err)) { + drm_err(&i915->drm, + "Failed to allocate SLPC struct (err=%pe)\n", + ERR_PTR(err)); + return err; + } + + slpc->max_freq_softlimit = 0; + slpc->min_freq_softlimit = 0; + + slpc->boost_freq = 0; + atomic_set(&slpc->num_waiters, 0); + slpc->num_boosts = 0; + + mutex_init(&slpc->lock); + INIT_WORK(&slpc->boost_work, slpc_boost_work); + + return err; +} + static const char *slpc_global_state_to_string(enum slpc_global_state state) { switch (state) { @@ -393,7 +450,11 @@ int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val) val > slpc->max_freq_softlimit) return -EINVAL; + /* Need a lock now since waitboost can be modifying min as well */ + mutex_lock(&slpc->lock); + with_intel_runtime_pm(&i915->runtime_pm, wakeref) { + ret = slpc_set_param(slpc, SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ, val); @@ -406,6 +467,8 @@ int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val) if (!ret) slpc->min_freq_softlimit = val; + mutex_unlock(&slpc->lock); + return ret; } @@ -522,6 +585,9 @@ static void slpc_get_rp_values(struct intel_guc_slpc *slpc) GT_FREQUENCY_MULTIPLIER; slpc->min_freq = REG_FIELD_GET(RPN_CAP_MASK, rp_state_cap) * GT_FREQUENCY_MULTIPLIER; + + if (!slpc->boost_freq) + slpc->boost_freq = slpc->rp0_freq; } /* @@ -588,6 +654,47 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc) return 0; } +int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val) +{ + int ret = 0; + + if (val < slpc->min_freq || val > slpc->rp0_freq) + return -EINVAL; + + mutex_lock(&slpc->lock); + + if (slpc->boost_freq != val) { + /* Apply only if there are active waiters */ + if (atomic_read(&slpc->num_waiters)) { + ret = slpc_force_min_freq(slpc, val); + if (ret) { + ret = -EIO; + goto done; + } + } + + slpc->boost_freq = val; + } + +done: + mutex_unlock(&slpc->lock); + return ret; +} + +void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc) +{ + /* + * Return min back to the softlimit. + * This is called during request retire, + * so we don't need to fail that if the + * set_param fails. 
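[editor's note] The SLPC additions above treat waitboost as a counted resource: waiters are tracked in num_waiters, boost_work raises the effective minimum frequency only while waiters exist, and intel_guc_slpc_dec_waiters() restores the softlimit when the last waiter retires. A rough sketch of that count-and-defer shape, with generic names standing in for the SLPC internals; intel_guc_slpc_boost() is only declared in this series, so the enqueue side shown here is an assumption:

/* Illustrative waitboost skeleton; boost_domain and apply_min_freq() are
 * placeholders, not i915 code. */
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct boost_domain {
	struct mutex lock;          /* protects freq fields and boost state */
	struct work_struct work;    /* applies the boost outside atomic context */
	atomic_t num_waiters;
	u32 boost_freq;
	u32 min_freq_softlimit;
};

static void apply_min_freq(struct boost_domain *b, u32 freq)
{
	/* stand-in for slpc_force_min_freq(): H2G call under runtime PM */
}

static void boost_work_fn(struct work_struct *work)
{
	struct boost_domain *b = container_of(work, struct boost_domain, work);

	mutex_lock(&b->lock);
	if (atomic_read(&b->num_waiters))	/* still someone waiting? */
		apply_min_freq(b, b->boost_freq);
	mutex_unlock(&b->lock);
}

static void boost_begin(struct boost_domain *b)
{
	/* assumed enqueue side: the first waiter kicks the worker */
	if (atomic_inc_return(&b->num_waiters) == 1)
		schedule_work(&b->work);
}

static void boost_end(struct boost_domain *b)
{
	mutex_lock(&b->lock);
	if (atomic_dec_and_test(&b->num_waiters))	/* last waiter retired */
		apply_min_freq(b, b->min_freq_softlimit);
	mutex_unlock(&b->lock);
}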
+ */ + mutex_lock(&slpc->lock); + if (atomic_dec_and_test(&slpc->num_waiters)) + slpc_force_min_freq(slpc, slpc->min_freq_softlimit); + mutex_unlock(&slpc->lock); +} + int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p) { struct drm_i915_private *i915 = slpc_to_i915(slpc); @@ -611,6 +718,8 @@ int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p slpc_decode_max_freq(slpc)); drm_printf(p, "\tMin freq: %u MHz\n", slpc_decode_min_freq(slpc)); + drm_printf(p, "\twaitboosts: %u\n", + slpc->num_boosts); } } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h index e45054d5b9b4..0caa8fee3c04 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h @@ -34,9 +34,12 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc); void intel_guc_slpc_fini(struct intel_guc_slpc *slpc); int intel_guc_slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 val); int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val); +int intel_guc_slpc_set_boost_freq(struct intel_guc_slpc *slpc, u32 val); int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val); int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val); int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p); void intel_guc_pm_intrmsk_enable(struct intel_gt *gt); +void intel_guc_slpc_boost(struct intel_guc_slpc *slpc); +void intel_guc_slpc_dec_waiters(struct intel_guc_slpc *slpc); #endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h index 41d13527666f..bf5b9a563c09 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h @@ -6,6 +6,9 @@ #ifndef _INTEL_GUC_SLPC_TYPES_H_ #define _INTEL_GUC_SLPC_TYPES_H_ +#include <linux/atomic.h> +#include <linux/workqueue.h> +#include <linux/mutex.h> #include <linux/types.h> #define SLPC_RESET_TIMEOUT_MS 5 @@ -20,10 +23,20 @@ struct intel_guc_slpc { u32 min_freq; u32 rp0_freq; u32 rp1_freq; + u32 boost_freq; /* frequency softlimits */ u32 min_freq_softlimit; u32 max_freq_softlimit; + + /* Protects set/reset of boost freq + * and value of num_waiters + */ + struct mutex lock; + + struct work_struct boost_work; + atomic_t num_waiters; + u32 num_boosts; }; #endif diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index 38b47e73e35d..1f9d4fde421f 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -13,6 +13,7 @@ #include "gt/intel_engine_heartbeat.h" #include "gt/intel_gpu_commands.h" #include "gt/intel_gt.h" +#include "gt/intel_gt_clock_utils.h" #include "gt/intel_gt_irq.h" #include "gt/intel_gt_pm.h" #include "gt/intel_gt_requests.h" @@ -21,6 +22,7 @@ #include "gt/intel_mocs.h" #include "gt/intel_ring.h" +#include "intel_guc_ads.h" #include "intel_guc_submission.h" #include "i915_drv.h" @@ -1077,6 +1079,271 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc) xa_unlock_irqrestore(&guc->context_lookup, flags); } +/* + * GuC stores busyness stats for each engine at context in/out boundaries. A + * context 'in' logs execution start time, 'out' adds in -> out delta to total. + * i915/kmd accesses 'start', 'total' and 'context id' from memory shared with + * GuC. + * + * __i915_pmu_event_read samples engine busyness. 
When sampling, if context id + * is valid (!= ~0) and start is non-zero, the engine is considered to be + * active. For an active engine total busyness = total + (now - start), where + * 'now' is the time at which the busyness is sampled. For inactive engine, + * total busyness = total. + * + * All times are captured from GUCPMTIMESTAMP reg and are in gt clock domain. + * + * The start and total values provided by GuC are 32 bits and wrap around in a + * few minutes. Since perf pmu provides busyness as 64 bit monotonically + * increasing ns values, there is a need for this implementation to account for + * overflows and extend the GuC provided values to 64 bits before returning + * busyness to the user. In order to do that, a worker runs periodically at + * frequency = 1/8th the time it takes for the timestamp to wrap (i.e. once in + * 27 seconds for a gt clock frequency of 19.2 MHz). + */ + +#define WRAP_TIME_CLKS U32_MAX +#define POLL_TIME_CLKS (WRAP_TIME_CLKS >> 3) + +static void +__extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start) +{ + u32 gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp); + u32 gt_stamp_last = lower_32_bits(guc->timestamp.gt_stamp); + + if (new_start == lower_32_bits(*prev_start)) + return; + + if (new_start < gt_stamp_last && + (new_start - gt_stamp_last) <= POLL_TIME_CLKS) + gt_stamp_hi++; + + if (new_start > gt_stamp_last && + (gt_stamp_last - new_start) <= POLL_TIME_CLKS && gt_stamp_hi) + gt_stamp_hi--; + + *prev_start = ((u64)gt_stamp_hi << 32) | new_start; +} + +static void guc_update_engine_gt_clks(struct intel_engine_cs *engine) +{ + struct guc_engine_usage_record *rec = intel_guc_engine_usage(engine); + struct intel_engine_guc_stats *stats = &engine->stats.guc; + struct intel_guc *guc = &engine->gt->uc.guc; + u32 last_switch = rec->last_switch_in_stamp; + u32 ctx_id = rec->current_context_index; + u32 total = rec->total_runtime; + + lockdep_assert_held(&guc->timestamp.lock); + + stats->running = ctx_id != ~0U && last_switch; + if (stats->running) + __extend_last_switch(guc, &stats->start_gt_clk, last_switch); + + /* + * Instead of adjusting the total for overflow, just add the + * difference from previous sample stats->total_gt_clks + */ + if (total && total != ~0U) { + stats->total_gt_clks += (u32)(total - stats->prev_total); + stats->prev_total = total; + } +} + +static void guc_update_pm_timestamp(struct intel_guc *guc, + struct intel_engine_cs *engine, + ktime_t *now) +{ + u32 gt_stamp_now, gt_stamp_hi; + + lockdep_assert_held(&guc->timestamp.lock); + + gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp); + gt_stamp_now = intel_uncore_read(engine->uncore, + RING_TIMESTAMP(engine->mmio_base)); + *now = ktime_get(); + + if (gt_stamp_now < lower_32_bits(guc->timestamp.gt_stamp)) + gt_stamp_hi++; + + guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_now; +} + +/* + * Unlike the execlist mode of submission total and active times are in terms of + * gt clocks. The *now parameter is retained to return the cpu time at which the + * busyness was sampled. 
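[editor's note] The comment block above defines the busyness model: GuC exposes a 32-bit last-switch-in stamp and a 32-bit running total per engine, the driver widens both against its own 64-bit copy of the GT timestamp, and reports total + (now - start) while a context is resident. The standalone program below mirrors that arithmetic with made-up values; the widening rule is the same ±POLL_TIME_CLKS window used by __extend_last_switch():

/* Standalone illustration of the 32->64 bit widening and the
 * "total + (now - start)" busyness formula; not driver code. */
#include <stdint.h>
#include <stdio.h>

#define POLL_TIME_CLKS (UINT32_MAX >> 3)

/* Widen a new 32-bit start stamp against the driver's 64-bit gt_stamp. */
static uint64_t extend_last_switch(uint64_t gt_stamp, uint64_t prev_start,
				   uint32_t new_start)
{
	uint32_t hi = (uint32_t)(gt_stamp >> 32);
	uint32_t lo = (uint32_t)gt_stamp;

	if (new_start == (uint32_t)prev_start)
		return prev_start;

	/* new_start wrapped past gt_stamp's low word: bump the high word */
	if (new_start < lo && (uint32_t)(new_start - lo) <= POLL_TIME_CLKS)
		hi++;
	/* new_start is slightly older than gt_stamp's low word: step back */
	if (new_start > lo && (uint32_t)(lo - new_start) <= POLL_TIME_CLKS && hi)
		hi--;

	return ((uint64_t)hi << 32) | new_start;
}

int main(void)
{
	uint64_t gt_stamp = 0x100000100ULL;   /* driver's widened timestamp */
	uint64_t total_clks = 5000;           /* accumulated context runtime */
	uint32_t last_switch = 0xfffffff0u;   /* GuC stamp from before the wrap */
	int running = 1;                      /* context id valid, start != 0 */

	uint64_t start = extend_last_switch(gt_stamp, 0, last_switch);
	uint64_t busy = total_clks;

	if (running)
		busy += gt_stamp - start;     /* total + (now - start) */

	printf("start=%#llx busy=%llu clks\n",
	       (unsigned long long)start, (unsigned long long)busy);
	return 0;
}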
+ */ +static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now) +{ + struct intel_engine_guc_stats stats_saved, *stats = &engine->stats.guc; + struct i915_gpu_error *gpu_error = &engine->i915->gpu_error; + struct intel_gt *gt = engine->gt; + struct intel_guc *guc = >->uc.guc; + u64 total, gt_stamp_saved; + unsigned long flags; + u32 reset_count; + bool in_reset; + + spin_lock_irqsave(&guc->timestamp.lock, flags); + + /* + * If a reset happened, we risk reading partially updated engine + * busyness from GuC, so we just use the driver stored copy of busyness. + * Synchronize with gt reset using reset_count and the + * I915_RESET_BACKOFF flag. Note that reset flow updates the reset_count + * after I915_RESET_BACKOFF flag, so ensure that the reset_count is + * usable by checking the flag afterwards. + */ + reset_count = i915_reset_count(gpu_error); + in_reset = test_bit(I915_RESET_BACKOFF, >->reset.flags); + + *now = ktime_get(); + + /* + * The active busyness depends on start_gt_clk and gt_stamp. + * gt_stamp is updated by i915 only when gt is awake and the + * start_gt_clk is derived from GuC state. To get a consistent + * view of activity, we query the GuC state only if gt is awake. + */ + if (intel_gt_pm_get_if_awake(gt) && !in_reset) { + stats_saved = *stats; + gt_stamp_saved = guc->timestamp.gt_stamp; + guc_update_engine_gt_clks(engine); + guc_update_pm_timestamp(guc, engine, now); + intel_gt_pm_put_async(gt); + if (i915_reset_count(gpu_error) != reset_count) { + *stats = stats_saved; + guc->timestamp.gt_stamp = gt_stamp_saved; + } + } + + total = intel_gt_clock_interval_to_ns(gt, stats->total_gt_clks); + if (stats->running) { + u64 clk = guc->timestamp.gt_stamp - stats->start_gt_clk; + + total += intel_gt_clock_interval_to_ns(gt, clk); + } + + spin_unlock_irqrestore(&guc->timestamp.lock, flags); + + return ns_to_ktime(total); +} + +static void __reset_guc_busyness_stats(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + struct intel_engine_cs *engine; + enum intel_engine_id id; + unsigned long flags; + ktime_t unused; + + cancel_delayed_work_sync(&guc->timestamp.work); + + spin_lock_irqsave(&guc->timestamp.lock, flags); + + for_each_engine(engine, gt, id) { + guc_update_pm_timestamp(guc, engine, &unused); + guc_update_engine_gt_clks(engine); + engine->stats.guc.prev_total = 0; + } + + spin_unlock_irqrestore(&guc->timestamp.lock, flags); +} + +static void __update_guc_busyness_stats(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + struct intel_engine_cs *engine; + enum intel_engine_id id; + unsigned long flags; + ktime_t unused; + + spin_lock_irqsave(&guc->timestamp.lock, flags); + for_each_engine(engine, gt, id) { + guc_update_pm_timestamp(guc, engine, &unused); + guc_update_engine_gt_clks(engine); + } + spin_unlock_irqrestore(&guc->timestamp.lock, flags); +} + +static void guc_timestamp_ping(struct work_struct *wrk) +{ + struct intel_guc *guc = container_of(wrk, typeof(*guc), + timestamp.work.work); + struct intel_uc *uc = container_of(guc, typeof(*uc), guc); + struct intel_gt *gt = guc_to_gt(guc); + intel_wakeref_t wakeref; + int srcu, ret; + + /* + * Synchronize with gt reset to make sure the worker does not + * corrupt the engine/guc stats. 
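[editor's note] The worker period described above ("1/8th the time it takes for the timestamp to wrap, i.e. once in 27 seconds for a gt clock frequency of 19.2 MHz") and the ping_delay computed later in intel_guc_submission_init() both fall out of the 32-bit counter width. A quick standalone check of that arithmetic; the 19.2 MHz figure is the example from the comment and HZ is assumed to be 250 purely for illustration:

/* Standalone check of the polling-period numbers quoted in the comment. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t wrap_clks = UINT32_MAX;        /* WRAP_TIME_CLKS */
	const uint64_t poll_clks = wrap_clks >> 3;    /* POLL_TIME_CLKS  */
	const uint64_t gt_freq_hz = 19200000;         /* 19.2 MHz example  */
	const uint64_t hz = 250;                      /* assumed CONFIG_HZ */

	/* Full wrap of the 32-bit GT timestamp: ~223.7 s at 19.2 MHz. */
	printf("wrap period : %.1f s\n", (double)wrap_clks / gt_freq_hz);

	/* Poll at 1/8th of that: ~28 s, matching "once in 27 seconds". */
	printf("poll period : %.1f s\n", (double)poll_clks / gt_freq_hz);

	/* ping_delay as computed in intel_guc_submission_init(). */
	printf("ping_delay  : %llu jiffies\n",
	       (unsigned long long)((poll_clks / gt_freq_hz + 1) * hz));
	return 0;
}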
+ */ + ret = intel_gt_reset_trylock(gt, &srcu); + if (ret) + return; + + with_intel_runtime_pm(>->i915->runtime_pm, wakeref) + __update_guc_busyness_stats(guc); + + intel_gt_reset_unlock(gt, srcu); + + mod_delayed_work(system_highpri_wq, &guc->timestamp.work, + guc->timestamp.ping_delay); +} + +static int guc_action_enable_usage_stats(struct intel_guc *guc) +{ + u32 offset = intel_guc_engine_usage_offset(guc); + u32 action[] = { + INTEL_GUC_ACTION_SET_ENG_UTIL_BUFF, + offset, + 0, + }; + + return intel_guc_send(guc, action, ARRAY_SIZE(action)); +} + +static void guc_init_engine_stats(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + intel_wakeref_t wakeref; + + mod_delayed_work(system_highpri_wq, &guc->timestamp.work, + guc->timestamp.ping_delay); + + with_intel_runtime_pm(>->i915->runtime_pm, wakeref) { + int ret = guc_action_enable_usage_stats(guc); + + if (ret) + drm_err(>->i915->drm, + "Failed to enable usage stats: %d!\n", ret); + } +} + +void intel_guc_busyness_park(struct intel_gt *gt) +{ + struct intel_guc *guc = >->uc.guc; + + if (!guc_submission_initialized(guc)) + return; + + cancel_delayed_work(&guc->timestamp.work); + __update_guc_busyness_stats(guc); +} + +void intel_guc_busyness_unpark(struct intel_gt *gt) +{ + struct intel_guc *guc = >->uc.guc; + + if (!guc_submission_initialized(guc)) + return; + + mod_delayed_work(system_highpri_wq, &guc->timestamp.work, + guc->timestamp.ping_delay); +} + static inline bool submission_disabled(struct intel_guc *guc) { @@ -1138,6 +1405,7 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc) intel_gt_park_heartbeats(guc_to_gt(guc)); disable_submission(guc); guc->interrupts.disable(guc); + __reset_guc_busyness_stats(guc); /* Flush IRQ handler */ spin_lock_irq(&guc_to_gt(guc)->irq_lock); @@ -1484,6 +1752,7 @@ static void destroyed_worker_func(struct work_struct *w); */ int intel_guc_submission_init(struct intel_guc *guc) { + struct intel_gt *gt = guc_to_gt(guc); int ret; if (guc->lrc_desc_pool) @@ -1512,6 +1781,10 @@ int intel_guc_submission_init(struct intel_guc *guc) if (!guc->submission_state.guc_ids_bitmap) return -ENOMEM; + spin_lock_init(&guc->timestamp.lock); + INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping); + guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ; + return 0; } @@ -3080,8 +3353,8 @@ guc_create_parallel(struct intel_engine_cs **engines, ce = intel_engine_create_virtual(siblings, num_siblings, FORCE_VIRTUAL); - if (!ce) { - err = ERR_PTR(-ENOMEM); + if (IS_ERR(ce)) { + err = ERR_CAST(ce); goto unwind; } @@ -3369,7 +3642,9 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine) engine->emit_flush = gen12_emit_flush_xcs; } engine->set_default_submission = guc_set_default_submission; + engine->busyness = guc_engine_busyness; + engine->flags |= I915_ENGINE_SUPPORTS_STATS; engine->flags |= I915_ENGINE_HAS_PREEMPTION; engine->flags |= I915_ENGINE_HAS_TIMESLICES; @@ -3468,6 +3743,7 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine) void intel_guc_submission_enable(struct intel_guc *guc) { guc_init_lrc_mapping(guc); + guc_init_engine_stats(guc); } void intel_guc_submission_disable(struct intel_guc *guc) @@ -3695,6 +3971,7 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc, const u32 *msg, u32 len) { struct intel_context *ce; + unsigned long flags; int desc_idx; if (unlikely(len != 1)) { @@ -3703,11 +3980,24 @@ int intel_guc_context_reset_process_msg(struct intel_guc *guc, } desc_idx = msg[0]; + + /* + * The context lookup uses 
the xarray but lookups only require an RCU lock + * not the full spinlock. So take the lock explicitly and keep it until the + * context has been reference count locked to ensure it can't be destroyed + * asynchronously until the reset is done. + */ + xa_lock_irqsave(&guc->context_lookup, flags); ce = g2h_context_lookup(guc, desc_idx); + if (ce) + intel_context_get(ce); + xa_unlock_irqrestore(&guc->context_lookup, flags); + if (unlikely(!ce)) return -EPROTO; guc_handle_context_reset(guc, ce); + intel_context_put(ce); return 0; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h index c7ef44fa0c36..5a95a9f0a8e3 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h @@ -28,6 +28,8 @@ void intel_guc_submission_print_context_info(struct intel_guc *guc, void intel_guc_dump_active_requests(struct intel_engine_cs *engine, struct i915_request *hung_rq, struct drm_printer *m); +void intel_guc_busyness_park(struct intel_gt *gt); +void intel_guc_busyness_unpark(struct intel_gt *gt); bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve); diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 53d0cb327539..99d1781fa5f0 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -446,17 +446,17 @@ static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e) || e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) return (e->val64 != 0); else - return (e->val64 & _PAGE_PRESENT); + return (e->val64 & GEN8_PAGE_PRESENT); } static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e) { - e->val64 &= ~_PAGE_PRESENT; + e->val64 &= ~GEN8_PAGE_PRESENT; } static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e) { - e->val64 |= _PAGE_PRESENT; + e->val64 |= GEN8_PAGE_PRESENT; } static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e) @@ -2439,7 +2439,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, /* The entry parameters like present/writeable/cache type * set to the same as i915's scratch page tree. 
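[editor's note] The context-reset handler above now takes the xarray's spinlock around g2h_context_lookup() and grabs a context reference before dropping the lock, so the context cannot be freed asynchronously while the reset is processed. The same lock-lookup-get-unlock shape, reduced to generic xarray plus kref primitives; the object type and release function below are placeholders:

/* Illustrative lookup-with-reference pattern; 'struct obj' and obj_release()
 * are placeholders, not i915 types. */
#include <linux/kref.h>
#include <linux/xarray.h>

struct obj {
	struct kref ref;
};

static void obj_release(struct kref *ref) { /* free the object here */ }

static struct obj *lookup_get(struct xarray *xa, unsigned long id)
{
	unsigned long flags;
	struct obj *o;

	/* Hold the xarray lock across lookup + get so the entry cannot be
	 * erased and freed between the two steps. */
	xa_lock_irqsave(xa, flags);
	o = xa_load(xa, id);
	if (o)
		kref_get(&o->ref);
	xa_unlock_irqrestore(xa, flags);

	return o;	/* caller does kref_put(&o->ref, obj_release) when done */
}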
*/ - se.val64 |= _PAGE_PRESENT | _PAGE_RW; + se.val64 |= GEN8_PAGE_PRESENT | GEN8_PAGE_RW; if (type == GTT_TYPE_PPGTT_PDE_PT) se.val64 |= PPAT_CACHED; @@ -2896,7 +2896,7 @@ void intel_gvt_restore_ggtt(struct intel_gvt *gvt) offset = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT; for (idx = 0; idx < num_low; idx++) { pte = mm->ggtt_mm.host_ggtt_aperture[idx]; - if (pte & _PAGE_PRESENT) + if (pte & GEN8_PAGE_PRESENT) write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte); } @@ -2904,7 +2904,7 @@ void intel_gvt_restore_ggtt(struct intel_gvt *gvt) offset = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT; for (idx = 0; idx < num_hi; idx++) { pte = mm->ggtt_mm.host_ggtt_hidden[idx]; - if (pte & _PAGE_PRESENT) + if (pte & GEN8_PAGE_PRESENT) write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte); } } diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index fe638b5da7c0..390d541f64ea 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -65,6 +65,7 @@ static int i915_capabilities(struct seq_file *m, void *data) intel_device_info_print_static(INTEL_INFO(i915), &p); intel_device_info_print_runtime(RUNTIME_INFO(i915), &p); + i915_print_iommu_status(i915, &p); intel_gt_info_print(&i915->gt.info, &p); intel_driver_caps_print(&i915->caps, &p); diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_driver.c index b18a250e5d2e..bbc99fc5888f 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -29,8 +29,8 @@ #include <linux/acpi.h> #include <linux/device.h> -#include <linux/oom.h> #include <linux/module.h> +#include <linux/oom.h> #include <linux/pci.h> #include <linux/pm.h> #include <linux/pm_runtime.h> @@ -48,12 +48,14 @@ #include "display/intel_acpi.h" #include "display/intel_bw.h" #include "display/intel_cdclk.h" -#include "display/intel_dmc.h" #include "display/intel_display_types.h" +#include "display/intel_dmc.h" #include "display/intel_dp.h" +#include "display/intel_dpt.h" #include "display/intel_fbdev.h" #include "display/intel_hotplug.h" #include "display/intel_overlay.h" +#include "display/intel_pch_refclk.h" #include "display/intel_pipe_crc.h" #include "display/intel_pps.h" #include "display/intel_sprite.h" @@ -70,6 +72,7 @@ #include "pxp/intel_pxp_pm.h" #include "i915_debugfs.h" +#include "i915_driver.h" #include "i915_drv.h" #include "i915_ioc32.h" #include "i915_irq.h" @@ -89,7 +92,7 @@ #include "intel_region_ttm.h" #include "vlv_suspend.h" -static const struct drm_driver driver; +static const struct drm_driver i915_drm_driver; static int i915_get_bridge_dev(struct drm_i915_private *dev_priv) { @@ -322,7 +325,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) mutex_init(&dev_priv->sb_lock); cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE); - mutex_init(&dev_priv->av_mutex); + mutex_init(&dev_priv->audio.mutex); mutex_init(&dev_priv->wm.wm_mutex); mutex_init(&dev_priv->pps_mutex); mutex_init(&dev_priv->hdcp_comp_mutex); @@ -415,10 +418,14 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv) if (ret < 0) return ret; - ret = intel_uncore_init_mmio(&dev_priv->uncore); + ret = intel_uncore_setup_mmio(&dev_priv->uncore); if (ret < 0) goto err_bridge; + ret = intel_uncore_init_mmio(&dev_priv->uncore); + if (ret) + goto err_mmio; + /* Try to make sure MCHBAR is enabled before poking at it */ intel_setup_mchbar(dev_priv); intel_device_info_runtime_init(dev_priv); @@ -435,6 +442,8 @@ static int i915_driver_mmio_probe(struct 
drm_i915_private *dev_priv) err_uncore: intel_teardown_mchbar(dev_priv); intel_uncore_fini_mmio(&dev_priv->uncore); +err_mmio: + intel_uncore_cleanup_mmio(&dev_priv->uncore); err_bridge: pci_dev_put(dev_priv->bridge_dev); @@ -449,6 +458,7 @@ static void i915_driver_mmio_release(struct drm_i915_private *dev_priv) { intel_teardown_mchbar(dev_priv); intel_uncore_fini_mmio(&dev_priv->uncore); + intel_uncore_cleanup_mmio(&dev_priv->uncore); pci_dev_put(dev_priv->bridge_dev); } @@ -731,6 +741,12 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv) i915_gem_driver_unregister(dev_priv); } +void +i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p) +{ + drm_printf(p, "iommu: %s\n", enableddisabled(intel_vtd_active(i915))); +} + static void i915_welcome_messages(struct drm_i915_private *dev_priv) { if (drm_debug_enabled(DRM_UT_DRIVER)) { @@ -746,6 +762,7 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv) intel_device_info_print_static(INTEL_INFO(dev_priv), &p); intel_device_info_print_runtime(RUNTIME_INFO(dev_priv), &p); + i915_print_iommu_status(dev_priv, &p); intel_gt_info_print(&dev_priv->gt.info, &p); } @@ -766,7 +783,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent) struct intel_device_info *device_info; struct drm_i915_private *i915; - i915 = devm_drm_dev_alloc(&pdev->dev, &driver, + i915 = devm_drm_dev_alloc(&pdev->dev, &i915_drm_driver, struct drm_i915_private, drm); if (IS_ERR(i915)) return i915; @@ -807,7 +824,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return PTR_ERR(i915); /* Disable nuclear pageflip by default on pre-ILK */ - if (!i915->params.nuclear_pageflip && match_info->graphics_ver < 5) + if (!i915->params.nuclear_pageflip && match_info->graphics.ver < 5) i915->drm.driver_features &= ~DRIVER_ATOMIC; /* @@ -1127,6 +1144,8 @@ static int i915_drm_suspend(struct drm_device *dev) intel_suspend_hw(dev_priv); + /* Must be called before GGTT is suspended. */ + intel_dpt_suspend(dev_priv); i915_ggtt_suspend(&dev_priv->ggtt); i915_save_display(dev_priv); @@ -1183,6 +1202,14 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) goto out; } + /* + * FIXME: Temporary hammer to avoid freezing the machine on our DGFX + * This should be totally removed when we handle the pci states properly + * on runtime PM and on s2idle cases. + */ + if (suspend_to_idle(dev_priv)) + pci_d3cold_disable(pdev); + pci_disable_device(pdev); /* * During hibernation on some platforms the BIOS may try to access @@ -1207,7 +1234,8 @@ out: return ret; } -int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state) +int i915_driver_suspend_switcheroo(struct drm_i915_private *i915, + pm_message_t state) { int error; @@ -1243,6 +1271,8 @@ static int i915_drm_resume(struct drm_device *dev) drm_err(&dev_priv->drm, "failed to re-enable GGTT\n"); i915_ggtt_resume(&dev_priv->ggtt); + /* Must be called after GGTT is resumed. 
*/ + intel_dpt_resume(dev_priv); intel_dmc_ucode_resume(dev_priv); @@ -1344,6 +1374,8 @@ static int i915_drm_resume_early(struct drm_device *dev) pci_set_master(pdev); + pci_d3cold_enable(pdev); + disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); ret = vlv_resume_prepare(dev_priv, false); @@ -1364,7 +1396,7 @@ static int i915_drm_resume_early(struct drm_device *dev) return ret; } -int i915_resume_switcheroo(struct drm_i915_private *i915) +int i915_driver_resume_switcheroo(struct drm_i915_private *i915) { int ret; @@ -1520,6 +1552,7 @@ static int intel_runtime_suspend(struct device *kdev) { struct drm_i915_private *dev_priv = kdev_to_i915(kdev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); int ret; if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv))) @@ -1565,6 +1598,12 @@ static int intel_runtime_suspend(struct device *kdev) drm_err(&dev_priv->drm, "Unclaimed access detected prior to suspending\n"); + /* + * FIXME: Temporary hammer to avoid freezing the machine on our DGFX + * This should be totally removed when we handle the pci states properly + * on runtime PM and on s2idle cases. + */ + pci_d3cold_disable(pdev); rpm->suspended = true; /* @@ -1603,6 +1642,7 @@ static int intel_runtime_resume(struct device *kdev) { struct drm_i915_private *dev_priv = kdev_to_i915(kdev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); int ret; if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv))) @@ -1615,6 +1655,7 @@ static int intel_runtime_resume(struct device *kdev) intel_opregion_notify_adapter(dev_priv, PCI_D0); rpm->suspended = false; + pci_d3cold_enable(pdev); if (intel_uncore_unclaimed_mmio(&dev_priv->uncore)) drm_dbg(&dev_priv->drm, "Unclaimed access during suspend, bios?\n"); @@ -1777,7 +1818,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW), }; -static const struct drm_driver driver = { +static const struct drm_driver i915_drm_driver = { /* Don't use MTRRs here; the Xserver or userspace app should * deal with them for Intel hardware. 
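[editor's note] Both suspend paths in this series disable D3cold with the same "temporary hammer" FIXME, and both matching resume paths re-enable it, so the restriction only spans the low-power transition. A minimal sketch of that pairing; the device pointer and callbacks are placeholders, and the real driver additionally keys the system-suspend case on suspend_to_idle():

/* Illustrative pci_d3cold_disable()/pci_d3cold_enable() pairing. */
#include <linux/pci.h>

static int my_suspend(struct pci_dev *pdev)
{
	/* Keep the device out of D3cold while suspended (workaround). */
	pci_d3cold_disable(pdev);
	return 0;
}

static int my_resume(struct pci_dev *pdev)
{
	/* Lift the restriction again once the device is back up. */
	pci_d3cold_enable(pdev);
	return 0;
}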
*/ diff --git a/drivers/gpu/drm/i915/i915_driver.h b/drivers/gpu/drm/i915/i915_driver.h new file mode 100644 index 000000000000..9ef8db4aa0a6 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_driver.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __I915_DRIVER_H__ +#define __I915_DRIVER_H__ + +#include <linux/pm.h> + +struct pci_dev; +struct pci_device_id; +struct drm_i915_private; + +extern const struct dev_pm_ops i915_pm_ops; + +int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +void i915_driver_remove(struct drm_i915_private *i915); +void i915_driver_shutdown(struct drm_i915_private *i915); + +int i915_driver_resume_switcheroo(struct drm_i915_private *i915); +int i915_driver_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state); + +#endif /* __I915_DRIVER_H__ */ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 12256218634f..d99e020773ac 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -50,7 +50,6 @@ #include <linux/stackdepot.h> #include <linux/xarray.h> -#include <drm/intel-gtt.h> #include <drm/drm_gem.h> #include <drm/drm_auth.h> #include <drm/drm_cache.h> @@ -191,8 +190,6 @@ struct i915_hotplug { I915_GEM_DOMAIN_VERTEX) struct drm_i915_private; -struct i915_mm_struct; -struct i915_mmu_object; struct drm_i915_file_private { struct drm_i915_private *dev_priv; @@ -364,15 +361,6 @@ struct intel_color_funcs { void (*read_luts)(struct intel_crtc_state *crtc_state); }; -struct intel_audio_funcs { - void (*audio_codec_enable)(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state); - void (*audio_codec_disable)(struct intel_encoder *encoder, - const struct intel_crtc_state *old_crtc_state, - const struct drm_connector_state *old_conn_state); -}; - struct intel_cdclk_funcs { void (*get_cdclk)(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config); @@ -411,10 +399,14 @@ struct drm_i915_display_funcs { void (*commit_modeset_enables)(struct intel_atomic_state *state); }; +struct intel_fbc_funcs; #define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */ struct intel_fbc { + struct drm_i915_private *i915; + const struct intel_fbc_funcs *funcs; + /* This is always the inner lock when overlapping with struct_mutex and * it's the outer lock when overlapping with stolen_lock. */ struct mutex lock; @@ -828,6 +820,30 @@ struct i915_selftest_stash { struct ida mock_region_instances; }; +/* intel_audio.c private */ +struct intel_audio_funcs; +struct intel_audio_private { + /* Display internal audio functions */ + const struct intel_audio_funcs *funcs; + + /* hda/i915 audio component */ + struct i915_audio_component *component; + bool component_registered; + /* mutex for audio/video sync */ + struct mutex mutex; + int power_refcount; + u32 freq_cntrl; + + /* Used to save the pipe-to-encoder mapping for audio */ + struct intel_encoder *encoder_map[I915_MAX_PIPES]; + + /* necessary resource sharing with HDMI LPE audio driver. 
*/ + struct { + struct platform_device *platdev; + int irq; + } lpe; +}; + struct drm_i915_private { struct drm_device drm; @@ -995,9 +1011,6 @@ struct drm_i915_private { /* Display internal color functions */ const struct intel_color_funcs *color_funcs; - /* Display internal audio functions */ - const struct intel_audio_funcs *audio_funcs; - /* Display CDCLK functions */ const struct intel_cdclk_funcs *cdclk_funcs; @@ -1084,17 +1097,6 @@ struct drm_i915_private { struct drm_property *broadcast_rgb_property; struct drm_property *force_audio_property; - /* hda/i915 audio component */ - struct i915_audio_component *audio_component; - bool audio_component_registered; - /** - * av_mutex - mutex for audio/video sync - * - */ - struct mutex av_mutex; - int audio_power_refcount; - u32 audio_freq_cntrl; - u32 fdi_rx_config; /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */ @@ -1227,14 +1229,7 @@ struct drm_i915_private { bool ipc_enabled; - /* Used to save the pipe-to-encoder mapping for audio */ - struct intel_encoder *av_enc_map[I915_MAX_PIPES]; - - /* necessary resource sharing with HDMI LPE audio driver. */ - struct { - struct platform_device *platdev; - int irq; - } lpe_audio; + struct intel_audio_private audio; struct i915_pmu pmu; @@ -1327,15 +1322,15 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev) #define IP_VER(ver, rel) ((ver) << 8 | (rel)) -#define GRAPHICS_VER(i915) (INTEL_INFO(i915)->graphics_ver) -#define GRAPHICS_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->graphics_ver, \ - INTEL_INFO(i915)->graphics_rel) +#define GRAPHICS_VER(i915) (INTEL_INFO(i915)->graphics.ver) +#define GRAPHICS_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->graphics.ver, \ + INTEL_INFO(i915)->graphics.rel) #define IS_GRAPHICS_VER(i915, from, until) \ (GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until)) -#define MEDIA_VER(i915) (INTEL_INFO(i915)->media_ver) -#define MEDIA_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->media_ver, \ - INTEL_INFO(i915)->media_rel) +#define MEDIA_VER(i915) (INTEL_INFO(i915)->media.ver) +#define MEDIA_VER_FULL(i915) IP_VER(INTEL_INFO(i915)->media.arch, \ + INTEL_INFO(i915)->media.rel) #define IS_MEDIA_VER(i915, from, until) \ (MEDIA_VER(i915) >= (from) && MEDIA_VER(i915) <= (until)) @@ -1348,15 +1343,20 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev) #define HAS_DSB(dev_priv) (INTEL_INFO(dev_priv)->display.has_dsb) #define INTEL_DISPLAY_STEP(__i915) (RUNTIME_INFO(__i915)->step.display_step) -#define INTEL_GT_STEP(__i915) (RUNTIME_INFO(__i915)->step.gt_step) +#define INTEL_GRAPHICS_STEP(__i915) (RUNTIME_INFO(__i915)->step.graphics_step) +#define INTEL_MEDIA_STEP(__i915) (RUNTIME_INFO(__i915)->step.media_step) #define IS_DISPLAY_STEP(__i915, since, until) \ (drm_WARN_ON(&(__i915)->drm, INTEL_DISPLAY_STEP(__i915) == STEP_NONE), \ INTEL_DISPLAY_STEP(__i915) >= (since) && INTEL_DISPLAY_STEP(__i915) < (until)) -#define IS_GT_STEP(__i915, since, until) \ - (drm_WARN_ON(&(__i915)->drm, INTEL_GT_STEP(__i915) == STEP_NONE), \ - INTEL_GT_STEP(__i915) >= (since) && INTEL_GT_STEP(__i915) < (until)) +#define IS_GRAPHICS_STEP(__i915, since, until) \ + (drm_WARN_ON(&(__i915)->drm, INTEL_GRAPHICS_STEP(__i915) == STEP_NONE), \ + INTEL_GRAPHICS_STEP(__i915) >= (since) && INTEL_GRAPHICS_STEP(__i915) < (until)) + +#define IS_MEDIA_STEP(__i915, since, until) \ + (drm_WARN_ON(&(__i915)->drm, INTEL_MEDIA_STEP(__i915) == STEP_NONE), \ + INTEL_MEDIA_STEP(__i915) >= (since) && INTEL_MEDIA_STEP(__i915) < (until)) static __always_inline unsigned 
int __platform_mask_index(const struct intel_runtime_info *info, @@ -1455,7 +1455,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define IS_GEMINILAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_GEMINILAKE) #define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE) #define IS_COMETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COMETLAKE) -#define IS_CANNONLAKE(dev_priv) 0 #define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE) #define IS_JSL_EHL(dev_priv) (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE) || \ IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)) @@ -1530,15 +1529,15 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define IS_TGL_Y(dev_priv) \ IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_ULX) -#define IS_SKL_GT_STEP(p, since, until) (IS_SKYLAKE(p) && IS_GT_STEP(p, since, until)) +#define IS_SKL_GRAPHICS_STEP(p, since, until) (IS_SKYLAKE(p) && IS_GRAPHICS_STEP(p, since, until)) -#define IS_KBL_GT_STEP(dev_priv, since, until) \ - (IS_KABYLAKE(dev_priv) && IS_GT_STEP(dev_priv, since, until)) +#define IS_KBL_GRAPHICS_STEP(dev_priv, since, until) \ + (IS_KABYLAKE(dev_priv) && IS_GRAPHICS_STEP(dev_priv, since, until)) #define IS_KBL_DISPLAY_STEP(dev_priv, since, until) \ (IS_KABYLAKE(dev_priv) && IS_DISPLAY_STEP(dev_priv, since, until)) -#define IS_JSL_EHL_GT_STEP(p, since, until) \ - (IS_JSL_EHL(p) && IS_GT_STEP(p, since, until)) +#define IS_JSL_EHL_GRAPHICS_STEP(p, since, until) \ + (IS_JSL_EHL(p) && IS_GRAPHICS_STEP(p, since, until)) #define IS_JSL_EHL_DISPLAY_STEP(p, since, until) \ (IS_JSL_EHL(p) && IS_DISPLAY_STEP(p, since, until)) @@ -1546,19 +1545,19 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, (IS_TIGERLAKE(__i915) && \ IS_DISPLAY_STEP(__i915, since, until)) -#define IS_TGL_UY_GT_STEP(__i915, since, until) \ +#define IS_TGL_UY_GRAPHICS_STEP(__i915, since, until) \ ((IS_TGL_U(__i915) || IS_TGL_Y(__i915)) && \ - IS_GT_STEP(__i915, since, until)) + IS_GRAPHICS_STEP(__i915, since, until)) -#define IS_TGL_GT_STEP(__i915, since, until) \ +#define IS_TGL_GRAPHICS_STEP(__i915, since, until) \ (IS_TIGERLAKE(__i915) && !(IS_TGL_U(__i915) || IS_TGL_Y(__i915)) && \ - IS_GT_STEP(__i915, since, until)) + IS_GRAPHICS_STEP(__i915, since, until)) #define IS_RKL_DISPLAY_STEP(p, since, until) \ (IS_ROCKETLAKE(p) && IS_DISPLAY_STEP(p, since, until)) -#define IS_DG1_GT_STEP(p, since, until) \ - (IS_DG1(p) && IS_GT_STEP(p, since, until)) +#define IS_DG1_GRAPHICS_STEP(p, since, until) \ + (IS_DG1(p) && IS_GRAPHICS_STEP(p, since, until)) #define IS_DG1_DISPLAY_STEP(p, since, until) \ (IS_DG1(p) && IS_DISPLAY_STEP(p, since, until)) @@ -1566,20 +1565,20 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, (IS_ALDERLAKE_S(__i915) && \ IS_DISPLAY_STEP(__i915, since, until)) -#define IS_ADLS_GT_STEP(__i915, since, until) \ +#define IS_ADLS_GRAPHICS_STEP(__i915, since, until) \ (IS_ALDERLAKE_S(__i915) && \ - IS_GT_STEP(__i915, since, until)) + IS_GRAPHICS_STEP(__i915, since, until)) #define IS_ADLP_DISPLAY_STEP(__i915, since, until) \ (IS_ALDERLAKE_P(__i915) && \ IS_DISPLAY_STEP(__i915, since, until)) -#define IS_ADLP_GT_STEP(__i915, since, until) \ +#define IS_ADLP_GRAPHICS_STEP(__i915, since, until) \ (IS_ALDERLAKE_P(__i915) && \ - IS_GT_STEP(__i915, since, until)) + IS_GRAPHICS_STEP(__i915, since, until)) -#define IS_XEHPSDV_GT_STEP(__i915, since, until) \ - (IS_XEHPSDV(__i915) && IS_GT_STEP(__i915, since, until)) +#define IS_XEHPSDV_GRAPHICS_STEP(__i915, since, until) \ + (IS_XEHPSDV(__i915) && IS_GRAPHICS_STEP(__i915, since, until)) /* * DG2 hardware steppings 
are a bit unusual. The hardware design was forked @@ -1595,9 +1594,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, * and stepping-specific logic will be applied with a general DG2-wide stepping * number. */ -#define IS_DG2_GT_STEP(__i915, variant, since, until) \ +#define IS_DG2_GRAPHICS_STEP(__i915, variant, since, until) \ (IS_SUBPLATFORM(__i915, INTEL_DG2, INTEL_SUBPLATFORM_##variant) && \ - IS_GT_STEP(__i915, since, until)) + IS_GRAPHICS_STEP(__i915, since, until)) #define IS_DG2_DISP_STEP(__i915, since, until) \ (IS_DG2(__i915) && \ @@ -1745,7 +1744,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0) -#define HAS_VRR(i915) (GRAPHICS_VER(i915) >= 12) +#define HAS_VRR(i915) (GRAPHICS_VER(i915) >= 11) #define HAS_ASYNC_FLIPS(i915) (DISPLAY_VER(i915) >= 5) @@ -1761,26 +1760,27 @@ static inline bool run_as_guest(void) #define HAS_D12_PLANE_MINIMIZATION(dev_priv) (IS_ROCKETLAKE(dev_priv) || \ IS_ALDERLAKE_S(dev_priv)) -static inline bool intel_vtd_active(void) +static inline bool intel_vtd_active(struct drm_i915_private *i915) { -#ifdef CONFIG_INTEL_IOMMU - if (intel_iommu_gfx_mapped) + if (device_iommu_mapped(i915->drm.dev)) return true; -#endif /* Running as a guest, we assume the host is enforcing VT'd */ return run_as_guest(); } +void +i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p); + static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv) { - return GRAPHICS_VER(dev_priv) >= 6 && intel_vtd_active(); + return GRAPHICS_VER(dev_priv) >= 6 && intel_vtd_active(dev_priv); } static inline bool intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *i915) { - return IS_BROXTON(i915) && intel_vtd_active(); + return IS_BROXTON(i915) && intel_vtd_active(i915); } static inline bool @@ -1789,16 +1789,7 @@ intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915) return IS_CHERRYVIEW(i915) || intel_ggtt_update_needs_vtd_wa(i915); } -/* i915_drv.c */ -extern const struct dev_pm_ops i915_pm_ops; - -int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent); -void i915_driver_remove(struct drm_i915_private *i915); -void i915_driver_shutdown(struct drm_i915_private *i915); - -int i915_resume_switcheroo(struct drm_i915_private *i915); -int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state); - +/* i915_getparam.c */ int i915_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); @@ -1819,6 +1810,7 @@ static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915) */ while (atomic_read(&i915->mm.free_count)) { flush_work(&i915->mm.free_work); + flush_delayed_work(&i915->bdev.wq); rcu_barrier(); } } @@ -1933,6 +1925,10 @@ int i915_gem_evict_vm(struct i915_address_space *vm); struct drm_i915_gem_object * i915_gem_object_create_internal(struct drm_i915_private *dev_priv, phys_addr_t size); +struct drm_i915_gem_object * +__i915_gem_object_create_internal(struct drm_i915_private *dev_priv, + const struct drm_i915_gem_object_ops *ops, + phys_addr_t size); /* i915_gem_tiling.c */ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 981e383d1a5d..527228d4da7e 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -764,7 +764,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, * perspective, requiring manual detiling by the client. 
*/ if (!i915_gem_object_has_struct_page(obj) || - cpu_write_needs_clflush(obj)) + i915_gem_cpu_write_needs_clflush(obj)) /* Note that the gtt paths might fail with non-page-backed user * pointers (e.g. gtt mappings when moving data between * textures). Fallback to the shmem path in that case. @@ -1005,7 +1005,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, obj->ops->adjust_lru(obj); } - if (i915_gem_object_has_pages(obj)) { + if (i915_gem_object_has_pages(obj) || + i915_gem_object_has_self_managed_shrink_list(obj)) { unsigned long flags; spin_lock_irqsave(&i915->mm.obj_lock, flags); diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 2a2d7643b551..96d2d99f5b98 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -48,8 +48,9 @@ #include "i915_gpu_error.h" #include "i915_memcpy.h" #include "i915_scatterlist.h" +#include "i915_vma_snapshot.h" -#define ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN) +#define ALLOW_FAIL (__GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | __GFP_NOWARN) #define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN) static void __sg_set_buf(struct scatterlist *sg, @@ -275,16 +276,16 @@ static bool compress_start(struct i915_vma_compress *c) static void *compress_next_page(struct i915_vma_compress *c, struct i915_vma_coredump *dst) { - void *page; + void *page_addr; + struct page *page; - if (dst->page_count >= dst->num_pages) - return ERR_PTR(-ENOSPC); - - page = pool_alloc(&c->pool, ALLOW_FAIL); - if (!page) + page_addr = pool_alloc(&c->pool, ALLOW_FAIL); + if (!page_addr) return ERR_PTR(-ENOMEM); - return dst->pages[dst->page_count++] = page; + page = virt_to_page(page_addr); + list_add_tail(&page->lru, &dst->page_list); + return page_addr; } static int compress_page(struct i915_vma_compress *c, @@ -397,7 +398,7 @@ static int compress_page(struct i915_vma_compress *c, if (!(wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE))) memcpy(ptr, src, PAGE_SIZE); - dst->pages[dst->page_count++] = ptr; + list_add_tail(&virt_to_page(ptr)->lru, &dst->page_list); cond_resched(); return 0; @@ -614,7 +615,7 @@ static void print_error_vma(struct drm_i915_error_state_buf *m, const struct i915_vma_coredump *vma) { char out[ASCII85_BUFSZ]; - int page; + struct page *page; if (!vma) return; @@ -628,16 +629,17 @@ static void print_error_vma(struct drm_i915_error_state_buf *m, err_printf(m, "gtt_page_sizes = 0x%08x\n", vma->gtt_page_sizes); err_compression_marker(m); - for (page = 0; page < vma->page_count; page++) { + list_for_each_entry(page, &vma->page_list, lru) { int i, len; + const u32 *addr = page_address(page); len = PAGE_SIZE; - if (page == vma->page_count - 1) + if (page == list_last_entry(&vma->page_list, typeof(*page), lru)) len -= vma->unused; len = ascii85_encode_len(len); for (i = 0; i < len; i++) - err_puts(m, ascii85_encode(vma->pages[page][i], out)); + err_puts(m, ascii85_encode(addr[i], out)); } err_puts(m, "\n"); } @@ -946,10 +948,12 @@ static void i915_vma_coredump_free(struct i915_vma_coredump *vma) { while (vma) { struct i915_vma_coredump *next = vma->next; - int page; + struct page *page, *n; - for (page = 0; page < vma->page_count; page++) - free_page((unsigned long)vma->pages[page]); + list_for_each_entry_safe(page, n, &vma->page_list, lru) { + list_del_init(&page->lru); + __free_page(page); + } kfree(vma); vma = next; @@ -1009,25 +1013,21 @@ void __i915_gpu_coredump_free(struct kref *error_ref) static struct i915_vma_coredump * i915_vma_coredump_create(const 
struct intel_gt *gt, - const struct i915_vma *vma, - const char *name, + const struct i915_vma_snapshot *vsnap, struct i915_vma_compress *compress) { struct i915_ggtt *ggtt = gt->ggtt; const u64 slot = ggtt->error_capture.start; struct i915_vma_coredump *dst; - unsigned long num_pages; struct sgt_iter iter; int ret; might_sleep(); - if (!vma || !vma->pages || !compress) + if (!vsnap || !vsnap->pages || !compress) return NULL; - num_pages = min_t(u64, vma->size, vma->obj->base.size) >> PAGE_SHIFT; - num_pages = DIV_ROUND_UP(10 * num_pages, 8); /* worstcase zlib growth */ - dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), ALLOW_FAIL); + dst = kmalloc(sizeof(*dst), ALLOW_FAIL); if (!dst) return NULL; @@ -1036,14 +1036,13 @@ i915_vma_coredump_create(const struct intel_gt *gt, return NULL; } - strcpy(dst->name, name); + INIT_LIST_HEAD(&dst->page_list); + strcpy(dst->name, vsnap->name); dst->next = NULL; - dst->gtt_offset = vma->node.start; - dst->gtt_size = vma->node.size; - dst->gtt_page_sizes = vma->page_sizes.gtt; - dst->num_pages = num_pages; - dst->page_count = 0; + dst->gtt_offset = vsnap->gtt_offset; + dst->gtt_size = vsnap->gtt_size; + dst->gtt_page_sizes = vsnap->page_sizes; dst->unused = 0; ret = -EINVAL; @@ -1051,7 +1050,7 @@ i915_vma_coredump_create(const struct intel_gt *gt, void __iomem *s; dma_addr_t dma; - for_each_sgt_daddr(dma, iter, vma->pages) { + for_each_sgt_daddr(dma, iter, vsnap->pages) { mutex_lock(&ggtt->error_mutex); ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0); @@ -1069,11 +1068,11 @@ i915_vma_coredump_create(const struct intel_gt *gt, if (ret) break; } - } else if (__i915_gem_object_is_lmem(vma->obj)) { - struct intel_memory_region *mem = vma->obj->mm.region; + } else if (vsnap->mr && vsnap->mr->type != INTEL_MEMORY_SYSTEM) { + struct intel_memory_region *mem = vsnap->mr; dma_addr_t dma; - for_each_sgt_daddr(dma, iter, vma->pages) { + for_each_sgt_daddr(dma, iter, vsnap->pages) { void __iomem *s; s = io_mapping_map_wc(&mem->iomap, @@ -1089,7 +1088,7 @@ i915_vma_coredump_create(const struct intel_gt *gt, } else { struct page *page; - for_each_sgt_page(page, iter, vma->pages) { + for_each_sgt_page(page, iter, vsnap->pages) { void *s; drm_clflush_pages(&page, 1); @@ -1106,8 +1105,13 @@ i915_vma_coredump_create(const struct intel_gt *gt, } if (ret || compress_flush(compress, dst)) { - while (dst->page_count--) - pool_free(&compress->pool, dst->pages[dst->page_count]); + struct page *page, *n; + + list_for_each_entry_safe_reverse(page, n, &dst->page_list, lru) { + list_del_init(&page->lru); + pool_free(&compress->pool, page_address(page)); + } + kfree(dst); dst = NULL; } @@ -1320,38 +1324,72 @@ static bool record_context(struct i915_gem_context_coredump *e, struct intel_engine_capture_vma { struct intel_engine_capture_vma *next; - struct i915_vma *vma; + struct i915_vma_snapshot *vsnap; char name[16]; + bool lockdep_cookie; }; static struct intel_engine_capture_vma * -capture_vma(struct intel_engine_capture_vma *next, - struct i915_vma *vma, - const char *name, - gfp_t gfp) +capture_vma_snapshot(struct intel_engine_capture_vma *next, + struct i915_vma_snapshot *vsnap, + gfp_t gfp) { struct intel_engine_capture_vma *c; - if (!vma) + if (!i915_vma_snapshot_present(vsnap)) return next; c = kmalloc(sizeof(*c), gfp); if (!c) return next; - if (!i915_active_acquire_if_busy(&vma->active)) { + if (!i915_vma_snapshot_resource_pin(vsnap, &c->lockdep_cookie)) { kfree(c); return next; } - strcpy(c->name, name); - c->vma = vma; /* reference held while active 
*/ + strcpy(c->name, vsnap->name); + c->vsnap = vsnap; + i915_vma_snapshot_get(vsnap); c->next = next; return c; } static struct intel_engine_capture_vma * +capture_vma(struct intel_engine_capture_vma *next, + struct i915_vma *vma, + const char *name, + gfp_t gfp) +{ + struct i915_vma_snapshot *vsnap; + + if (!vma) + return next; + + /* + * If the vma isn't pinned, then the vma should be snapshotted + * to a struct i915_vma_snapshot at command submission time. + * Not here. + */ + GEM_WARN_ON(!i915_vma_is_pinned(vma)); + if (!i915_vma_is_pinned(vma)) + return next; + + vsnap = i915_vma_snapshot_alloc(gfp); + if (!vsnap) + return next; + + i915_vma_snapshot_init(vsnap, vma, name); + next = capture_vma_snapshot(next, vsnap, gfp); + + /* FIXME: Replace on async unbind. */ + i915_vma_snapshot_put(vsnap); + + return next; +} + +static struct intel_engine_capture_vma * capture_user(struct intel_engine_capture_vma *capture, const struct i915_request *rq, gfp_t gfp) @@ -1359,7 +1397,7 @@ capture_user(struct intel_engine_capture_vma *capture, struct i915_capture_list *c; for (c = rq->capture_list; c; c = c->next) - capture = capture_vma(capture, c->vma, "user", gfp); + capture = capture_vma_snapshot(capture, c->vma_snapshot, gfp); return capture; } @@ -1373,6 +1411,36 @@ static void add_vma(struct intel_engine_coredump *ee, } } +static struct i915_vma_coredump * +create_vma_coredump(const struct intel_gt *gt, struct i915_vma *vma, + const char *name, struct i915_vma_compress *compress) +{ + struct i915_vma_coredump *ret = NULL; + struct i915_vma_snapshot tmp; + bool lockdep_cookie; + + if (!vma) + return NULL; + + i915_vma_snapshot_init_onstack(&tmp, vma, name); + if (i915_vma_snapshot_resource_pin(&tmp, &lockdep_cookie)) { + ret = i915_vma_coredump_create(gt, &tmp, compress); + i915_vma_snapshot_resource_unpin(&tmp, lockdep_cookie); + } + i915_vma_snapshot_put_onstack(&tmp); + + return ret; +} + +static void add_vma_coredump(struct intel_engine_coredump *ee, + const struct intel_gt *gt, + struct i915_vma *vma, + const char *name, + struct i915_vma_compress *compress) +{ + add_vma(ee, create_vma_coredump(gt, vma, name, compress)); +} + struct intel_engine_coredump * intel_engine_coredump_alloc(struct intel_engine_cs *engine, gfp_t gfp) { @@ -1406,7 +1474,7 @@ intel_engine_coredump_add_request(struct intel_engine_coredump *ee, * as the simplest method to avoid being overwritten * by userspace. 
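[editor's note] The error-capture changes in this series also stop preallocating a worst-case array of page pointers and instead string the compressed output pages together through page->lru on a list head, tearing them down with list_for_each_entry_safe(). A generic sketch of that accumulate/free pattern, using plain alloc_page() in place of the driver's compressor pool:

/* Illustrative page accumulation on a list_head via page->lru. */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mm.h>

static int collect_pages(struct list_head *pages, int count)
{
	while (count--) {
		struct page *page = alloc_page(GFP_KERNEL);

		if (!page)
			return -ENOMEM;
		/* page->lru is available to the page's current owner */
		list_add_tail(&page->lru, pages);
	}
	return 0;
}

static void release_page_list(struct list_head *pages)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, pages, lru) {
		list_del_init(&page->lru);
		__free_page(page);
	}
}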
*/ - vma = capture_vma(vma, rq->batch, "batch", gfp); + vma = capture_vma_snapshot(vma, &rq->batch_snapshot, gfp); vma = capture_user(vma, rq, gfp); vma = capture_vma(vma, rq->ring->vma, "ring", gfp); vma = capture_vma(vma, rq->context->state, "HW context", gfp); @@ -1427,30 +1495,24 @@ intel_engine_coredump_add_vma(struct intel_engine_coredump *ee, while (capture) { struct intel_engine_capture_vma *this = capture; - struct i915_vma *vma = this->vma; + struct i915_vma_snapshot *vsnap = this->vsnap; add_vma(ee, i915_vma_coredump_create(engine->gt, - vma, this->name, - compress)); + vsnap, compress)); - i915_active_release(&vma->active); + i915_vma_snapshot_resource_unpin(vsnap, this->lockdep_cookie); + i915_vma_snapshot_put(vsnap); capture = this->next; kfree(this); } - add_vma(ee, - i915_vma_coredump_create(engine->gt, - engine->status_page.vma, - "HW Status", - compress)); + add_vma_coredump(ee, engine->gt, engine->status_page.vma, + "HW Status", compress); - add_vma(ee, - i915_vma_coredump_create(engine->gt, - engine->wa_ctx.vma, - "WA context", - compress)); + add_vma_coredump(ee, engine->gt, engine->wa_ctx.vma, + "WA context", compress); } static struct intel_engine_coredump * @@ -1486,17 +1548,25 @@ capture_engine(struct intel_engine_cs *engine, } } if (rq) - capture = intel_engine_coredump_add_request(ee, rq, - ATOMIC_MAYFAIL); + rq = i915_request_get_rcu(rq); + + if (!rq) + goto no_request_capture; + + capture = intel_engine_coredump_add_request(ee, rq, ATOMIC_MAYFAIL); if (!capture) { -no_request_capture: - kfree(ee); - return NULL; + i915_request_put(rq); + goto no_request_capture; } intel_engine_coredump_add_vma(ee, capture, compress); + i915_request_put(rq); return ee; + +no_request_capture: + kfree(ee); + return NULL; } static void @@ -1550,10 +1620,8 @@ gt_record_uc(struct intel_gt_coredump *gt, */ error_uc->guc_fw.path = kstrdup(uc->guc.fw.path, ALLOW_FAIL); error_uc->huc_fw.path = kstrdup(uc->huc.fw.path, ALLOW_FAIL); - error_uc->guc_log = - i915_vma_coredump_create(gt->_gt, - uc->guc.log.vma, "GuC log buffer", - compress); + error_uc->guc_log = create_vma_coredump(gt->_gt, uc->guc.log.vma, + "GuC log buffer", compress); return error_uc; } @@ -1750,10 +1818,7 @@ static void capture_gen(struct i915_gpu_coredump *error) error->wakelock = atomic_read(&i915->runtime_pm.wakeref_count); error->suspended = i915->runtime_pm.suspended; - error->iommu = -1; -#ifdef CONFIG_INTEL_IOMMU - error->iommu = intel_iommu_gfx_mapped; -#endif + error->iommu = intel_vtd_active(i915); error->reset_count = i915_reset_count(&i915->gpu_error); error->suspend_count = i915->suspend_count; @@ -1839,8 +1904,8 @@ void i915_vma_capture_finish(struct intel_gt_coredump *gt, kfree(compress); } -struct i915_gpu_coredump * -i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask) +static struct i915_gpu_coredump * +__i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask) { struct drm_i915_private *i915 = gt->i915; struct i915_gpu_coredump *error; @@ -1881,6 +1946,22 @@ i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask) return error; } +struct i915_gpu_coredump * +i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask) +{ + static DEFINE_MUTEX(capture_mutex); + int ret = mutex_lock_interruptible(&capture_mutex); + struct i915_gpu_coredump *dump; + + if (ret) + return ERR_PTR(ret); + + dump = __i915_gpu_coredump(gt, engine_mask); + mutex_unlock(&capture_mutex); + + return dump; +} + void i915_error_state_store(struct i915_gpu_coredump *error) { 
struct drm_i915_private *i915; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index b98d8cdbe4f2..5aedf5129814 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -39,10 +39,8 @@ struct i915_vma_coredump { u64 gtt_size; u32 gtt_page_sizes; - int num_pages; - int page_count; int unused; - u32 *pages[]; + struct list_head page_list; }; struct i915_request_coredump { diff --git a/drivers/gpu/drm/i915/i915_iosf_mbi.h b/drivers/gpu/drm/i915/i915_iosf_mbi.h new file mode 100644 index 000000000000..8f81b7603d37 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_iosf_mbi.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __I915_IOSF_MBI_H__ +#define __I915_IOSF_MBI_H__ + +#if IS_ENABLED(CONFIG_IOSF_MBI) +#include <asm/iosf_mbi.h> +#else + +/* Stubs to compile for all non-x86 archs */ +#define MBI_PMIC_BUS_ACCESS_BEGIN 1 +#define MBI_PMIC_BUS_ACCESS_END 2 + +struct notifier_block; + +static inline void iosf_mbi_punit_acquire(void) {} +static inline void iosf_mbi_punit_release(void) {} +static inline void iosf_mbi_assert_punit_acquired(void) {} + +static inline +int iosf_mbi_register_pmic_bus_access_notifier(struct notifier_block *nb) +{ + return 0; +} + +static inline int +iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(struct notifier_block *nb) +{ + return 0; +} + +static inline +int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb) +{ + return 0; +} +#endif + +#endif /* __I915_IOSF_MBI_H__ */ diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 77680bca46ee..6aea159abc50 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -2772,7 +2772,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg) { struct drm_i915_private * const i915 = arg; struct intel_gt *gt = &i915->gt; - void __iomem * const regs = i915->uncore.regs; + void __iomem * const regs = gt->uncore->regs; u32 master_tile_ctl, master_ctl; u32 gu_misc_iir; @@ -3016,7 +3016,7 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) if (IS_CHERRYVIEW(dev_priv)) intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV); else - intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK); + intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV); i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); intel_uncore_write(uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT)); @@ -3173,11 +3173,12 @@ static void gen11_display_irq_reset(struct drm_i915_private *dev_priv) static void gen11_irq_reset(struct drm_i915_private *dev_priv) { - struct intel_uncore *uncore = &dev_priv->uncore; + struct intel_gt *gt = &dev_priv->gt; + struct intel_uncore *uncore = gt->uncore; gen11_master_intr_disable(dev_priv->uncore.regs); - gen11_gt_irq_reset(&dev_priv->gt); + gen11_gt_irq_reset(gt); gen11_display_irq_reset(dev_priv); GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_); @@ -3186,11 +3187,12 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv) static void dg1_irq_reset(struct drm_i915_private *dev_priv) { - struct intel_uncore *uncore = &dev_priv->uncore; + struct intel_gt *gt = &dev_priv->gt; + struct intel_uncore *uncore = gt->uncore; dg1_master_intr_disable(dev_priv->uncore.regs); - gen11_gt_irq_reset(&dev_priv->gt); + gen11_gt_irq_reset(gt); gen11_display_irq_reset(dev_priv); GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_); @@ -3869,13 +3871,14 @@ static 
void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv) static void gen11_irq_postinstall(struct drm_i915_private *dev_priv) { - struct intel_uncore *uncore = &dev_priv->uncore; + struct intel_gt *gt = &dev_priv->gt; + struct intel_uncore *uncore = gt->uncore; u32 gu_misc_masked = GEN11_GU_MISC_GSE; if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) icp_irq_postinstall(dev_priv); - gen11_gt_irq_postinstall(&dev_priv->gt); + gen11_gt_irq_postinstall(gt); gen11_de_irq_postinstall(dev_priv); GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked); @@ -3886,10 +3889,11 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv) static void dg1_irq_postinstall(struct drm_i915_private *dev_priv) { - struct intel_uncore *uncore = &dev_priv->uncore; + struct intel_gt *gt = &dev_priv->gt; + struct intel_uncore *uncore = gt->uncore; u32 gu_misc_masked = GEN11_GU_MISC_GSE; - gen11_gt_irq_postinstall(&dev_priv->gt); + gen11_gt_irq_postinstall(gt); GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked); @@ -3900,8 +3904,8 @@ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv) GEN11_DISPLAY_IRQ_ENABLE); } - dg1_master_intr_enable(dev_priv->uncore.regs); - intel_uncore_posting_read(&dev_priv->uncore, DG1_MSTR_TILE_INTR); + dg1_master_intr_enable(uncore->regs); + intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR); } static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/i915_module.c b/drivers/gpu/drm/i915/i915_module.c index 1df66bf276ce..f6bcd2f89257 100644 --- a/drivers/gpu/drm/i915/i915_module.c +++ b/drivers/gpu/drm/i915/i915_module.c @@ -24,8 +24,8 @@ static int i915_check_nomodeset(void) /* * Enable KMS by default, unless explicitly overriden by - * either the i915.modeset prarameter or by the - * vga_text_mode_force boot option. + * either the i915.modeset parameter or by the + * nomodeset boot option. 
*/ if (i915_modparams.modeset == 0) diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 169837de395d..f01cba4ec283 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -27,13 +27,14 @@ #include <drm/drm_drv.h> #include <drm/i915_pciids.h> +#include "i915_driver.h" #include "i915_drv.h" #include "i915_pci.h" #define PLATFORM(x) .platform = (x) #define GEN(x) \ - .graphics_ver = (x), \ - .media_ver = (x), \ + .graphics.ver = (x), \ + .media.ver = (x), \ .display.ver = (x) #define I845_PIPE_OFFSETS \ @@ -145,6 +146,12 @@ .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \ DRM_COLOR_LUT_EQUAL_CHANNELS, \ } +#define ICL_COLORS \ + .color = { .degamma_lut_size = 33, .gamma_lut_size = 262145, \ + .degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \ + DRM_COLOR_LUT_EQUAL_CHANNELS, \ + .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \ + } /* Keep in gen based order, and chronological order within a gen */ @@ -811,7 +818,7 @@ static const struct intel_device_info cml_gt2_info = { [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \ }, \ GEN(11), \ - .color = { .degamma_lut_size = 33, .gamma_lut_size = 262145 }, \ + ICL_COLORS, \ .dbuf.size = 2048, \ .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2), \ .display.has_dsc = 1, \ @@ -866,7 +873,7 @@ static const struct intel_device_info jsl_info = { TGL_CURSOR_OFFSETS, \ .has_global_mocs = 1, \ .has_pxp = 1, \ - .display.has_dsb = 1 + .display.has_dsb = 0 /* FIXME: LUT load is broken with DSB */ static const struct intel_device_info tgl_info = { GEN12_FEATURES, @@ -899,7 +906,7 @@ static const struct intel_device_info rkl_info = { static const struct intel_device_info dg1_info = { GEN12_FEATURES, DGFX_FEATURES, - .graphics_rel = 10, + .graphics.rel = 10, PLATFORM(INTEL_DG1), .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), .require_force_probe = 1, @@ -932,8 +939,6 @@ static const struct intel_device_info adl_s_info = { #define XE_LPD_FEATURES \ .abox_mask = GENMASK(1, 0), \ .color = { .degamma_lut_size = 0, .gamma_lut_size = 0 }, \ - .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \ - BIT(TRANSCODER_C) | BIT(TRANSCODER_D), \ .dbuf.size = 4096, \ .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2) | BIT(DBUF_S3) | \ BIT(DBUF_S4), \ @@ -955,12 +960,16 @@ static const struct intel_device_info adl_s_info = { [TRANSCODER_B] = PIPE_B_OFFSET, \ [TRANSCODER_C] = PIPE_C_OFFSET, \ [TRANSCODER_D] = PIPE_D_OFFSET, \ + [TRANSCODER_DSI_0] = PIPE_DSI0_OFFSET, \ + [TRANSCODER_DSI_1] = PIPE_DSI1_OFFSET, \ }, \ .trans_offsets = { \ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \ [TRANSCODER_B] = TRANSCODER_B_OFFSET, \ [TRANSCODER_C] = TRANSCODER_C_OFFSET, \ [TRANSCODER_D] = TRANSCODER_D_OFFSET, \ + [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \ + [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \ }, \ XE_LPD_CURSOR_OFFSETS @@ -969,6 +978,9 @@ static const struct intel_device_info adl_p_info = { XE_LPD_FEATURES, PLATFORM(INTEL_ALDERLAKE_P), .require_force_probe = 1, + .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | + BIT(TRANSCODER_C) | BIT(TRANSCODER_D) | + BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1), .display.has_cdclk_crawl = 1, .display.has_modular_fia = 1, .display.has_psr_hw_tracking = 0, @@ -986,8 +998,8 @@ static const struct intel_device_info adl_p_info = { I915_GTT_PAGE_SIZE_2M #define XE_HP_FEATURES \ - .graphics_ver = 12, \ - .graphics_rel = 50, \ + .graphics.ver = 12, \ + .graphics.rel = 50, \ XE_HP_PAGE_SIZES, \ .dma_mask_size = 46, \ .has_64bit_reloc = 1, \ @@ 
-1005,8 +1017,8 @@ static const struct intel_device_info adl_p_info = { .ppgtt_type = INTEL_PPGTT_FULL #define XE_HPM_FEATURES \ - .media_ver = 12, \ - .media_rel = 50 + .media.ver = 12, \ + .media.rel = 50 __maybe_unused static const struct intel_device_info xehpsdv_info = { @@ -1030,14 +1042,16 @@ static const struct intel_device_info dg2_info = { XE_HPM_FEATURES, XE_LPD_FEATURES, DGFX_FEATURES, - .graphics_rel = 55, - .media_rel = 55, + .graphics.rel = 55, + .media.rel = 55, PLATFORM(INTEL_DG2), .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VECS1) | BIT(VCS0) | BIT(VCS2), .require_force_probe = 1, + .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | + BIT(TRANSCODER_C) | BIT(TRANSCODER_D), }; #undef PLATFORM diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index da9055c3ebf0..3450818802c2 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -371,6 +371,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define VLV_G3DCTL _MMIO(0x9024) #define VLV_GSCKGCTL _MMIO(0x9028) +#define FBC_LLC_READ_CTRL _MMIO(0x9044) +#define FBC_LLC_FULLY_OPEN REG_BIT(30) + #define GEN6_MBCTL _MMIO(0x0907c) #define GEN6_MBCTL_ENABLE_BOOT_FETCH (1 << 4) #define GEN6_MBCTL_CTX_FETCH_NEEDED (1 << 3) @@ -498,6 +501,18 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define ECOBITS_PPGTT_CACHE64B (3 << 8) #define ECOBITS_PPGTT_CACHE4B (0 << 8) +#define GEN12_GAMCNTRL_CTRL _MMIO(0xcf54) +#define INVALIDATION_BROADCAST_MODE_DIS REG_BIT(12) +#define GLOBAL_INVALIDATION_MODE REG_BIT(2) + +#define GEN12_GAMSTLB_CTRL _MMIO(0xcf4c) +#define CONTROL_BLOCK_CLKGATE_DIS REG_BIT(12) +#define EGRESS_BLOCK_CLKGATE_DIS REG_BIT(11) +#define TAG_BLOCK_CLKGATE_DIS REG_BIT(7) + +#define GEN12_MERT_MOD_CTRL _MMIO(0xcf28) +#define FORCE_MISS_FTLB REG_BIT(3) + #define GAB_CTL _MMIO(0x24000) #define GAB_CTL_CONT_AFTER_PAGEFAULT (1 << 8) @@ -719,6 +734,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN12_OA_TLB_INV_CR _MMIO(0xceec) +#define GEN12_SQCM _MMIO(0x8724) +#define EN_32B_ACCESS REG_BIT(30) + /* Gen12 OAR unit */ #define GEN12_OAR_OACONTROL _MMIO(0x2960) #define GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT 1 @@ -770,6 +788,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define EU_PERF_CNTL5 _MMIO(0xe55c) #define EU_PERF_CNTL6 _MMIO(0xe65c) +#define RT_CTRL _MMIO(0xe530) +#define DIS_NULL_QUERY REG_BIT(10) + /* * OA Boolean state */ @@ -2662,6 +2683,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */ #define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */ +#define GUCPMTIMESTAMP _MMIO(0xC3E8) + /* There are 16 64-bit CS General Purpose Registers per-engine on Gen8+ */ #define GEN8_RING_CS_GPR(base, n) _MMIO((base) + 0x600 + (n) * 8) #define GEN8_RING_CS_GPR_UDW(base, n) _MMIO((base) + 0x600 + (n) * 8 + 4) @@ -2772,6 +2795,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define VDBOX_CGCTL3F10(base) _MMIO((base) + 0x3f10) #define IECPUNIT_CLKGATE_DIS REG_BIT(22) +#define VDBOX_CGCTL3F18(base) _MMIO((base) + 0x3f18) +#define ALNUNIT_CLKGATE_DIS REG_BIT(13) + #define ERROR_GEN6 _MMIO(0x40a0) #define GEN7_ERR_INT _MMIO(0x44040) #define ERR_INT_POISON (1 << 31) @@ -2795,12 +2821,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN12_AUX_ERR_DBG _MMIO(0x43f4) #define FPGA_DBG _MMIO(0x42300) -#define FPGA_DBG_RM_NOCLAIM (1 << 31) +#define FPGA_DBG_RM_NOCLAIM REG_BIT(31) #define CLAIM_ER _MMIO(VLV_DISPLAY_BASE + 
0x2028) -#define CLAIM_ER_CLR (1 << 31) -#define CLAIM_ER_OVERFLOW (1 << 16) -#define CLAIM_ER_CTR_MASK 0xffff +#define CLAIM_ER_CLR REG_BIT(31) +#define CLAIM_ER_OVERFLOW REG_BIT(16) +#define CLAIM_ER_CTR_MASK REG_GENMASK(15, 0) #define DERRMR _MMIO(0x44050) /* Note that HBLANK events are reserved on bdw+ */ @@ -2870,6 +2896,15 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE (1 << 2) #define GEN11_ENABLE_32_PLANE_MODE (1 << 7) +#define SCCGCTL94DC _MMIO(0x94dc) +#define CG3DDISURB REG_BIT(14) + +#define MLTICTXCTL _MMIO(0xb170) +#define TDONRENDER REG_BIT(2) + +#define L3SQCREG1_CCS0 _MMIO(0xb200) +#define FLUSHALLNONCOH REG_BIT(5) + /* WaClearTdlStateAckDirtyBits */ #define GEN8_STATE_ACK _MMIO(0x20F0) #define GEN9_STATE_ACK_SLICE1 _MMIO(0x20F8) @@ -3106,7 +3141,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN9_RCS_FE_FSM2 _MMIO(0x22a4) #define GEN10_CACHE_MODE_SS _MMIO(0xe420) -#define FLOAT_BLEND_OPTIMIZATION_ENABLE (1 << 4) +#define ENABLE_PREFETCH_INTO_IC REG_BIT(3) +#define FLOAT_BLEND_OPTIMIZATION_ENABLE REG_BIT(4) /* Fuse readout registers for GT */ #define HSW_PAVP_FUSE1 _MMIO(0x911C) @@ -3307,93 +3343,98 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define FBC_CFB_BASE _MMIO(0x3200) /* 4k page aligned */ #define FBC_LL_BASE _MMIO(0x3204) /* 4k page aligned */ #define FBC_CONTROL _MMIO(0x3208) -#define FBC_CTL_EN REG_BIT(31) -#define FBC_CTL_PERIODIC REG_BIT(30) -#define FBC_CTL_INTERVAL_MASK REG_GENMASK(29, 16) -#define FBC_CTL_INTERVAL(x) REG_FIELD_PREP(FBC_CTL_INTERVAL_MASK, (x)) -#define FBC_CTL_STOP_ON_MOD REG_BIT(15) -#define FBC_CTL_UNCOMPRESSIBLE REG_BIT(14) /* i915+ */ -#define FBC_CTL_C3_IDLE REG_BIT(13) /* i945gm */ -#define FBC_CTL_STRIDE_MASK REG_GENMASK(12, 5) -#define FBC_CTL_STRIDE(x) REG_FIELD_PREP(FBC_CTL_STRIDE_MASK, (x)) -#define FBC_CTL_FENCENO_MASK REG_GENMASK(3, 0) -#define FBC_CTL_FENCENO(x) REG_FIELD_PREP(FBC_CTL_FENCENO_MASK, (x)) +#define FBC_CTL_EN REG_BIT(31) +#define FBC_CTL_PERIODIC REG_BIT(30) +#define FBC_CTL_INTERVAL_MASK REG_GENMASK(29, 16) +#define FBC_CTL_INTERVAL(x) REG_FIELD_PREP(FBC_CTL_INTERVAL_MASK, (x)) +#define FBC_CTL_STOP_ON_MOD REG_BIT(15) +#define FBC_CTL_UNCOMPRESSIBLE REG_BIT(14) /* i915+ */ +#define FBC_CTL_C3_IDLE REG_BIT(13) /* i945gm only */ +#define FBC_CTL_STRIDE_MASK REG_GENMASK(12, 5) +#define FBC_CTL_STRIDE(x) REG_FIELD_PREP(FBC_CTL_STRIDE_MASK, (x)) +#define FBC_CTL_FENCENO_MASK REG_GENMASK(3, 0) +#define FBC_CTL_FENCENO(x) REG_FIELD_PREP(FBC_CTL_FENCENO_MASK, (x)) #define FBC_COMMAND _MMIO(0x320c) -#define FBC_CMD_COMPRESS (1 << 0) +#define FBC_CMD_COMPRESS REG_BIT(0) #define FBC_STATUS _MMIO(0x3210) -#define FBC_STAT_COMPRESSING (1 << 31) -#define FBC_STAT_COMPRESSED (1 << 30) -#define FBC_STAT_MODIFIED (1 << 29) -#define FBC_STAT_CURRENT_LINE_SHIFT (0) -#define FBC_CONTROL2 _MMIO(0x3214) -#define FBC_CTL_FENCE_DBL (0 << 4) -#define FBC_CTL_IDLE_IMM (0 << 2) -#define FBC_CTL_IDLE_FULL (1 << 2) -#define FBC_CTL_IDLE_LINE (2 << 2) -#define FBC_CTL_IDLE_DEBUG (3 << 2) -#define FBC_CTL_CPU_FENCE (1 << 1) -#define FBC_CTL_PLANE(plane) ((plane) << 0) -#define FBC_FENCE_OFF _MMIO(0x3218) /* BSpec typo has 321Bh */ -#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4) +#define FBC_STAT_COMPRESSING REG_BIT(31) +#define FBC_STAT_COMPRESSED REG_BIT(30) +#define FBC_STAT_MODIFIED REG_BIT(29) +#define FBC_STAT_CURRENT_LINE_MASK REG_GENMASK(10, 0) +#define FBC_CONTROL2 _MMIO(0x3214) /* i965gm only */ +#define FBC_CTL_FENCE_DBL REG_BIT(4) +#define 
FBC_CTL_IDLE_MASK REG_GENMASK(3, 2) +#define FBC_CTL_IDLE_IMM REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 0) +#define FBC_CTL_IDLE_FULL REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 1) +#define FBC_CTL_IDLE_LINE REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 2) +#define FBC_CTL_IDLE_DEBUG REG_FIELD_PREP(FBC_CTL_IDLE_MASK, 3) +#define FBC_CTL_CPU_FENCE_EN REG_BIT(1) +#define FBC_CTL_PLANE_MASK REG_GENMASK(1, 0) +#define FBC_CTL_PLANE(i9xx_plane) REG_FIELD_PREP(FBC_CTL_PLANE_MASK, (i9xx_plane)) +#define FBC_FENCE_OFF _MMIO(0x3218) /* i965gm only, BSpec typo has 321Bh */ +#define FBC_MOD_NUM _MMIO(0x3220) /* i965gm only */ +#define FBC_MOD_NUM_MASK REG_GENMASK(31, 1) +#define FBC_MOD_NUM_VALID REG_BIT(0) +#define FBC_TAG(i) _MMIO(0x3300 + (i) * 4) /* 49 reisters */ +#define FBC_TAG_MASK REG_GENMASK(1, 0) /* 16 tags per register */ +#define FBC_TAG_MODIFIED REG_FIELD_PREP(FBC_TAG_MASK, 0) +#define FBC_TAG_UNCOMPRESSED REG_FIELD_PREP(FBC_TAG_MASK, 1) +#define FBC_TAG_UNCOMPRESSIBLE REG_FIELD_PREP(FBC_TAG_MASK, 2) +#define FBC_TAG_COMPRESSED REG_FIELD_PREP(FBC_TAG_MASK, 3) #define FBC_LL_SIZE (1536) -#define FBC_LLC_READ_CTRL _MMIO(0x9044) -#define FBC_LLC_FULLY_OPEN (1 << 30) - /* Framebuffer compression for GM45+ */ #define DPFC_CB_BASE _MMIO(0x3200) +#define ILK_DPFC_CB_BASE _MMIO(0x43200) #define DPFC_CONTROL _MMIO(0x3208) -#define DPFC_CTL_EN (1 << 31) -#define DPFC_CTL_PLANE(plane) ((plane) << 30) -#define IVB_DPFC_CTL_PLANE(plane) ((plane) << 29) -#define DPFC_CTL_FENCE_EN (1 << 29) -#define IVB_DPFC_CTL_FENCE_EN (1 << 28) -#define DPFC_CTL_PERSISTENT_MODE (1 << 25) -#define DPFC_SR_EN (1 << 10) -#define DPFC_CTL_LIMIT_1X (0 << 6) -#define DPFC_CTL_LIMIT_2X (1 << 6) -#define DPFC_CTL_LIMIT_4X (2 << 6) +#define ILK_DPFC_CONTROL _MMIO(0x43208) +#define DPFC_CTL_EN REG_BIT(31) +#define DPFC_CTL_PLANE_MASK_G4X REG_BIT(30) /* g4x-snb */ +#define DPFC_CTL_PLANE_G4X(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_G4X, (i9xx_plane)) +#define DPFC_CTL_FENCE_EN_G4X REG_BIT(29) /* g4x-snb */ +#define DPFC_CTL_PLANE_MASK_IVB REG_GENMASK(30, 29) /* ivb only */ +#define DPFC_CTL_PLANE_IVB(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_IVB, (i9xx_plane)) +#define DPFC_CTL_FENCE_EN_IVB REG_BIT(28) /* ivb+ */ +#define DPFC_CTL_PERSISTENT_MODE REG_BIT(25) /* g4x-snb */ +#define DPFC_CTL_FALSE_COLOR REG_BIT(10) /* ivb+ */ +#define DPFC_CTL_SR_EN REG_BIT(10) /* g4x only */ +#define DPFC_CTL_SR_EXIT_DIS REG_BIT(9) /* g4x only */ +#define DPFC_CTL_LIMIT_MASK REG_GENMASK(7, 6) +#define DPFC_CTL_LIMIT_1X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 0) +#define DPFC_CTL_LIMIT_2X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 1) +#define DPFC_CTL_LIMIT_4X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 2) +#define DPFC_CTL_FENCENO_MASK REG_GENMASK(3, 0) +#define DPFC_CTL_FENCENO(fence) REG_FIELD_PREP(DPFC_CTL_FENCENO_MASK, (fence)) #define DPFC_RECOMP_CTL _MMIO(0x320c) -#define DPFC_RECOMP_STALL_EN (1 << 27) -#define DPFC_RECOMP_STALL_WM_SHIFT (16) -#define DPFC_RECOMP_STALL_WM_MASK (0x07ff0000) -#define DPFC_RECOMP_TIMER_COUNT_SHIFT (0) -#define DPFC_RECOMP_TIMER_COUNT_MASK (0x0000003f) +#define ILK_DPFC_RECOMP_CTL _MMIO(0x4320c) +#define DPFC_RECOMP_STALL_EN REG_BIT(27) +#define DPFC_RECOMP_STALL_WM_MASK REG_GENMASK(26, 16) +#define DPFC_RECOMP_TIMER_COUNT_MASK REG_GENMASK(5, 0) #define DPFC_STATUS _MMIO(0x3210) -#define DPFC_INVAL_SEG_SHIFT (16) -#define DPFC_INVAL_SEG_MASK (0x07ff0000) -#define DPFC_COMP_SEG_SHIFT (0) -#define DPFC_COMP_SEG_MASK (0x000007ff) +#define ILK_DPFC_STATUS _MMIO(0x43210) +#define DPFC_INVAL_SEG_MASK REG_GENMASK(26, 16) +#define DPFC_COMP_SEG_MASK 
REG_GENMASK(10, 0) #define DPFC_STATUS2 _MMIO(0x3214) +#define ILK_DPFC_STATUS2 _MMIO(0x43214) +#define DPFC_COMP_SEG_MASK_IVB REG_GENMASK(11, 0) #define DPFC_FENCE_YOFF _MMIO(0x3218) -#define DPFC_CHICKEN _MMIO(0x3224) -#define DPFC_HT_MODIFY (1 << 31) - -/* Framebuffer compression for Ironlake */ -#define ILK_DPFC_CB_BASE _MMIO(0x43200) -#define ILK_DPFC_CONTROL _MMIO(0x43208) -#define FBC_CTL_FALSE_COLOR (1 << 10) -/* The bit 28-8 is reserved */ -#define DPFC_RESERVED (0x1FFFFF00) -#define ILK_DPFC_RECOMP_CTL _MMIO(0x4320c) -#define ILK_DPFC_STATUS _MMIO(0x43210) -#define ILK_DPFC_COMP_SEG_MASK 0x7ff -#define IVB_FBC_STATUS2 _MMIO(0x43214) -#define IVB_FBC_COMP_SEG_MASK 0x7ff -#define BDW_FBC_COMP_SEG_MASK 0xfff #define ILK_DPFC_FENCE_YOFF _MMIO(0x43218) +#define DPFC_CHICKEN _MMIO(0x3224) #define ILK_DPFC_CHICKEN _MMIO(0x43224) -#define ILK_DPFC_DISABLE_DUMMY0 (1 << 8) -#define ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL (1 << 14) -#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1 << 23) +#define DPFC_HT_MODIFY REG_BIT(31) /* pre-ivb */ +#define DPFC_NUKE_ON_ANY_MODIFICATION REG_BIT(23) /* bdw+ */ +#define DPFC_CHICKEN_COMP_DUMMY_PIXEL REG_BIT(14) /* glk+ */ +#define DPFC_DISABLE_DUMMY0 REG_BIT(8) /* ivb+ */ + #define GLK_FBC_STRIDE _MMIO(0x43228) #define FBC_STRIDE_OVERRIDE REG_BIT(15) #define FBC_STRIDE_MASK REG_GENMASK(14, 0) #define FBC_STRIDE(x) REG_FIELD_PREP(FBC_STRIDE_MASK, (x)) + #define ILK_FBC_RT_BASE _MMIO(0x2128) -#define ILK_FBC_RT_VALID (1 << 0) -#define SNB_FBC_FRONT_BUFFER (1 << 1) +#define ILK_FBC_RT_VALID REG_BIT(0) +#define SNB_FBC_FRONT_BUFFER REG_BIT(1) #define ILK_DISPLAY_CHICKEN1 _MMIO(0x42000) #define ILK_FBCQ_DIS (1 << 22) @@ -3417,8 +3458,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) * The following two registers are of type GTTMMADR */ #define SNB_DPFC_CTL_SA _MMIO(0x100100) -#define SNB_CPU_FENCE_ENABLE (1 << 29) -#define DPFC_CPU_FENCE_OFFSET _MMIO(0x100104) +#define SNB_DPFC_FENCE_EN REG_BIT(29) +#define SNB_DPFC_FENCENO_MASK REG_GENMASK(4, 0) +#define SNB_DPFC_FENCENO(fence) REG_FIELD_PREP(SNB_DPFC_FENCENO_MASK, (fence)) +#define SNB_DPFC_CPU_FENCE_OFFSET _MMIO(0x100104) /* Framebuffer compression for Ivybridge */ #define IVB_FBC_RT_BASE _MMIO(0x7020) @@ -3428,8 +3471,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define IPS_ENABLE (1 << 31) #define MSG_FBC_REND_STATE _MMIO(0x50380) -#define FBC_REND_NUKE (1 << 2) -#define FBC_REND_CACHE_CLEAN (1 << 1) +#define FBC_REND_NUKE REG_BIT(2) +#define FBC_REND_CACHE_CLEAN REG_BIT(1) /* * GPIO regs @@ -4278,21 +4321,62 @@ enum { /* * GEN10 clock gating regs */ + +#define UNSLCGCTL9440 _MMIO(0x9440) +#define GAMTLBOACS_CLKGATE_DIS REG_BIT(28) +#define GAMTLBVDBOX5_CLKGATE_DIS REG_BIT(27) +#define GAMTLBVDBOX6_CLKGATE_DIS REG_BIT(26) +#define GAMTLBVDBOX3_CLKGATE_DIS REG_BIT(24) +#define GAMTLBVDBOX4_CLKGATE_DIS REG_BIT(23) +#define GAMTLBVDBOX7_CLKGATE_DIS REG_BIT(22) +#define GAMTLBVDBOX2_CLKGATE_DIS REG_BIT(21) +#define GAMTLBVDBOX0_CLKGATE_DIS REG_BIT(17) +#define GAMTLBKCR_CLKGATE_DIS REG_BIT(16) +#define GAMTLBGUC_CLKGATE_DIS REG_BIT(15) +#define GAMTLBBLT_CLKGATE_DIS REG_BIT(14) +#define GAMTLBVDBOX1_CLKGATE_DIS REG_BIT(6) + +#define UNSLCGCTL9444 _MMIO(0x9444) +#define GAMTLBGFXA0_CLKGATE_DIS REG_BIT(30) +#define GAMTLBGFXA1_CLKGATE_DIS REG_BIT(29) +#define GAMTLBCOMPA0_CLKGATE_DIS REG_BIT(28) +#define GAMTLBCOMPA1_CLKGATE_DIS REG_BIT(27) +#define GAMTLBCOMPB0_CLKGATE_DIS REG_BIT(26) +#define GAMTLBCOMPB1_CLKGATE_DIS REG_BIT(25) +#define GAMTLBCOMPC0_CLKGATE_DIS REG_BIT(24) +#define 
GAMTLBCOMPC1_CLKGATE_DIS REG_BIT(23) +#define GAMTLBCOMPD0_CLKGATE_DIS REG_BIT(22) +#define GAMTLBCOMPD1_CLKGATE_DIS REG_BIT(21) +#define GAMTLBMERT_CLKGATE_DIS REG_BIT(20) +#define GAMTLBVEBOX3_CLKGATE_DIS REG_BIT(19) +#define GAMTLBVEBOX2_CLKGATE_DIS REG_BIT(18) +#define GAMTLBVEBOX1_CLKGATE_DIS REG_BIT(17) +#define GAMTLBVEBOX0_CLKGATE_DIS REG_BIT(16) +#define LTCDD_CLKGATE_DIS REG_BIT(10) + #define SLICE_UNIT_LEVEL_CLKGATE _MMIO(0x94d4) #define SARBUNIT_CLKGATE_DIS (1 << 5) #define RCCUNIT_CLKGATE_DIS (1 << 7) #define MSCUNIT_CLKGATE_DIS (1 << 10) +#define NODEDSS_CLKGATE_DIS REG_BIT(12) #define L3_CLKGATE_DIS REG_BIT(16) #define L3_CR2X_CLKGATE_DIS REG_BIT(17) #define SUBSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9524) -#define GWUNIT_CLKGATE_DIS (1 << 16) +#define DSS_ROUTER_CLKGATE_DIS REG_BIT(28) +#define GWUNIT_CLKGATE_DIS REG_BIT(16) #define SUBSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x9528) #define CPSSUNIT_CLKGATE_DIS REG_BIT(9) +#define SSMCGCTL9530 _MMIO(0x9530) +#define RTFUNIT_CLKGATE_DIS REG_BIT(18) + #define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434) #define VFUNIT_CLKGATE_DIS REG_BIT(20) +#define TSGUNIT_CLKGATE_DIS REG_BIT(17) /* XEHPSDV */ +#define CG3DDISCFEG_CLKGATE_DIS REG_BIT(17) /* DG2 */ +#define GAMEDIA_CLKGATE_DIS REG_BIT(11) #define HSUNIT_CLKGATE_DIS REG_BIT(8) #define VSUNIT_CLKGATE_DIS REG_BIT(3) @@ -4309,47 +4393,52 @@ enum { /* Pipe A CRC regs */ #define _PIPE_CRC_CTL_A 0x60050 -#define PIPE_CRC_ENABLE (1 << 31) +#define PIPE_CRC_ENABLE REG_BIT(31) /* skl+ source selection */ -#define PIPE_CRC_SOURCE_PLANE_1_SKL (0 << 28) -#define PIPE_CRC_SOURCE_PLANE_2_SKL (2 << 28) -#define PIPE_CRC_SOURCE_DMUX_SKL (4 << 28) -#define PIPE_CRC_SOURCE_PLANE_3_SKL (6 << 28) -#define PIPE_CRC_SOURCE_PLANE_4_SKL (7 << 28) -#define PIPE_CRC_SOURCE_PLANE_5_SKL (5 << 28) -#define PIPE_CRC_SOURCE_PLANE_6_SKL (3 << 28) -#define PIPE_CRC_SOURCE_PLANE_7_SKL (1 << 28) +#define PIPE_CRC_SOURCE_MASK_SKL REG_GENMASK(30, 28) +#define PIPE_CRC_SOURCE_PLANE_1_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 0) +#define PIPE_CRC_SOURCE_PLANE_2_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 2) +#define PIPE_CRC_SOURCE_DMUX_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 4) +#define PIPE_CRC_SOURCE_PLANE_3_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 6) +#define PIPE_CRC_SOURCE_PLANE_4_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 7) +#define PIPE_CRC_SOURCE_PLANE_5_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 5) +#define PIPE_CRC_SOURCE_PLANE_6_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 3) +#define PIPE_CRC_SOURCE_PLANE_7_SKL REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_SKL, 1) /* ivb+ source selection */ -#define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29) -#define PIPE_CRC_SOURCE_SPRITE_IVB (1 << 29) -#define PIPE_CRC_SOURCE_PF_IVB (2 << 29) +#define PIPE_CRC_SOURCE_MASK_IVB REG_GENMASK(30, 29) +#define PIPE_CRC_SOURCE_PRIMARY_IVB REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_IVB, 0) +#define PIPE_CRC_SOURCE_SPRITE_IVB REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_IVB, 1) +#define PIPE_CRC_SOURCE_PF_IVB REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_IVB, 2) /* ilk+ source selection */ -#define PIPE_CRC_SOURCE_PRIMARY_ILK (0 << 28) -#define PIPE_CRC_SOURCE_SPRITE_ILK (1 << 28) -#define PIPE_CRC_SOURCE_PIPE_ILK (2 << 28) -/* embedded DP port on the north display block, reserved on ivb */ -#define PIPE_CRC_SOURCE_PORT_A_ILK (4 << 28) -#define PIPE_CRC_SOURCE_FDI_ILK (5 << 28) /* reserved on ivb */ +#define PIPE_CRC_SOURCE_MASK_ILK REG_GENMASK(30, 28) +#define PIPE_CRC_SOURCE_PRIMARY_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 0) +#define 
PIPE_CRC_SOURCE_SPRITE_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 1) +#define PIPE_CRC_SOURCE_PIPE_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 2) +/* embedded DP port on the north display block */ +#define PIPE_CRC_SOURCE_PORT_A_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 4) +#define PIPE_CRC_SOURCE_FDI_ILK REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_ILK, 5) /* vlv source selection */ -#define PIPE_CRC_SOURCE_PIPE_VLV (0 << 27) -#define PIPE_CRC_SOURCE_HDMIB_VLV (1 << 27) -#define PIPE_CRC_SOURCE_HDMIC_VLV (2 << 27) +#define PIPE_CRC_SOURCE_MASK_VLV REG_GENMASK(30, 27) +#define PIPE_CRC_SOURCE_PIPE_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 0) +#define PIPE_CRC_SOURCE_HDMIB_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 1) +#define PIPE_CRC_SOURCE_HDMIC_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 2) /* with DP port the pipe source is invalid */ -#define PIPE_CRC_SOURCE_DP_D_VLV (3 << 27) -#define PIPE_CRC_SOURCE_DP_B_VLV (6 << 27) -#define PIPE_CRC_SOURCE_DP_C_VLV (7 << 27) +#define PIPE_CRC_SOURCE_DP_D_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 3) +#define PIPE_CRC_SOURCE_DP_B_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 6) +#define PIPE_CRC_SOURCE_DP_C_VLV REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_VLV, 7) /* gen3+ source selection */ -#define PIPE_CRC_SOURCE_PIPE_I9XX (0 << 28) -#define PIPE_CRC_SOURCE_SDVOB_I9XX (1 << 28) -#define PIPE_CRC_SOURCE_SDVOC_I9XX (2 << 28) +#define PIPE_CRC_SOURCE_MASK_I9XX REG_GENMASK(30, 28) +#define PIPE_CRC_SOURCE_PIPE_I9XX REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 0) +#define PIPE_CRC_SOURCE_SDVOB_I9XX REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 1) +#define PIPE_CRC_SOURCE_SDVOC_I9XX REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 2) /* with DP/TV port the pipe source is invalid */ -#define PIPE_CRC_SOURCE_DP_D_G4X (3 << 28) -#define PIPE_CRC_SOURCE_TV_PRE (4 << 28) -#define PIPE_CRC_SOURCE_TV_POST (5 << 28) -#define PIPE_CRC_SOURCE_DP_B_G4X (6 << 28) -#define PIPE_CRC_SOURCE_DP_C_G4X (7 << 28) +#define PIPE_CRC_SOURCE_DP_D_G4X REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 3) +#define PIPE_CRC_SOURCE_TV_PRE REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 4) +#define PIPE_CRC_SOURCE_TV_POST REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 5) +#define PIPE_CRC_SOURCE_DP_B_G4X REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 6) +#define PIPE_CRC_SOURCE_DP_C_G4X REG_FIELD_PREP(PIPE_CRC_SOURCE_MASK_I9XX, 7) /* gen2 doesn't have source selection bits */ -#define PIPE_CRC_INCLUDE_BORDER_I8XX (1 << 30) +#define PIPE_CRC_INCLUDE_BORDER_I8XX REG_BIT(30) #define _PIPE_CRC_RES_1_A_IVB 0x60064 #define _PIPE_CRC_RES_2_A_IVB 0x60068 @@ -4698,11 +4787,11 @@ enum { #define PSR_EVENT_LPSP_MODE_EXIT (1 << 1) #define PSR_EVENT_PSR_DISABLE (1 << 0) -#define _PSR2_STATUS_A 0x60940 -#define _PSR2_STATUS_EDP 0x6f940 -#define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(tran, _PSR2_STATUS_A) -#define EDP_PSR2_STATUS_STATE_MASK (0xf << 28) -#define EDP_PSR2_STATUS_STATE_SHIFT 28 +#define _PSR2_STATUS_A 0x60940 +#define _PSR2_STATUS_EDP 0x6f940 +#define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(tran, _PSR2_STATUS_A) +#define EDP_PSR2_STATUS_STATE_MASK REG_GENMASK(31, 28) +#define EDP_PSR2_STATUS_STATE_DEEP_SLEEP REG_FIELD_PREP(EDP_PSR2_STATUS_STATE_MASK, 0x8) #define _PSR2_SU_STATUS_A 0x60914 #define _PSR2_SU_STATUS_EDP 0x6f914 @@ -4999,9 +5088,9 @@ enum { #define PORT_DFT2_G4X _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61154) #define DC_BALANCE_RESET_VLV (1 << 31) #define PIPE_SCRAMBLE_RESET_MASK ((1 << 14) | (0x3 << 0)) -#define PIPE_C_SCRAMBLE_RESET (1 << 14) /* chv */ -#define PIPE_B_SCRAMBLE_RESET (1 << 1) -#define 
PIPE_A_SCRAMBLE_RESET (1 << 0) +#define PIPE_C_SCRAMBLE_RESET REG_BIT(14) /* chv */ +#define PIPE_B_SCRAMBLE_RESET REG_BIT(1) +#define PIPE_A_SCRAMBLE_RESET REG_BIT(0) /* Gen 3 SDVO bits: */ #define SDVO_ENABLE (1 << 31) @@ -6266,55 +6355,55 @@ enum { #define PIPE_STATUS_PORT_UNDERRUN_XELPD REG_BIT(26) #define VLV_DPFLIPSTAT _MMIO(VLV_DISPLAY_BASE + 0x70028) -#define PIPEB_LINE_COMPARE_INT_EN (1 << 29) -#define PIPEB_HLINE_INT_EN (1 << 28) -#define PIPEB_VBLANK_INT_EN (1 << 27) -#define SPRITED_FLIP_DONE_INT_EN (1 << 26) -#define SPRITEC_FLIP_DONE_INT_EN (1 << 25) -#define PLANEB_FLIP_DONE_INT_EN (1 << 24) -#define PIPE_PSR_INT_EN (1 << 22) -#define PIPEA_LINE_COMPARE_INT_EN (1 << 21) -#define PIPEA_HLINE_INT_EN (1 << 20) -#define PIPEA_VBLANK_INT_EN (1 << 19) -#define SPRITEB_FLIP_DONE_INT_EN (1 << 18) -#define SPRITEA_FLIP_DONE_INT_EN (1 << 17) -#define PLANEA_FLIPDONE_INT_EN (1 << 16) -#define PIPEC_LINE_COMPARE_INT_EN (1 << 13) -#define PIPEC_HLINE_INT_EN (1 << 12) -#define PIPEC_VBLANK_INT_EN (1 << 11) -#define SPRITEF_FLIPDONE_INT_EN (1 << 10) -#define SPRITEE_FLIPDONE_INT_EN (1 << 9) -#define PLANEC_FLIPDONE_INT_EN (1 << 8) +#define PIPEB_LINE_COMPARE_INT_EN REG_BIT(29) +#define PIPEB_HLINE_INT_EN REG_BIT(28) +#define PIPEB_VBLANK_INT_EN REG_BIT(27) +#define SPRITED_FLIP_DONE_INT_EN REG_BIT(26) +#define SPRITEC_FLIP_DONE_INT_EN REG_BIT(25) +#define PLANEB_FLIP_DONE_INT_EN REG_BIT(24) +#define PIPE_PSR_INT_EN REG_BIT(22) +#define PIPEA_LINE_COMPARE_INT_EN REG_BIT(21) +#define PIPEA_HLINE_INT_EN REG_BIT(20) +#define PIPEA_VBLANK_INT_EN REG_BIT(19) +#define SPRITEB_FLIP_DONE_INT_EN REG_BIT(18) +#define SPRITEA_FLIP_DONE_INT_EN REG_BIT(17) +#define PLANEA_FLIPDONE_INT_EN REG_BIT(16) +#define PIPEC_LINE_COMPARE_INT_EN REG_BIT(13) +#define PIPEC_HLINE_INT_EN REG_BIT(12) +#define PIPEC_VBLANK_INT_EN REG_BIT(11) +#define SPRITEF_FLIPDONE_INT_EN REG_BIT(10) +#define SPRITEE_FLIPDONE_INT_EN REG_BIT(9) +#define PLANEC_FLIPDONE_INT_EN REG_BIT(8) #define DPINVGTT _MMIO(VLV_DISPLAY_BASE + 0x7002c) /* VLV/CHV only */ -#define SPRITEF_INVALID_GTT_INT_EN (1 << 27) -#define SPRITEE_INVALID_GTT_INT_EN (1 << 26) -#define PLANEC_INVALID_GTT_INT_EN (1 << 25) -#define CURSORC_INVALID_GTT_INT_EN (1 << 24) -#define CURSORB_INVALID_GTT_INT_EN (1 << 23) -#define CURSORA_INVALID_GTT_INT_EN (1 << 22) -#define SPRITED_INVALID_GTT_INT_EN (1 << 21) -#define SPRITEC_INVALID_GTT_INT_EN (1 << 20) -#define PLANEB_INVALID_GTT_INT_EN (1 << 19) -#define SPRITEB_INVALID_GTT_INT_EN (1 << 18) -#define SPRITEA_INVALID_GTT_INT_EN (1 << 17) -#define PLANEA_INVALID_GTT_INT_EN (1 << 16) -#define DPINVGTT_EN_MASK 0xff0000 -#define DPINVGTT_EN_MASK_CHV 0xfff0000 -#define SPRITEF_INVALID_GTT_STATUS (1 << 11) -#define SPRITEE_INVALID_GTT_STATUS (1 << 10) -#define PLANEC_INVALID_GTT_STATUS (1 << 9) -#define CURSORC_INVALID_GTT_STATUS (1 << 8) -#define CURSORB_INVALID_GTT_STATUS (1 << 7) -#define CURSORA_INVALID_GTT_STATUS (1 << 6) -#define SPRITED_INVALID_GTT_STATUS (1 << 5) -#define SPRITEC_INVALID_GTT_STATUS (1 << 4) -#define PLANEB_INVALID_GTT_STATUS (1 << 3) -#define SPRITEB_INVALID_GTT_STATUS (1 << 2) -#define SPRITEA_INVALID_GTT_STATUS (1 << 1) -#define PLANEA_INVALID_GTT_STATUS (1 << 0) -#define DPINVGTT_STATUS_MASK 0xff -#define DPINVGTT_STATUS_MASK_CHV 0xfff +#define DPINVGTT_EN_MASK_CHV REG_GENMASK(27, 16) +#define DPINVGTT_EN_MASK_VLV REG_GENMASK(23, 16) +#define SPRITEF_INVALID_GTT_INT_EN REG_BIT(27) +#define SPRITEE_INVALID_GTT_INT_EN REG_BIT(26) +#define PLANEC_INVALID_GTT_INT_EN REG_BIT(25) +#define 
CURSORC_INVALID_GTT_INT_EN REG_BIT(24) +#define CURSORB_INVALID_GTT_INT_EN REG_BIT(23) +#define CURSORA_INVALID_GTT_INT_EN REG_BIT(22) +#define SPRITED_INVALID_GTT_INT_EN REG_BIT(21) +#define SPRITEC_INVALID_GTT_INT_EN REG_BIT(20) +#define PLANEB_INVALID_GTT_INT_EN REG_BIT(19) +#define SPRITEB_INVALID_GTT_INT_EN REG_BIT(18) +#define SPRITEA_INVALID_GTT_INT_EN REG_BIT(17) +#define PLANEA_INVALID_GTT_INT_EN REG_BIT(16) +#define DPINVGTT_STATUS_MASK_CHV REG_GENMASK(11, 0) +#define DPINVGTT_STATUS_MASK_VLV REG_GENMASK(7, 0) +#define SPRITEF_INVALID_GTT_STATUS REG_BIT(11) +#define SPRITEE_INVALID_GTT_STATUS REG_BIT(10) +#define PLANEC_INVALID_GTT_STATUS REG_BIT(9) +#define CURSORC_INVALID_GTT_STATUS REG_BIT(8) +#define CURSORB_INVALID_GTT_STATUS REG_BIT(7) +#define CURSORA_INVALID_GTT_STATUS REG_BIT(6) +#define SPRITED_INVALID_GTT_STATUS REG_BIT(5) +#define SPRITEC_INVALID_GTT_STATUS REG_BIT(4) +#define PLANEB_INVALID_GTT_STATUS REG_BIT(3) +#define SPRITEB_INVALID_GTT_STATUS REG_BIT(2) +#define SPRITEA_INVALID_GTT_STATUS REG_BIT(1) +#define PLANEA_INVALID_GTT_STATUS REG_BIT(0) #define DSPARB _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x70030) #define DSPARB_CSTART_MASK (0x7f << 7) @@ -8263,7 +8352,7 @@ enum { /* * The below are numbered starting from "S1" on gen11/gen12, but starting - * with gen13 display, the bspec switches to a 0-based numbering scheme + * with display 13, the bspec switches to a 0-based numbering scheme * (although the addresses stay the same so new S0 = old S1, new S1 = old S2). * We'll just use the 0-based numbering here for all platforms since it's the * way things will be named by the hardware team going forward, plus it's more @@ -8308,9 +8397,10 @@ enum { #define RESET_PCH_HANDSHAKE_ENABLE (1 << 4) #define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430) -#define SKL_SELECT_ALTERNATE_DC_EXIT (1 << 30) -#define ICL_DELAY_PMRSP (1 << 22) -#define MASK_WAKEMEM (1 << 13) +#define SKL_SELECT_ALTERNATE_DC_EXIT REG_BIT(30) +#define ICL_DELAY_PMRSP REG_BIT(22) +#define DISABLE_FLR_SRC REG_BIT(15) +#define MASK_WAKEMEM REG_BIT(13) #define GEN11_CHICKEN_DCPR_2 _MMIO(0x46434) #define DCPR_MASK_MAXLATENCY_MEMUP_CLR REG_BIT(27) @@ -8351,6 +8441,9 @@ enum { #define GEN9_CTX_PREEMPT_REG _MMIO(0x2248) #define GEN12_DISABLE_POSH_BUSY_FF_DOP_CG REG_BIT(11) +#define GEN12_CS_DEBUG_MODE1_CCCSUNIT_BE_COMMON _MMIO(0x20EC) +#define GEN12_REPLAY_MODE_GRANULARITY REG_BIT(0) + #define GEN8_CS_CHICKEN1 _MMIO(0x2580) #define GEN9_PREEMPT_3D_OBJECT_LEVEL (1 << 0) #define GEN9_PREEMPT_GPGPU_LEVEL(hi, lo) (((hi) << 2) | ((lo) << 1)) @@ -8374,9 +8467,10 @@ enum { #define GEN8_ERRDETBCTRL (1 << 9) #define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304) - #define DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN REG_BIT(12) - #define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC REG_BIT(11) - #define GEN12_DISABLE_CPS_AWARE_COLOR_PIPE REG_BIT(9) +#define DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN REG_BIT(12) +#define XEHP_DUAL_SIMD8_SEQ_MERGE_DISABLE REG_BIT(12) +#define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC REG_BIT(11) +#define GEN12_DISABLE_CPS_AWARE_COLOR_PIPE REG_BIT(9) #define HIZ_CHICKEN _MMIO(0x7018) # define CHV_HZ_8X8_MODE_IN_1X REG_BIT(15) @@ -8430,6 +8524,12 @@ enum { #define GEN8_LQSC_FLUSH_COHERENT_LINES (1 << 21) #define GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE REG_BIT(22) +#define GEN11_L3SQCREG5 _MMIO(0xb158) +#define L3_PWM_TIMER_INIT_VAL_MASK REG_GENMASK(9, 0) + +#define XEHP_L3SCQREG7 _MMIO(0xb188) +#define BLEND_FILL_CACHING_OPT_DIS REG_BIT(3) + /* GEN8 chicken */ #define HDC_CHICKEN0 _MMIO(0x7300) #define ICL_HDC_MODE _MMIO(0xE5F4) 
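Many of the i915_reg.h hunks above replace open-coded "(x << shift)" bit definitions with the REG_BIT()/REG_GENMASK()/REG_FIELD_PREP() idiom. The sketch below is a minimal userspace model of that idiom, not driver code: the helper macros are simplified stand-ins for the real ones in i915_reg.h and <linux/bits.h> (which also add compile-time mask checks), and the DPFC_CTL_LIMIT_* values only mirror the definitions in the hunk above for illustration.

/*
 * Userspace model of the REG_BIT()/REG_GENMASK()/REG_FIELD_PREP() pattern.
 * Simplified stand-ins for the kernel macros; GCC/Clang builtins assumed.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define REG_BIT(n)		(1u << (n))
#define REG_GENMASK(h, l)	(((~0u) >> (31 - (h))) & ~((1u << (l)) - 1u))
#define REG_FIELD_PREP(mask, v)	(((uint32_t)(v) << __builtin_ctz(mask)) & (mask))
#define REG_FIELD_GET(mask, r)	(((r) & (mask)) >> __builtin_ctz(mask))

/* Mirrors the DPFC_CONTROL field definitions from the hunk above. */
#define DPFC_CTL_EN		REG_BIT(31)
#define DPFC_CTL_LIMIT_MASK	REG_GENMASK(7, 6)
#define DPFC_CTL_LIMIT_2X	REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 1)

int main(void)
{
	uint32_t val = DPFC_CTL_EN | DPFC_CTL_LIMIT_2X;

	/* A field is encoded with FIELD_PREP and decoded with FIELD_GET. */
	assert(REG_FIELD_GET(DPFC_CTL_LIMIT_MASK, val) == 1);
	printf("DPFC_CONTROL value: 0x%08x\n", val);
	return 0;
}

The gain over the old (1 << 6)-style definitions is that a single *_MASK definition drives both encode and decode, and the kernel variants additionally reject constant values that overflow the mask at build time.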
@@ -8440,6 +8540,12 @@ enum { #define HDC_FORCE_NON_COHERENT (1 << 4) #define HDC_BARRIER_PERFORMANCE_DISABLE (1 << 10) +#define GEN12_HDC_CHICKEN0 _MMIO(0xE5F0) +#define LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK REG_GENMASK(13, 11) + +#define SARB_CHICKEN1 _MMIO(0xe90c) +#define COMP_CKN_IN REG_GENMASK(30, 29) + #define GEN8_HDC_CHICKEN1 _MMIO(0x7304) /* GEN9 chicken */ @@ -8470,6 +8576,10 @@ enum { #define PIXEL_ROUNDING_TRUNC_FB_PASSTHRU (1 << 15) #define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7) +#define VFLSKPD _MMIO(0x62a8) +#define DIS_OVER_FETCH_CACHE REG_BIT(1) +#define DIS_MULT_MISS_RD_SQUASH REG_BIT(0) + #define FF_MODE2 _MMIO(0x6604) #define FF_MODE2_GS_TIMER_MASK REG_GENMASK(31, 24) #define FF_MODE2_GS_TIMER_224 REG_FIELD_PREP(FF_MODE2_GS_TIMER_MASK, 224) @@ -9293,6 +9403,9 @@ enum { #define GEN8_SDEUNIT_CLOCK_GATE_DISABLE (1 << 14) #define GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ (1 << 28) +#define UNSLCGCTL9430 _MMIO(0x9430) +#define MSQDUNIT_CLKGATE_DIS REG_BIT(3) + #define GEN6_GFXPAUSE _MMIO(0xA000) #define GEN6_RPNSWREQ _MMIO(0xA008) #define GEN6_TURBO_DISABLE (1 << 31) @@ -9608,24 +9721,39 @@ enum { #define GEN9_CCS_TLB_PREFETCH_ENABLE (1 << 3) #define GEN8_ROW_CHICKEN _MMIO(0xe4f0) -#define FLOW_CONTROL_ENABLE (1 << 15) -#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1 << 8) -#define STALL_DOP_GATING_DISABLE (1 << 5) -#define THROTTLE_12_5 (7 << 2) -#define DISABLE_EARLY_EOT (1 << 1) +#define FLOW_CONTROL_ENABLE REG_BIT(15) +#define UGM_BACKUP_MODE REG_BIT(13) +#define MDQ_ARBITRATION_MODE REG_BIT(12) +#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE REG_BIT(8) +#define STALL_DOP_GATING_DISABLE REG_BIT(5) +#define THROTTLE_12_5 REG_GENMASK(4, 2) +#define DISABLE_EARLY_EOT REG_BIT(1) #define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4) +#define GEN12_DISABLE_READ_SUPPRESSION REG_BIT(15) #define GEN12_DISABLE_EARLY_READ REG_BIT(14) +#define GEN12_ENABLE_LARGE_GRF_MODE REG_BIT(12) #define GEN12_PUSH_CONST_DEREF_HOLD_DIS REG_BIT(8) +#define LSC_CHICKEN_BIT_0 _MMIO(0xe7c8) +#define FORCE_1_SUB_MESSAGE_PER_FRAGMENT REG_BIT(15) +#define LSC_CHICKEN_BIT_0_UDW _MMIO(0xe7c8 + 4) +#define DIS_CHAIN_2XSIMD8 REG_BIT(55 - 32) +#define FORCE_SLM_FENCE_SCOPE_TO_TILE REG_BIT(42 - 32) +#define FORCE_UGM_FENCE_SCOPE_TO_TILE REG_BIT(41 - 32) +#define MAXREQS_PER_BANK REG_GENMASK(39 - 32, 37 - 32) +#define DISABLE_128B_EVICTION_COMMAND_UDW REG_BIT(36 - 32) + #define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4) #define DOP_CLOCK_GATING_DISABLE (1 << 0) #define PUSH_CONSTANT_DEREF_DISABLE (1 << 8) #define GEN11_TDL_CLOCK_GATING_FIX_DISABLE (1 << 1) -#define GEN9_ROW_CHICKEN4 _MMIO(0xe48c) -#define GEN12_DISABLE_TDL_PUSH REG_BIT(9) -#define GEN11_DIS_PICK_2ND_EU REG_BIT(7) +#define GEN9_ROW_CHICKEN4 _MMIO(0xe48c) +#define GEN12_DISABLE_GRF_CLEAR REG_BIT(13) +#define GEN12_DISABLE_TDL_PUSH REG_BIT(9) +#define GEN11_DIS_PICK_2ND_EU REG_BIT(7) +#define GEN12_DISABLE_HDR_PAST_PAYLOAD_HOLD_FIX REG_BIT(4) #define HSW_ROW_CHICKEN3 _MMIO(0xe49c) #define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6) @@ -9640,9 +9768,10 @@ enum { #define GEN8_SAMPLER_POWER_BYPASS_DIS (1 << 1) #define GEN9_HALF_SLICE_CHICKEN7 _MMIO(0xe194) -#define GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR (1 << 8) -#define GEN9_ENABLE_YV12_BUGFIX (1 << 4) -#define GEN9_ENABLE_GPGPU_PREEMPTION (1 << 2) +#define DG2_DISABLE_ROUND_ENABLE_ALLOW_FOR_SSLA REG_BIT(15) +#define GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR REG_BIT(8) +#define GEN9_ENABLE_YV12_BUGFIX REG_BIT(4) +#define GEN9_ENABLE_GPGPU_PREEMPTION REG_BIT(2) /* Audio */ #define G4X_AUD_VID_DID 
_MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x62020) @@ -9781,6 +9910,10 @@ enum { #define AUD_PIN_BUF_CTL _MMIO(0x48414) #define AUD_PIN_BUF_ENABLE REG_BIT(31) +#define AUD_TS_CDCLK_M _MMIO(0x65ea0) +#define AUD_TS_CDCLK_M_EN REG_BIT(31) +#define AUD_TS_CDCLK_N _MMIO(0x65ea4) + /* Display Audio Config Reg */ #define AUD_CONFIG_BE _MMIO(0x65ef0) #define HBLANK_EARLY_ENABLE_ICL(pipe) (0x1 << (20 - (pipe))) @@ -10212,8 +10345,6 @@ enum skl_power_gate { #define TGL_TRANS_DDI_PORT_MASK (0xf << TGL_TRANS_DDI_PORT_SHIFT) #define TRANS_DDI_SELECT_PORT(x) ((x) << TRANS_DDI_PORT_SHIFT) #define TGL_TRANS_DDI_SELECT_PORT(x) (((x) + 1) << TGL_TRANS_DDI_PORT_SHIFT) -#define TRANS_DDI_FUNC_CTL_VAL_TO_PORT(val) (((val) & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT) -#define TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(val) ((((val) & TGL_TRANS_DDI_PORT_MASK) >> TGL_TRANS_DDI_PORT_SHIFT) - 1) #define TRANS_DDI_MODE_SELECT_MASK (7 << 24) #define TRANS_DDI_MODE_SELECT_HDMI (0 << 24) #define TRANS_DDI_MODE_SELECT_DVI (1 << 24) @@ -11717,7 +11848,9 @@ enum skl_power_gate { #define TGL_DSI_CHKN_REG(port) _MMIO_PORT(port, \ _TGL_DSI_CHKN_REG_0, \ _TGL_DSI_CHKN_REG_1) -#define TGL_DSI_CHKN_LSHS_GB REG_GENMASK(15, 12) +#define TGL_DSI_CHKN_LSHS_GB_MASK REG_GENMASK(15, 12) +#define TGL_DSI_CHKN_LSHS_GB(byte_clocks) REG_FIELD_PREP(TGL_DSI_CHKN_LSHS_GB_MASK, \ + (byte_clocks)) /* Display Stream Splitter Control */ #define DSS_CTL1 _MMIO(0x67400) @@ -12464,11 +12597,19 @@ enum skl_power_gate { #define PMFLUSH_GAPL3UNBLOCK (1 << 21) #define PMFLUSHDONE_LNEBLK (1 << 22) +#define XEHP_L3NODEARBCFG _MMIO(0xb0b4) +#define XEHP_LNESPARE REG_BIT(19) + #define GEN12_GLOBAL_MOCS(i) _MMIO(0x4000 + (i) * 4) /* Global MOCS regs */ #define GEN12_GSMBASE _MMIO(0x108100) #define GEN12_DSMBASE _MMIO(0x1080C0) +#define XEHP_CLOCK_GATE_DIS _MMIO(0x101014) +#define SGSI_SIDECLK_DIS REG_BIT(17) +#define SGGI_DIS REG_BIT(15) +#define SGR_DIS REG_BIT(13) + /* gamt regs */ #define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4) #define GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW 0x67F1427F /* max/min for LRA1/2 */ @@ -12845,4 +12986,7 @@ enum skl_power_gate { #define CLKGATE_DIS_MISC _MMIO(0x46534) #define CLKGATE_DIS_MISC_DMASC_GATING_DIS REG_BIT(21) +#define SLICE_COMMON_ECO_CHICKEN1 _MMIO(0x731C) +#define MSC_MSAA_REODER_BUF_BYPASS_DISABLE REG_BIT(14) + #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 42cd17357771..ad175d662b4e 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -29,6 +29,7 @@ #include <linux/sched.h> #include <linux/sched/clock.h> #include <linux/sched/signal.h> +#include <linux/sched/mm.h> #include "gem/i915_gem_context.h" #include "gt/intel_breadcrumbs.h" @@ -113,6 +114,10 @@ static void i915_fence_release(struct dma_fence *fence) GEM_BUG_ON(rq->guc_prio != GUC_PRIO_INIT && rq->guc_prio != GUC_PRIO_FINI); + i915_request_free_capture_list(fetch_and_zero(&rq->capture_list)); + if (i915_vma_snapshot_present(&rq->batch_snapshot)) + i915_vma_snapshot_put_onstack(&rq->batch_snapshot); + /* * The request is put onto a RCU freelist (i.e. the address * is immediately reused), mark the fences as being freed now. 
@@ -186,19 +191,6 @@ void i915_request_notify_execute_cb_imm(struct i915_request *rq) __notify_execute_cb(rq, irq_work_imm); } -static void free_capture_list(struct i915_request *request) -{ - struct i915_capture_list *capture; - - capture = fetch_and_zero(&request->capture_list); - while (capture) { - struct i915_capture_list *next = capture->next; - - kfree(capture); - capture = next; - } -} - static void __i915_request_fill(struct i915_request *rq, u8 val) { void *vaddr = rq->ring->vaddr; @@ -303,6 +295,37 @@ static void __rq_cancel_watchdog(struct i915_request *rq) i915_request_put(rq); } +#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) + +/** + * i915_request_free_capture_list - Free a capture list + * @capture: Pointer to the first list item or NULL + * + */ +void i915_request_free_capture_list(struct i915_capture_list *capture) +{ + while (capture) { + struct i915_capture_list *next = capture->next; + + i915_vma_snapshot_put(capture->vma_snapshot); + capture = next; + } +} + +#define assert_capture_list_is_null(_rq) GEM_BUG_ON((_rq)->capture_list) + +#define clear_capture_list(_rq) ((_rq)->capture_list = NULL) + +#else + +#define i915_request_free_capture_list(_a) do {} while (0) + +#define assert_capture_list_is_null(_a) do {} while (0) + +#define clear_capture_list(_rq) do {} while (0) + +#endif + bool i915_request_retire(struct i915_request *rq) { if (!__i915_request_is_complete(rq)) @@ -339,7 +362,7 @@ bool i915_request_retire(struct i915_request *rq) } if (test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) - atomic_dec(&rq->engine->gt->rps.num_waiters); + intel_rps_dec_waiters(&rq->engine->gt->rps); /* * We only loosely track inflight requests across preemption, @@ -359,7 +382,6 @@ bool i915_request_retire(struct i915_request *rq) intel_context_exit(rq->context); intel_context_unpin(rq->context); - free_capture_list(rq); i915_sched_node_fini(&rq->sched); i915_request_put(rq); @@ -719,7 +741,7 @@ void i915_request_cancel(struct i915_request *rq, int error) intel_context_cancel_request(rq->context, rq); } -static int __i915_sw_fence_call +static int submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { struct i915_request *request = @@ -755,7 +777,7 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) return NOTIFY_DONE; } -static int __i915_sw_fence_call +static int semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { struct i915_request *rq = container_of(fence, typeof(*rq), semaphore); @@ -829,11 +851,18 @@ static void __i915_request_ctor(void *arg) i915_sw_fence_init(&rq->submit, submit_notify); i915_sw_fence_init(&rq->semaphore, semaphore_notify); - rq->capture_list = NULL; + clear_capture_list(rq); + rq->batch_snapshot.present = false; init_llist_head(&rq->execute_cb); } +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#define clear_batch_ptr(_rq) ((_rq)->batch = NULL) +#else +#define clear_batch_ptr(_a) do {} while (0) +#endif + struct i915_request * __i915_request_create(struct intel_context *ce, gfp_t gfp) { @@ -925,10 +954,11 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp) i915_sched_node_reinit(&rq->sched); /* No zalloc, everything must be cleared after use */ - rq->batch = NULL; + clear_batch_ptr(rq); __rq_init_watchdog(rq); - GEM_BUG_ON(rq->capture_list); + assert_capture_list_is_null(rq); GEM_BUG_ON(!llist_empty(&rq->execute_cb)); + GEM_BUG_ON(i915_vma_snapshot_present(&rq->batch_snapshot)); /* * Reserve space in the ring buffer for all the commands required to diff --git 
a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 3c6e8acd1457..0ed01979491b 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -40,6 +40,7 @@ #include "i915_scheduler.h" #include "i915_selftest.h" #include "i915_sw_fence.h" +#include "i915_vma_snapshot.h" #include <uapi/drm/i915_drm.h> @@ -48,11 +49,17 @@ struct drm_i915_gem_object; struct drm_printer; struct i915_request; +#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) struct i915_capture_list { + struct i915_vma_snapshot *vma_snapshot; struct i915_capture_list *next; - struct i915_vma *vma; }; +void i915_request_free_capture_list(struct i915_capture_list *capture); +#else +#define i915_request_free_capture_list(_a) do {} while (0) +#endif + #define RQ_TRACE(rq, fmt, ...) do { \ const struct i915_request *rq__ = (rq); \ ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt, \ @@ -289,10 +296,12 @@ struct i915_request { /** Preallocate space in the ring for the emitting the request */ u32 reserved_space; - /** Batch buffer related to this request if any (used for - * error state dump only). - */ - struct i915_vma *batch; + /** Batch buffer pointer for selftest internal use. */ + I915_SELFTEST_DECLARE(struct i915_vma *batch); + + struct i915_vma_snapshot batch_snapshot; + +#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) /** * Additional buffers requested by userspace to be captured upon * a GPU hang. The vma/obj on this list are protected by their @@ -300,6 +309,7 @@ struct i915_request { * on the active_list (of their final request). */ struct i915_capture_list *capture_list; +#endif /** Time at which this request was emitted, in jiffies. */ unsigned long emitted_jiffies; diff --git a/drivers/gpu/drm/i915/i915_scatterlist.c b/drivers/gpu/drm/i915/i915_scatterlist.c index 4a6712dca838..41f2adb6a583 100644 --- a/drivers/gpu/drm/i915/i915_scatterlist.c +++ b/drivers/gpu/drm/i915/i915_scatterlist.c @@ -41,8 +41,32 @@ bool i915_sg_trim(struct sg_table *orig_st) return true; } +static void i915_refct_sgt_release(struct kref *ref) +{ + struct i915_refct_sgt *rsgt = + container_of(ref, typeof(*rsgt), kref); + + sg_free_table(&rsgt->table); + kfree(rsgt); +} + +static const struct i915_refct_sgt_ops rsgt_ops = { + .release = i915_refct_sgt_release +}; + +/** + * i915_refct_sgt_init - Initialize a struct i915_refct_sgt with default ops + * @rsgt: The struct i915_refct_sgt to initialize. + * size: The size of the underlying memory buffer. + */ +void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size) +{ + __i915_refct_sgt_init(rsgt, size, &rsgt_ops); +} + /** - * i915_sg_from_mm_node - Create an sg_table from a struct drm_mm_node + * i915_rsgt_from_mm_node - Create a refcounted sg_table from a struct + * drm_mm_node * @node: The drm_mm_node. * @region_start: An offset to add to the dma addresses of the sg list. * @@ -50,25 +74,28 @@ bool i915_sg_trim(struct sg_table *orig_st) * taking a maximum segment length into account, splitting into segments * if necessary. * - * Return: A pointer to a kmalloced struct sg_table on success, negative + * Return: A pointer to a kmalloced struct i915_refct_sgt on success, negative * error code cast to an error pointer on failure. */ -struct sg_table *i915_sg_from_mm_node(const struct drm_mm_node *node, - u64 region_start) +struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node, + u64 region_start) { const u64 max_segment = SZ_1G; /* Do we have a limit on this? 
*/ u64 segment_pages = max_segment >> PAGE_SHIFT; u64 block_size, offset, prev_end; + struct i915_refct_sgt *rsgt; struct sg_table *st; struct scatterlist *sg; - st = kmalloc(sizeof(*st), GFP_KERNEL); - if (!st) + rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL); + if (!rsgt) return ERR_PTR(-ENOMEM); + i915_refct_sgt_init(rsgt, node->size << PAGE_SHIFT); + st = &rsgt->table; if (sg_alloc_table(st, DIV_ROUND_UP(node->size, segment_pages), GFP_KERNEL)) { - kfree(st); + i915_refct_sgt_put(rsgt); return ERR_PTR(-ENOMEM); } @@ -104,11 +131,11 @@ struct sg_table *i915_sg_from_mm_node(const struct drm_mm_node *node, sg_mark_end(sg); i915_sg_trim(st); - return st; + return rsgt; } /** - * i915_sg_from_buddy_resource - Create an sg_table from a struct + * i915_rsgt_from_buddy_resource - Create a refcounted sg_table from a struct * i915_buddy_block list * @res: The struct i915_ttm_buddy_resource. * @region_start: An offset to add to the dma addresses of the sg list. @@ -117,11 +144,11 @@ struct sg_table *i915_sg_from_mm_node(const struct drm_mm_node *node, * taking a maximum segment length into account, splitting into segments * if necessary. * - * Return: A pointer to a kmalloced struct sg_table on success, negative + * Return: A pointer to a kmalloced struct i915_refct_sgts on success, negative * error code cast to an error pointer on failure. */ -struct sg_table *i915_sg_from_buddy_resource(struct ttm_resource *res, - u64 region_start) +struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res, + u64 region_start) { struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res); const u64 size = res->num_pages << PAGE_SHIFT; @@ -129,18 +156,21 @@ struct sg_table *i915_sg_from_buddy_resource(struct ttm_resource *res, struct i915_buddy_mm *mm = bman_res->mm; struct list_head *blocks = &bman_res->blocks; struct i915_buddy_block *block; + struct i915_refct_sgt *rsgt; struct scatterlist *sg; struct sg_table *st; resource_size_t prev_end; GEM_BUG_ON(list_empty(blocks)); - st = kmalloc(sizeof(*st), GFP_KERNEL); - if (!st) + rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL); + if (!rsgt) return ERR_PTR(-ENOMEM); + i915_refct_sgt_init(rsgt, size); + st = &rsgt->table; if (sg_alloc_table(st, res->num_pages, GFP_KERNEL)) { - kfree(st); + i915_refct_sgt_put(rsgt); return ERR_PTR(-ENOMEM); } @@ -181,7 +211,7 @@ struct sg_table *i915_sg_from_buddy_resource(struct ttm_resource *res, sg_mark_end(sg); i915_sg_trim(st); - return st; + return rsgt; } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h index b8bd5925b03f..12c6a1684081 100644 --- a/drivers/gpu/drm/i915/i915_scatterlist.h +++ b/drivers/gpu/drm/i915/i915_scatterlist.h @@ -144,10 +144,78 @@ static inline unsigned int i915_sg_segment_size(void) bool i915_sg_trim(struct sg_table *orig_st); -struct sg_table *i915_sg_from_mm_node(const struct drm_mm_node *node, - u64 region_start); +/** + * struct i915_refct_sgt_ops - Operations structure for struct i915_refct_sgt + */ +struct i915_refct_sgt_ops { + /** + * release() - Free the memory of the struct i915_refct_sgt + * @ref: struct kref that is embedded in the struct i915_refct_sgt + */ + void (*release)(struct kref *ref); +}; + +/** + * struct i915_refct_sgt - A refcounted scatter-gather table + * @kref: struct kref for refcounting + * @table: struct sg_table holding the scatter-gather table itself. Note that + * @table->sgl = NULL can be used to determine whether a scatter-gather table + * is present or not. 
+ * @size: The size in bytes of the underlying memory buffer + * @ops: The operations structure. + */ +struct i915_refct_sgt { + struct kref kref; + struct sg_table table; + size_t size; + const struct i915_refct_sgt_ops *ops; +}; + +/** + * i915_refct_sgt_put - Put a refcounted sg-table + * @rsgt the struct i915_refct_sgt to put. + */ +static inline void i915_refct_sgt_put(struct i915_refct_sgt *rsgt) +{ + if (rsgt) + kref_put(&rsgt->kref, rsgt->ops->release); +} + +/** + * i915_refct_sgt_get - Get a refcounted sg-table + * @rsgt the struct i915_refct_sgt to get. + */ +static inline struct i915_refct_sgt * +i915_refct_sgt_get(struct i915_refct_sgt *rsgt) +{ + kref_get(&rsgt->kref); + return rsgt; +} + +/** + * __i915_refct_sgt_init - Initialize a refcounted sg-list with a custom + * operations structure + * @rsgt The struct i915_refct_sgt to initialize. + * @size: Size in bytes of the underlying memory buffer. + * @ops: A customized operations structure in case the refcounted sg-list + * is embedded into another structure. + */ +static inline void __i915_refct_sgt_init(struct i915_refct_sgt *rsgt, + size_t size, + const struct i915_refct_sgt_ops *ops) +{ + kref_init(&rsgt->kref); + rsgt->table.sgl = NULL; + rsgt->size = size; + rsgt->ops = ops; +} + +void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size); + +struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node, + u64 region_start); -struct sg_table *i915_sg_from_buddy_resource(struct ttm_resource *res, - u64 region_start); +struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res, + u64 region_start); #endif diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index 7ea0dbf81530..2a74a9a1cafe 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -18,7 +18,9 @@ #define I915_SW_FENCE_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr) #endif +#ifdef CONFIG_DRM_I915_SW_FENCE_CHECK_DAG static DEFINE_SPINLOCK(i915_sw_fence_lock); +#endif #define WQ_FLAG_BITS \ BITS_PER_TYPE(typeof_member(struct wait_queue_entry, flags)) @@ -34,7 +36,7 @@ enum { static void *i915_sw_fence_debug_hint(void *addr) { - return (void *)(((struct i915_sw_fence *)addr)->flags & I915_SW_FENCE_MASK); + return (void *)(((struct i915_sw_fence *)addr)->fn); } #ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS @@ -126,10 +128,7 @@ static inline void debug_fence_assert(struct i915_sw_fence *fence) static int __i915_sw_fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { - i915_sw_fence_notify_t fn; - - fn = (i915_sw_fence_notify_t)(fence->flags & I915_SW_FENCE_MASK); - return fn(fence, state); + return fence->fn(fence, state); } #ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS @@ -242,10 +241,13 @@ void __i915_sw_fence_init(struct i915_sw_fence *fence, const char *name, struct lock_class_key *key) { - BUG_ON(!fn || (unsigned long)fn & ~I915_SW_FENCE_MASK); + BUG_ON(!fn); __init_waitqueue_head(&fence->wait, name, key); - fence->flags = (unsigned long)fn; + fence->fn = fn; +#ifdef CONFIG_DRM_I915_SW_FENCE_CHECK_DAG + fence->flags = 0; +#endif i915_sw_fence_reinit(fence); } @@ -257,7 +259,6 @@ void i915_sw_fence_reinit(struct i915_sw_fence *fence) atomic_set(&fence->pending, 1); fence->error = 0; - I915_SW_FENCE_BUG_ON(!fence->flags); I915_SW_FENCE_BUG_ON(!list_empty(&fence->wait.head)); } @@ -279,6 +280,7 @@ static int i915_sw_fence_wake(wait_queue_entry_t *wq, unsigned mode, int flags, return 0; } +#ifdef CONFIG_DRM_I915_SW_FENCE_CHECK_DAG 
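The i915_refct_sgt kernel-doc above notes that __i915_refct_sgt_init() exists so a refcounted sg-table can be embedded in a larger object and torn down through a custom release() when the last reference is dropped. Below is a small, self-contained userspace sketch of that ownership pattern only; plain ints and free() stand in for kref and the real release path, and the "owner" structure is hypothetical, not part of the driver.

/* Userspace sketch of the embedded refcounted-object release pattern. */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct refct_sgt {
	int refcount;				/* stand-in for struct kref */
	size_t size;
	void (*release)(struct refct_sgt *rsgt);
};

struct owner {					/* hypothetical embedding object */
	int other_state;
	struct refct_sgt cached_rsgt;		/* embedded, not allocated alone */
};

static void owner_release(struct refct_sgt *rsgt)
{
	/* The custom release frees the container, not just the member. */
	struct owner *o = container_of(rsgt, struct owner, cached_rsgt);

	printf("last put: freeing owner, sgt size %zu\n", rsgt->size);
	free(o);
}

static void rsgt_get(struct refct_sgt *rsgt) { rsgt->refcount++; }

static void rsgt_put(struct refct_sgt *rsgt)
{
	if (rsgt && --rsgt->refcount == 0)
		rsgt->release(rsgt);
}

int main(void)
{
	struct owner *o = calloc(1, sizeof(*o));

	if (!o)
		return 1;

	o->cached_rsgt = (struct refct_sgt){ .refcount = 1, .size = 4096,
					     .release = owner_release };

	rsgt_get(&o->cached_rsgt);	/* e.g. handed to a second user */
	rsgt_put(&o->cached_rsgt);	/* second user done */
	rsgt_put(&o->cached_rsgt);	/* final put runs owner_release() */
	return 0;
}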
static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence, const struct i915_sw_fence * const signaler) { @@ -322,9 +324,6 @@ static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence, unsigned long flags; bool err; - if (!IS_ENABLED(CONFIG_DRM_I915_SW_FENCE_CHECK_DAG)) - return false; - spin_lock_irqsave(&i915_sw_fence_lock, flags); err = __i915_sw_fence_check_if_after(fence, signaler); __i915_sw_fence_clear_checked_bit(fence); @@ -332,6 +331,13 @@ static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence, return err; } +#else +static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence, + const struct i915_sw_fence * const signaler) +{ + return false; +} +#endif static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, struct i915_sw_fence *signaler, diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h index 30a863353ee6..a7c603bc1b01 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.h +++ b/drivers/gpu/drm/i915/i915_sw_fence.h @@ -17,26 +17,27 @@ struct completion; struct dma_resv; +struct i915_sw_fence; + +enum i915_sw_fence_notify { + FENCE_COMPLETE, + FENCE_FREE +}; + +typedef int (*i915_sw_fence_notify_t)(struct i915_sw_fence *, + enum i915_sw_fence_notify state); struct i915_sw_fence { wait_queue_head_t wait; + i915_sw_fence_notify_t fn; +#ifdef CONFIG_DRM_I915_SW_FENCE_CHECK_DAG unsigned long flags; +#endif atomic_t pending; int error; }; #define I915_SW_FENCE_CHECKED_BIT 0 /* used internally for DAG checking */ -#define I915_SW_FENCE_PRIVATE_BIT 1 /* available for use by owner */ -#define I915_SW_FENCE_MASK (~3) - -enum i915_sw_fence_notify { - FENCE_COMPLETE, - FENCE_FREE -}; - -typedef int (*i915_sw_fence_notify_t)(struct i915_sw_fence *, - enum i915_sw_fence_notify state); -#define __i915_sw_fence_call __aligned(4) void __i915_sw_fence_init(struct i915_sw_fence *fence, i915_sw_fence_notify_t fn, diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.c b/drivers/gpu/drm/i915/i915_sw_fence_work.c index 5b33ef23d54c..d2e56b387993 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence_work.c +++ b/drivers/gpu/drm/i915/i915_sw_fence_work.c @@ -23,7 +23,7 @@ static void fence_work(struct work_struct *work) dma_fence_put(&f->dma); } -static int __i915_sw_fence_call +static int fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { struct dma_fence_work *f = container_of(fence, typeof(*f), chain); diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c index de0e224b56ce..23777d500cdf 100644 --- a/drivers/gpu/drm/i915/i915_switcheroo.c +++ b/drivers/gpu/drm/i915/i915_switcheroo.c @@ -5,6 +5,7 @@ #include <linux/vga_switcheroo.h> +#include "i915_driver.h" #include "i915_drv.h" #include "i915_switcheroo.h" @@ -24,12 +25,12 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING; /* i915 resume handler doesn't set to D0 */ pci_set_power_state(pdev, PCI_D0); - i915_resume_switcheroo(i915); + i915_driver_resume_switcheroo(i915); i915->drm.switch_power_state = DRM_SWITCH_POWER_ON; } else { drm_info(&i915->drm, "switched off\n"); i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING; - i915_suspend_switcheroo(i915, pmm); + i915_driver_suspend_switcheroo(i915, pmm); i915->drm.switch_power_state = DRM_SWITCH_POWER_OFF; } } diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 1804f4142740..59d441cedc75 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ 
b/drivers/gpu/drm/i915/i915_sysfs.c @@ -279,7 +279,7 @@ static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribu struct drm_i915_private *i915 = kdev_minor_to_i915(kdev); struct intel_rps *rps = &i915->gt.rps; - return sysfs_emit(buf, "%d\n", intel_gpu_freq(rps, rps->boost_freq)); + return sysfs_emit(buf, "%d\n", intel_rps_get_boost_frequency(rps)); } static ssize_t gt_boost_freq_mhz_store(struct device *kdev, @@ -288,7 +288,6 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev, { struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); struct intel_rps *rps = &dev_priv->gt.rps; - bool boost = false; ssize_t ret; u32 val; @@ -296,21 +295,9 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev, if (ret) return ret; - /* Validate against (static) hardware limits */ - val = intel_freq_opcode(rps, val); - if (val < rps->min_freq || val > rps->max_freq) - return -EINVAL; - - mutex_lock(&rps->lock); - if (val != rps->boost_freq) { - rps->boost_freq = val; - boost = atomic_read(&rps->num_waiters); - } - mutex_unlock(&rps->lock); - if (boost) - schedule_work(&rps->work); + ret = intel_rps_set_boost_frequency(rps, val); - return count; + return ret ?: count; } static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev, diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 8104981a6604..6b8fb6ffe8da 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -288,7 +288,7 @@ TRACE_EVENT(vlv_fifo_size, /* plane updates */ -TRACE_EVENT(intel_update_plane, +TRACE_EVENT(intel_plane_update_noarm, TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc), TP_ARGS(plane, crtc), @@ -317,7 +317,36 @@ TRACE_EVENT(intel_update_plane, DRM_RECT_ARG((const struct drm_rect *)__entry->dst)) ); -TRACE_EVENT(intel_disable_plane, +TRACE_EVENT(intel_plane_update_arm, + TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc), + TP_ARGS(plane, crtc), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + __array(int, src, 4) + __array(int, dst, 4) + __string(name, plane->name) + ), + + TP_fast_assign( + __assign_str(name, plane->name); + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + memcpy(__entry->src, &plane->state->src, sizeof(__entry->src)); + memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst)); + ), + + TP_printk("pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT, + pipe_name(__entry->pipe), __get_str(name), + __entry->frame, __entry->scanline, + DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src), + DRM_RECT_ARG((const struct drm_rect *)__entry->dst)) +); + +TRACE_EVENT(intel_plane_disable_arm, TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc), TP_ARGS(plane, crtc), @@ -404,6 +433,48 @@ TRACE_EVENT(intel_fbc_nuke, /* pipe updates */ +TRACE_EVENT(intel_crtc_vblank_work_start, + TP_PROTO(struct intel_crtc *crtc), + TP_ARGS(crtc), + + TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + ), + + TP_printk("pipe %c, frame=%u, scanline=%u", + pipe_name(__entry->pipe), __entry->frame, + __entry->scanline) +); + +TRACE_EVENT(intel_crtc_vblank_work_end, + TP_PROTO(struct intel_crtc *crtc), + TP_ARGS(crtc), + + 
TP_STRUCT__entry( + __field(enum pipe, pipe) + __field(u32, frame) + __field(u32, scanline) + ), + + TP_fast_assign( + __entry->pipe = crtc->pipe; + __entry->frame = intel_crtc_get_vblank_counter(crtc); + __entry->scanline = intel_get_crtc_scanline(crtc); + ), + + TP_printk("pipe %c, frame=%u, scanline=%u", + pipe_name(__entry->pipe), __entry->frame, + __entry->scanline) +); + TRACE_EVENT(intel_pipe_update_start, TP_PROTO(struct intel_crtc *crtc), TP_ARGS(crtc), diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index bef795e265a6..927f0d4f8e11 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -40,12 +40,12 @@ static struct kmem_cache *slab_vmas; -struct i915_vma *i915_vma_alloc(void) +static struct i915_vma *i915_vma_alloc(void) { return kmem_cache_zalloc(slab_vmas, GFP_KERNEL); } -void i915_vma_free(struct i915_vma *vma) +static void i915_vma_free(struct i915_vma *vma) { return kmem_cache_free(slab_vmas, vma); } @@ -113,7 +113,6 @@ vma_create(struct drm_i915_gem_object *obj, vma->vm = i915_vm_get(vm); vma->ops = &vm->vma_ops; vma->obj = obj; - vma->resv = obj->base.resv; vma->size = obj->base.size; vma->display_alignment = I915_GTT_MIN_ALIGNMENT; @@ -346,7 +345,7 @@ int i915_vma_wait_for_bind(struct i915_vma *vma) fence = dma_fence_get_rcu_safe(&vma->active.excl.fence); rcu_read_unlock(); if (fence) { - err = dma_fence_wait(fence, MAX_SCHEDULE_TIMEOUT); + err = dma_fence_wait(fence, true); dma_fence_put(fence); } } @@ -354,6 +353,32 @@ int i915_vma_wait_for_bind(struct i915_vma *vma) return err; } +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) +static int i915_vma_verify_bind_complete(struct i915_vma *vma) +{ + int err = 0; + + if (i915_active_has_exclusive(&vma->active)) { + struct dma_fence *fence = + i915_active_fence_get(&vma->active.excl); + + if (!fence) + return 0; + + if (dma_fence_is_signaled(fence)) + err = fence->error; + else + err = -EBUSY; + + dma_fence_put(fence); + } + + return err; +} +#else +#define i915_vma_verify_bind_complete(_vma) 0 +#endif + /** * i915_vma_bind - Sets up PTEs for an VMA in it's corresponding address space. 
* @vma: VMA to map @@ -423,11 +448,16 @@ int i915_vma_bind(struct i915_vma *vma, work->base.dma.error = 0; /* enable the queue_work() */ + __i915_gem_object_pin_pages(vma->obj); + work->pinned = i915_gem_object_get(vma->obj); + } else { if (vma->obj) { - __i915_gem_object_pin_pages(vma->obj); - work->pinned = i915_gem_object_get(vma->obj); + int ret; + + ret = i915_gem_object_wait_moving_fence(vma->obj, true); + if (ret) + return ret; } - } else { vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags); } @@ -449,6 +479,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) GEM_BUG_ON(!i915_vma_is_ggtt(vma)); GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)); + GEM_BUG_ON(i915_vma_verify_bind_complete(vma)); ptr = READ_ONCE(vma->iomap); if (ptr == NULL) { @@ -667,7 +698,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) } color = 0; - if (vma->obj && i915_vm_has_cache_coloring(vma->vm)) + if (i915_vm_has_cache_coloring(vma->vm)) color = vma->obj->cache_level; if (flags & PIN_OFFSET_FIXED) { @@ -792,17 +823,14 @@ unpinned: static int vma_get_pages(struct i915_vma *vma) { int err = 0; - bool pinned_pages = false; + bool pinned_pages = true; if (atomic_add_unless(&vma->pages_count, 1, 0)) return 0; - if (vma->obj) { - err = i915_gem_object_pin_pages(vma->obj); - if (err) - return err; - pinned_pages = true; - } + err = i915_gem_object_pin_pages(vma->obj); + if (err) + return err; /* Allocations ahoy! */ if (mutex_lock_interruptible(&vma->pages_mutex)) { @@ -835,8 +863,8 @@ static void __vma_put_pages(struct i915_vma *vma, unsigned int count) if (atomic_sub_return(count, &vma->pages_count) == 0) { vma->ops->clear_pages(vma); GEM_BUG_ON(vma->pages); - if (vma->obj) - i915_gem_object_unpin_pages(vma->obj); + + i915_gem_object_unpin_pages(vma->obj); } mutex_unlock(&vma->pages_mutex); } @@ -867,12 +895,13 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, u64 size, u64 alignment, u64 flags) { struct i915_vma_work *work = NULL; + struct dma_fence *moving = NULL; intel_wakeref_t wakeref = 0; unsigned int bound; int err; #ifdef CONFIG_PROVE_LOCKING - if (debug_locks && !WARN_ON(!ww) && vma->resv) + if (debug_locks && !WARN_ON(!ww)) assert_vma_held(vma); #endif @@ -892,7 +921,8 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, if (flags & PIN_GLOBAL) wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm); - if (flags & vma->vm->bind_async_flags) { + moving = vma->obj ? i915_gem_object_get_moving_fence(vma->obj) : NULL; + if (flags & vma->vm->bind_async_flags || moving) { /* lock VM */ err = i915_vm_lock_objects(vma->vm, ww); if (err) @@ -906,6 +936,8 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, work->vm = i915_vm_get(vma->vm); + dma_fence_work_chain(&work->base, moving); + /* Allocate enough page directories to used PTE */ if (vma->vm->allocate_va_range) { err = i915_vm_alloc_pt_stash(vma->vm, @@ -980,7 +1012,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, GEM_BUG_ON(!vma->pages); err = i915_vma_bind(vma, - vma->obj ? 
vma->obj->cache_level : 0, + vma->obj->cache_level, flags, work); if (err) goto err_remove; @@ -1010,7 +1042,10 @@ err_fence: err_rpm: if (wakeref) intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref); + if (moving) + dma_fence_put(moving); vma_put_pages(vma); + return err; } @@ -1034,7 +1069,7 @@ int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, GEM_BUG_ON(!i915_vma_is_ggtt(vma)); #ifdef CONFIG_LOCKDEP - WARN_ON(!ww && vma->resv && dma_resv_held(vma->resv)); + WARN_ON(!ww && dma_resv_held(vma->obj->base.resv)); #endif do { @@ -1113,6 +1148,7 @@ void i915_vma_reopen(struct i915_vma *vma) void i915_vma_release(struct kref *ref) { struct i915_vma *vma = container_of(ref, typeof(*vma), ref); + struct drm_i915_gem_object *obj = vma->obj; if (drm_mm_node_allocated(&vma->node)) { mutex_lock(&vma->vm->mutex); @@ -1123,15 +1159,11 @@ void i915_vma_release(struct kref *ref) } GEM_BUG_ON(i915_vma_is_active(vma)); - if (vma->obj) { - struct drm_i915_gem_object *obj = vma->obj; - - spin_lock(&obj->vma.lock); - list_del(&vma->obj_link); - if (!RB_EMPTY_NODE(&vma->obj_node)) - rb_erase(&vma->obj_node, &obj->vma.tree); - spin_unlock(&obj->vma.lock); - } + spin_lock(&obj->vma.lock); + list_del(&vma->obj_link); + if (!RB_EMPTY_NODE(&vma->obj_node)) + rb_erase(&vma->obj_node, &obj->vma.tree); + spin_unlock(&obj->vma.lock); __i915_vma_remove_closed(vma); i915_vm_put(vma->vm); @@ -1256,19 +1288,19 @@ int _i915_vma_move_to_active(struct i915_vma *vma, } if (fence) { - dma_resv_add_excl_fence(vma->resv, fence); + dma_resv_add_excl_fence(vma->obj->base.resv, fence); obj->write_domain = I915_GEM_DOMAIN_RENDER; obj->read_domains = 0; } } else { if (!(flags & __EXEC_OBJECT_NO_RESERVE)) { - err = dma_resv_reserve_shared(vma->resv, 1); + err = dma_resv_reserve_shared(vma->obj->base.resv, 1); if (unlikely(err)) return err; } if (fence) { - dma_resv_add_shared_fence(vma->resv, fence); + dma_resv_add_shared_fence(vma->obj->base.resv, fence); obj->write_domain = 0; } } diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 648dbe744c96..4033aa08d5e4 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -234,16 +234,16 @@ static inline void __i915_vma_put(struct i915_vma *vma) kref_put(&vma->ref, i915_vma_release); } -#define assert_vma_held(vma) dma_resv_assert_held((vma)->resv) +#define assert_vma_held(vma) dma_resv_assert_held((vma)->obj->base.resv) static inline void i915_vma_lock(struct i915_vma *vma) { - dma_resv_lock(vma->resv, NULL); + dma_resv_lock(vma->obj->base.resv, NULL); } static inline void i915_vma_unlock(struct i915_vma *vma) { - dma_resv_unlock(vma->resv); + dma_resv_unlock(vma->obj->base.resv); } int __must_check @@ -418,9 +418,6 @@ static inline void i915_vma_clear_scanout(struct i915_vma *vma) list_for_each_entry(V, &(OBJ)->vma.list, obj_link) \ for_each_until(!i915_vma_is_ggtt(V)) -struct i915_vma *i915_vma_alloc(void); -void i915_vma_free(struct i915_vma *vma); - struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma); void i915_vma_make_shrinkable(struct i915_vma *vma); void i915_vma_make_purgeable(struct i915_vma *vma); diff --git a/drivers/gpu/drm/i915/i915_vma_snapshot.c b/drivers/gpu/drm/i915/i915_vma_snapshot.c new file mode 100644 index 000000000000..2949ceea9884 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_vma_snapshot.c @@ -0,0 +1,134 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +#include "i915_vma_snapshot.h" +#include "i915_vma_types.h" +#include 
"i915_vma.h" + +/** + * i915_vma_snapshot_init - Initialize a struct i915_vma_snapshot from + * a struct i915_vma. + * @vsnap: The i915_vma_snapshot to init. + * @vma: A struct i915_vma used to initialize @vsnap. + * @name: Name associated with the snapshot. The character pointer needs to + * stay alive over the lifitime of the shapsot + */ +void i915_vma_snapshot_init(struct i915_vma_snapshot *vsnap, + struct i915_vma *vma, + const char *name) +{ + if (!i915_vma_is_pinned(vma)) + assert_object_held(vma->obj); + + vsnap->name = name; + vsnap->size = vma->size; + vsnap->obj_size = vma->obj->base.size; + vsnap->gtt_offset = vma->node.start; + vsnap->gtt_size = vma->node.size; + vsnap->page_sizes = vma->page_sizes.gtt; + vsnap->pages = vma->pages; + vsnap->pages_rsgt = NULL; + vsnap->mr = NULL; + if (vma->obj->mm.rsgt) + vsnap->pages_rsgt = i915_refct_sgt_get(vma->obj->mm.rsgt); + vsnap->mr = vma->obj->mm.region; + kref_init(&vsnap->kref); + vsnap->vma_resource = &vma->active; + vsnap->onstack = false; + vsnap->present = true; +} + +/** + * i915_vma_snapshot_init_onstack - Initialize a struct i915_vma_snapshot from + * a struct i915_vma, but avoid kfreeing it on last put. + * @vsnap: The i915_vma_snapshot to init. + * @vma: A struct i915_vma used to initialize @vsnap. + * @name: Name associated with the snapshot. The character pointer needs to + * stay alive over the lifitime of the shapsot + */ +void i915_vma_snapshot_init_onstack(struct i915_vma_snapshot *vsnap, + struct i915_vma *vma, + const char *name) +{ + i915_vma_snapshot_init(vsnap, vma, name); + vsnap->onstack = true; +} + +static void vma_snapshot_release(struct kref *ref) +{ + struct i915_vma_snapshot *vsnap = + container_of(ref, typeof(*vsnap), kref); + + vsnap->present = false; + if (vsnap->pages_rsgt) + i915_refct_sgt_put(vsnap->pages_rsgt); + if (!vsnap->onstack) + kfree(vsnap); +} + +/** + * i915_vma_snapshot_put - Put an i915_vma_snapshot pointer reference + * @vsnap: The pointer reference + */ +void i915_vma_snapshot_put(struct i915_vma_snapshot *vsnap) +{ + kref_put(&vsnap->kref, vma_snapshot_release); +} + +/** + * i915_vma_snapshot_put_onstack - Put an onstcak i915_vma_snapshot pointer + * reference and varify that the structure is released + * @vsnap: The pointer reference + * + * This function is intended to be paired with a i915_vma_init_onstack() + * and should be called before exiting the scope that declared or + * freeing the structure that embedded @vsnap to verify that all references + * have been released. + */ +void i915_vma_snapshot_put_onstack(struct i915_vma_snapshot *vsnap) +{ + if (!kref_put(&vsnap->kref, vma_snapshot_release)) + GEM_BUG_ON(1); +} + +/** + * i915_vma_snapshot_resource_pin - Temporarily block the memory the + * vma snapshot is pointing to from being released. + * @vsnap: The vma snapshot. + * @lockdep_cookie: Pointer to bool needed for lockdep support. This needs + * to be passed to the paired i915_vma_snapshot_resource_unpin. + * + * This function will temporarily try to hold up a fence or similar structure + * and will therefore enter a fence signaling critical section. + * + * Return: true if we succeeded in blocking the memory from being released, + * false otherwise. 
+ */
+bool i915_vma_snapshot_resource_pin(struct i915_vma_snapshot *vsnap,
+                                    bool *lockdep_cookie)
+{
+        bool pinned = i915_active_acquire_if_busy(vsnap->vma_resource);
+
+        if (pinned)
+                *lockdep_cookie = dma_fence_begin_signalling();
+
+        return pinned;
+}
+
+/**
+ * i915_vma_snapshot_resource_unpin - Unblock vma snapshot memory from
+ * being released.
+ * @vsnap: The vma snapshot.
+ * @lockdep_cookie: Cookie returned from matching i915_vma_snapshot_resource_pin().
+ *
+ * Might leave a fence signalling critical section and signal a fence.
+ */
+void i915_vma_snapshot_resource_unpin(struct i915_vma_snapshot *vsnap,
+                                      bool lockdep_cookie)
+{
+        dma_fence_end_signalling(lockdep_cookie);
+
+        return i915_active_release(vsnap->vma_resource);
+}
diff --git a/drivers/gpu/drm/i915/i915_vma_snapshot.h b/drivers/gpu/drm/i915/i915_vma_snapshot.h
new file mode 100644
index 000000000000..940581df4622
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_vma_snapshot.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+#ifndef _I915_VMA_SNAPSHOT_H_
+#define _I915_VMA_SNAPSHOT_H_
+
+#include <linux/kref.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+struct i915_active;
+struct i915_refct_sgt;
+struct i915_vma;
+struct intel_memory_region;
+struct sg_table;
+
+/**
+ * DOC: Simple utilities for snapshotting GPU vma metadata, later used for
+ * error capture. We use a separate header for this to avoid issues due to
+ * recursive header includes.
+ */
+
+/**
+ * struct i915_vma_snapshot - Snapshot of vma metadata.
+ * @size: The vma size in bytes.
+ * @obj_size: The size of the underlying object in bytes.
+ * @gtt_offset: The gtt offset the vma is bound to.
+ * @gtt_size: The size in bytes allocated for the vma in the GTT.
+ * @pages: The struct sg_table pointing to the pages bound.
+ * @pages_rsgt: The refcounted sg_table holding the reference for @pages if any.
+ * @mr: The memory region for the pages bound.
+ * @kref: Reference for this structure.
+ * @vma_resource: FIXME: A means to keep the unbind fence from signaling.
+ * Temporarily while we have only sync unbinds, and still use the vma
+ * active, we use that. With async unbinding we need a signaling refcount
+ * for the unbind fence.
+ * @page_sizes: The vma GTT page sizes information.
+ * @onstack: Whether the structure shouldn't be freed on final put.
+ * @present: Whether the structure is present and initialized.
+ */
+struct i915_vma_snapshot {
+        const char *name;
+        size_t size;
+        size_t obj_size;
+        size_t gtt_offset;
+        size_t gtt_size;
+        struct sg_table *pages;
+        struct i915_refct_sgt *pages_rsgt;
+        struct intel_memory_region *mr;
+        struct kref kref;
+        struct i915_active *vma_resource;
+        u32 page_sizes;
+        bool onstack:1;
+        bool present:1;
+};
+
+void i915_vma_snapshot_init(struct i915_vma_snapshot *vsnap,
+                            struct i915_vma *vma,
+                            const char *name);
+
+void i915_vma_snapshot_init_onstack(struct i915_vma_snapshot *vsnap,
+                                    struct i915_vma *vma,
+                                    const char *name);
+
+void i915_vma_snapshot_put(struct i915_vma_snapshot *vsnap);
+
+void i915_vma_snapshot_put_onstack(struct i915_vma_snapshot *vsnap);
+
+bool i915_vma_snapshot_resource_pin(struct i915_vma_snapshot *vsnap,
+                                    bool *lockdep_cookie);
+
+void i915_vma_snapshot_resource_unpin(struct i915_vma_snapshot *vsnap,
+                                      bool lockdep_cookie);
+
+/**
+ * i915_vma_snapshot_alloc - Allocate a struct i915_vma_snapshot
+ * @gfp: Allocation mode.
+ *
+ * Return: A pointer to a struct i915_vma_snapshot if successful.
+ * NULL otherwise.
+ */ +static inline struct i915_vma_snapshot *i915_vma_snapshot_alloc(gfp_t gfp) +{ + return kmalloc(sizeof(struct i915_vma_snapshot), gfp); +} + +/** + * i915_vma_snapshot_get - Take a reference on a struct i915_vma_snapshot + * + * Return: A pointer to a struct i915_vma_snapshot. + */ +static inline struct i915_vma_snapshot * +i915_vma_snapshot_get(struct i915_vma_snapshot *vsnap) +{ + kref_get(&vsnap->kref); + return vsnap; +} + +/** + * i915_vma_snapshot_present - Whether a struct i915_vma_snapshot is + * present and initialized. + * + * Return: true if present and initialized; false otherwise. + */ +static inline bool +i915_vma_snapshot_present(const struct i915_vma_snapshot *vsnap) +{ + return vsnap && vsnap->present; +} + +#endif diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h index 80e93bf00f2e..f03fa96a1701 100644 --- a/drivers/gpu/drm/i915/i915_vma_types.h +++ b/drivers/gpu/drm/i915/i915_vma_types.h @@ -97,11 +97,20 @@ enum i915_cache_level; struct intel_remapped_plane_info { /* in gtt pages */ - u32 offset; - u16 width; - u16 height; - u16 src_stride; - u16 dst_stride; + u32 offset:31; + u32 linear:1; + union { + /* in gtt pages for !linear */ + struct { + u16 width; + u16 height; + u16 src_stride; + u16 dst_stride; + }; + + /* in gtt pages for linear */ + u32 size; + }; } __packed; struct intel_remapped_info { @@ -178,7 +187,6 @@ struct i915_vma { const struct i915_vma_ops *ops; struct drm_i915_gem_object *obj; - struct dma_resv *resv; /** Alias of obj->resv */ struct sg_table *pages; void __iomem *iomap; diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index 305facedd284..e6605b5181a5 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -83,33 +83,26 @@ const char *intel_platform_name(enum intel_platform platform) return platform_names[platform]; } -static const char *iommu_name(void) -{ - const char *msg = "n/a"; - -#ifdef CONFIG_INTEL_IOMMU - msg = enableddisabled(intel_iommu_gfx_mapped); -#endif - - return msg; -} - void intel_device_info_print_static(const struct intel_device_info *info, struct drm_printer *p) { - if (info->graphics_rel) - drm_printf(p, "graphics version: %u.%02u\n", info->graphics_ver, info->graphics_rel); + if (info->graphics.rel) + drm_printf(p, "graphics version: %u.%02u\n", info->graphics.ver, + info->graphics.rel); + else + drm_printf(p, "graphics version: %u\n", info->graphics.ver); + + if (info->media.rel) + drm_printf(p, "media version: %u.%02u\n", info->media.ver, info->media.rel); else - drm_printf(p, "graphics version: %u\n", info->graphics_ver); + drm_printf(p, "media version: %u\n", info->media.ver); - if (info->media_rel) - drm_printf(p, "media version: %u.%02u\n", info->media_ver, info->media_rel); + if (info->display.rel) + drm_printf(p, "display version: %u.%02u\n", info->display.ver, info->display.rel); else - drm_printf(p, "media version: %u\n", info->media_ver); + drm_printf(p, "display version: %u\n", info->display.ver); - drm_printf(p, "display version: %u\n", info->display.ver); drm_printf(p, "gt: %d\n", info->gt); - drm_printf(p, "iommu: %s\n", iommu_name()); drm_printf(p, "memory-regions: %x\n", info->memory_regions); drm_printf(p, "page-sizes: %x\n", info->page_sizes); drm_printf(p, "platform: %s\n", intel_platform_name(info->platform)); @@ -369,7 +362,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) info->display.has_dsc = 0; } - if (GRAPHICS_VER(dev_priv) == 6 
&& intel_vtd_active()) { + if (GRAPHICS_VER(dev_priv) == 6 && intel_vtd_active(dev_priv)) { drm_info(&dev_priv->drm, "Disabling ppGTT for VT-d support\n"); info->ppgtt_type = INTEL_PPGTT_NONE; diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 8e6f48d1eb7b..669f0d26c3c3 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -166,11 +166,14 @@ enum intel_ppgtt_type { func(overlay_needs_physical); \ func(supports_tv); +struct ip_version { + u8 ver; + u8 rel; +}; + struct intel_device_info { - u8 graphics_ver; - u8 graphics_rel; - u8 media_ver; - u8 media_rel; + struct ip_version graphics; + struct ip_version media; intel_engine_mask_t platform_engine_mask; /* Engines supported by the HW */ @@ -200,6 +203,7 @@ struct intel_device_info { struct { u8 ver; + u8 rel; #define DEFINE_FLAG(name) u8 name:1 DEV_INFO_DISPLAY_FOR_EACH_FLAG(DEFINE_FLAG); diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c index e7f7e6627750..b43121609e25 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.c +++ b/drivers/gpu/drm/i915/intel_memory_region.c @@ -126,7 +126,6 @@ intel_memory_region_create(struct drm_i915_private *i915, goto err_free; } - kref_init(&mem->kref); return mem; err_free: @@ -144,28 +143,17 @@ void intel_memory_region_set_name(struct intel_memory_region *mem, va_end(ap); } -static void __intel_memory_region_destroy(struct kref *kref) +void intel_memory_region_destroy(struct intel_memory_region *mem) { - struct intel_memory_region *mem = - container_of(kref, typeof(*mem), kref); + int ret = 0; if (mem->ops->release) - mem->ops->release(mem); + ret = mem->ops->release(mem); + GEM_WARN_ON(!list_empty_careful(&mem->objects.list)); mutex_destroy(&mem->objects.lock); - kfree(mem); -} - -struct intel_memory_region * -intel_memory_region_get(struct intel_memory_region *mem) -{ - kref_get(&mem->kref); - return mem; -} - -void intel_memory_region_put(struct intel_memory_region *mem) -{ - kref_put(&mem->kref, __intel_memory_region_destroy); + if (!ret) + kfree(mem); } /* Global memory region registration -- only slight layer inversions! 
*/ @@ -234,7 +222,7 @@ void intel_memory_regions_driver_release(struct drm_i915_private *i915) fetch_and_zero(&i915->mm.regions[i]); if (region) - intel_memory_region_put(region); + intel_memory_region_destroy(region); } } diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h index 3feae3353d33..5625c9c38993 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.h +++ b/drivers/gpu/drm/i915/intel_memory_region.h @@ -6,7 +6,6 @@ #ifndef __INTEL_MEMORY_REGION_H__ #define __INTEL_MEMORY_REGION_H__ -#include <linux/kref.h> #include <linux/ioport.h> #include <linux/mutex.h> #include <linux/io-mapping.h> @@ -51,7 +50,7 @@ struct intel_memory_region_ops { unsigned int flags; int (*init)(struct intel_memory_region *mem); - void (*release)(struct intel_memory_region *mem); + int (*release)(struct intel_memory_region *mem); int (*init_object)(struct intel_memory_region *mem, struct drm_i915_gem_object *obj, @@ -71,8 +70,6 @@ struct intel_memory_region { /* For fake LMEM */ struct drm_mm_node fake_mappable; - struct kref kref; - resource_size_t io_start; resource_size_t min_page_size; resource_size_t total; @@ -110,9 +107,7 @@ intel_memory_region_create(struct drm_i915_private *i915, u16 instance, const struct intel_memory_region_ops *ops); -struct intel_memory_region * -intel_memory_region_get(struct intel_memory_region *mem); -void intel_memory_region_put(struct intel_memory_region *mem); +void intel_memory_region_destroy(struct intel_memory_region *mem); int intel_memory_regions_hw_probe(struct drm_i915_private *i915); void intel_memory_regions_driver_release(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index ecbb3d141632..cff0f32bedc9 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -37,6 +37,7 @@ #include "display/intel_bw.h" #include "display/intel_de.h" #include "display/intel_display_types.h" +#include "display/intel_fb.h" #include "display/intel_fbc.h" #include "display/intel_sprite.h" #include "display/skl_universal_plane.h" @@ -97,7 +98,7 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv) * "Plane N strech max must be programmed to 11b (x1) * when Async flips are enabled on that plane." */ - if (!IS_GEMINILAKE(dev_priv) && intel_vtd_active()) + if (!IS_GEMINILAKE(dev_priv) && intel_vtd_active(dev_priv)) intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe), SKL_PLANE1_STRETCH_MAX_MASK, SKL_PLANE1_STRETCH_MAX_X1); } @@ -160,7 +161,7 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv) * Display WA #0883: bxt */ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | - ILK_DPFC_DISABLE_DUMMY0); + DPFC_DISABLE_DUMMY0); } static void glk_init_clock_gating(struct drm_i915_private *dev_priv) @@ -3062,9 +3063,9 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv) * The BIOS provided WM memory latency values are often * inadequate for high resolution displays. Adjust them. 
*/ - changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) | - ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) | - ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12); + changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12); + changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12); + changed |= ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12); if (!changed) return; @@ -3374,7 +3375,7 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv, * enabled sometime later. */ if (DISPLAY_VER(dev_priv) == 5 && !merged->fbc_wm_enabled && - intel_fbc_is_active(dev_priv)) { + intel_fbc_is_active(&dev_priv->fbc)) { for (level = 2; level <= max_level; level++) { struct intel_wm_level *wm = &merged->wm[level]; @@ -5094,6 +5095,18 @@ skl_check_nv12_wm_level(struct skl_wm_level *wm, struct skl_wm_level *uv_wm, } } +static bool icl_need_wm1_wa(struct drm_i915_private *i915, + enum plane_id plane_id) +{ + /* + * Wa_1408961008:icl, ehl + * Wa_14012656716:tgl, adl + * Underruns with WM1+ disabled + */ + return DISPLAY_VER(i915) == 11 || + (IS_DISPLAY_VER(i915, 12, 13) && plane_id == PLANE_CURSOR); +} + static int skl_allocate_plane_ddb(struct intel_atomic_state *state, struct intel_crtc *crtc) @@ -5264,11 +5277,7 @@ skl_allocate_plane_ddb(struct intel_atomic_state *state, skl_check_nv12_wm_level(&wm->wm[level], &wm->uv_wm[level], total[plane_id], uv_total[plane_id]); - /* - * Wa_1408961008:icl, ehl - * Underruns with WM1+ disabled - */ - if (DISPLAY_VER(dev_priv) == 11 && + if (icl_need_wm1_wa(dev_priv, plane_id) && level == 1 && wm->wm[0].enable) { wm->wm[level].blocks = wm->wm[0].blocks; wm->wm[level].lines = wm->wm[0].lines; @@ -7434,7 +7443,7 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv) { /* Wa_1409120013:icl,ehl */ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, - ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL); + DPFC_CHICKEN_COMP_DUMMY_PIXEL); /*Wa_14010594013:icl, ehl */ intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1, @@ -7447,7 +7456,7 @@ static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv) if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv)) intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, - ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL); + DPFC_CHICKEN_COMP_DUMMY_PIXEL); /* Wa_1409825376:tgl (pre-prod)*/ if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) @@ -7473,11 +7482,34 @@ static void dg1_init_clock_gating(struct drm_i915_private *dev_priv) gen12lp_init_clock_gating(dev_priv); /* Wa_1409836686:dg1[a0] */ - if (IS_DG1_GT_STEP(dev_priv, STEP_A0, STEP_B0)) + if (IS_DG1_GRAPHICS_STEP(dev_priv, STEP_A0, STEP_B0)) intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) | DPT_GATING_DIS); } +static void xehpsdv_init_clock_gating(struct drm_i915_private *dev_priv) +{ + /* Wa_22010146351:xehpsdv */ + if (IS_XEHPSDV_GRAPHICS_STEP(dev_priv, STEP_A0, STEP_B0)) + intel_uncore_rmw(&dev_priv->uncore, XEHP_CLOCK_GATE_DIS, 0, SGR_DIS); +} + +static void dg2_init_clock_gating(struct drm_i915_private *i915) +{ + /* Wa_22010954014:dg2_g10 */ + if (IS_DG2_G10(i915)) + intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0, + SGSI_SIDECLK_DIS); + + /* + * Wa_14010733611:dg2_g10 + * Wa_22010146351:dg2_g10 + */ + if (IS_DG2_GRAPHICS_STEP(i915, G10, STEP_A0, STEP_B0)) + intel_uncore_rmw(&i915->uncore, XEHP_CLOCK_GATE_DIS, 0, + SGR_DIS | SGGI_DIS); +} + 
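/*
 * Editor's note: a small, self-contained sketch, not part of the patch, of
 * the read-modify-write step behind the new clock-gating hooks above.
 * rmw_example() is a made-up name; it mirrors what
 * intel_uncore_rmw(uncore, reg, clear, set) does to the register value
 * (u32 comes from <linux/types.h>): drop the @clear bits, then OR in @set.
 */
static inline u32 rmw_example(u32 old, u32 clear, u32 set)
{
        return (old & ~clear) | set;
}

/*
 * For the Wa_22010146351 case above this amounts to
 * rmw_example(old, 0, SGR_DIS | SGGI_DIS): nothing is cleared and only the
 * gating-disable bits are set.
 */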
static void cnp_init_clock_gating(struct drm_i915_private *dev_priv) { if (!HAS_PCH_CNP(dev_priv)) @@ -7509,7 +7541,7 @@ static void cfl_init_clock_gating(struct drm_i915_private *dev_priv) * Display WA #0873: cfl */ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | - ILK_DPFC_NUKE_ON_ANY_MODIFICATION); + DPFC_NUKE_ON_ANY_MODIFICATION); } static void kbl_init_clock_gating(struct drm_i915_private *dev_priv) @@ -7521,12 +7553,12 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv) FBC_LLC_FULLY_OPEN); /* WaDisableSDEUnitClockGating:kbl */ - if (IS_KBL_GT_STEP(dev_priv, 0, STEP_C0)) + if (IS_KBL_GRAPHICS_STEP(dev_priv, 0, STEP_C0)) intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) | GEN8_SDEUNIT_CLOCK_GATE_DISABLE); /* WaDisableGamClockGating:kbl */ - if (IS_KBL_GT_STEP(dev_priv, 0, STEP_C0)) + if (IS_KBL_GRAPHICS_STEP(dev_priv, 0, STEP_C0)) intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) | GEN6_GAMUNIT_CLOCK_GATE_DISABLE); @@ -7542,7 +7574,7 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv) * Display WA #0873: kbl */ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | - ILK_DPFC_NUKE_ON_ANY_MODIFICATION); + DPFC_NUKE_ON_ANY_MODIFICATION); } static void skl_init_clock_gating(struct drm_i915_private *dev_priv) @@ -7569,14 +7601,14 @@ static void skl_init_clock_gating(struct drm_i915_private *dev_priv) * Display WA #0873: skl */ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | - ILK_DPFC_NUKE_ON_ANY_MODIFICATION); + DPFC_NUKE_ON_ANY_MODIFICATION); /* * WaFbcHighMemBwCorruptionAvoidance:skl * Display WA #0883: skl */ intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | - ILK_DPFC_DISABLE_DUMMY0); + DPFC_DISABLE_DUMMY0); } static void bdw_init_clock_gating(struct drm_i915_private *dev_priv) @@ -7888,6 +7920,8 @@ static const struct drm_i915_clock_gating_funcs platform##_clock_gating_funcs = .init_clock_gating = platform##_init_clock_gating, \ } +CG_FUNCS(dg2); +CG_FUNCS(xehpsdv); CG_FUNCS(adlp); CG_FUNCS(dg1); CG_FUNCS(gen12lp); @@ -7924,7 +7958,11 @@ CG_FUNCS(nop); */ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) { - if (IS_ALDERLAKE_P(dev_priv)) + if (IS_DG2(dev_priv)) + dev_priv->clock_gating_funcs = &dg2_clock_gating_funcs; + else if (IS_XEHPSDV(dev_priv)) + dev_priv->clock_gating_funcs = &xehpsdv_clock_gating_funcs; + else if (IS_ALDERLAKE_P(dev_priv)) dev_priv->clock_gating_funcs = &adlp_clock_gating_funcs; else if (IS_DG1(dev_priv)) dev_priv->clock_gating_funcs = &dg1_clock_gating_funcs; diff --git a/drivers/gpu/drm/i915/intel_region_ttm.c b/drivers/gpu/drm/i915/intel_region_ttm.c index 98c7339bf8ba..f2b888c16958 100644 --- a/drivers/gpu/drm/i915/intel_region_ttm.c +++ b/drivers/gpu/drm/i915/intel_region_ttm.c @@ -104,19 +104,50 @@ int intel_region_ttm_init(struct intel_memory_region *mem) * memory region, and if it was registered with the TTM device, * removes that registration. */ -void intel_region_ttm_fini(struct intel_memory_region *mem) +int intel_region_ttm_fini(struct intel_memory_region *mem) { - int ret; + struct ttm_resource_manager *man = mem->region_private; + int ret = -EBUSY; + int count; + + /* + * Put the region's move fences. 
This releases requests that + * may hold on to contexts and vms that may hold on to buffer + * objects placed in this region. + */ + if (man) + ttm_resource_manager_cleanup(man); + + /* Flush objects from region. */ + for (count = 0; count < 10; ++count) { + i915_gem_flush_free_objects(mem->i915); + + mutex_lock(&mem->objects.lock); + if (list_empty(&mem->objects.list)) + ret = 0; + mutex_unlock(&mem->objects.lock); + if (!ret) + break; + + msleep(20); + flush_delayed_work(&mem->i915->bdev.wq); + } + + /* If we leaked objects, Don't free the region causing use after free */ + if (ret || !man) + return ret; ret = i915_ttm_buddy_man_fini(&mem->i915->bdev, intel_region_to_ttm_type(mem)); GEM_WARN_ON(ret); mem->region_private = NULL; + + return ret; } /** - * intel_region_ttm_resource_to_st - Convert an opaque TTM resource manager resource - * to an sg_table. + * intel_region_ttm_resource_to_rsgt - + * Convert an opaque TTM resource manager resource to a refcounted sg_table. * @mem: The memory region. * @res: The resource manager resource obtained from the TTM resource manager. * @@ -126,17 +157,18 @@ void intel_region_ttm_fini(struct intel_memory_region *mem) * * Return: A malloced sg_table on success, an error pointer on failure. */ -struct sg_table *intel_region_ttm_resource_to_st(struct intel_memory_region *mem, - struct ttm_resource *res) +struct i915_refct_sgt * +intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem, + struct ttm_resource *res) { if (mem->is_range_manager) { struct ttm_range_mgr_node *range_node = to_ttm_range_mgr_node(res); - return i915_sg_from_mm_node(&range_node->mm_nodes[0], - mem->region.start); + return i915_rsgt_from_mm_node(&range_node->mm_nodes[0], + mem->region.start); } else { - return i915_sg_from_buddy_resource(res, mem->region.start); + return i915_rsgt_from_buddy_resource(res, mem->region.start); } } diff --git a/drivers/gpu/drm/i915/intel_region_ttm.h b/drivers/gpu/drm/i915/intel_region_ttm.h index 6f44075920f2..fdee5e7bd46c 100644 --- a/drivers/gpu/drm/i915/intel_region_ttm.h +++ b/drivers/gpu/drm/i915/intel_region_ttm.h @@ -20,10 +20,11 @@ void intel_region_ttm_device_fini(struct drm_i915_private *dev_priv); int intel_region_ttm_init(struct intel_memory_region *mem); -void intel_region_ttm_fini(struct intel_memory_region *mem); +int intel_region_ttm_fini(struct intel_memory_region *mem); -struct sg_table *intel_region_ttm_resource_to_st(struct intel_memory_region *mem, - struct ttm_resource *res); +struct i915_refct_sgt * +intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem, + struct ttm_resource *res); void intel_region_ttm_resource_free(struct intel_memory_region *mem, struct ttm_resource *res); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 0d85f3c5c526..22dab36afcb6 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -590,6 +590,9 @@ void intel_runtime_pm_enable(struct intel_runtime_pm *rpm) pm_runtime_use_autosuspend(kdev); } + /* Enable by default */ + pm_runtime_allow(kdev); + /* * The core calls the driver load handler with an RPM reference held. 
* We drop that here and will reacquire it during unloading in diff --git a/drivers/gpu/drm/i915/intel_step.c b/drivers/gpu/drm/i915/intel_step.c index 6cf967631395..a4b16b9e2e55 100644 --- a/drivers/gpu/drm/i915/intel_step.c +++ b/drivers/gpu/drm/i915/intel_step.c @@ -23,7 +23,8 @@ * use a macro to define these to make it easier to identify the platforms * where the two steppings can deviate. */ -#define COMMON_STEP(x) .gt_step = STEP_##x, .display_step = STEP_##x +#define COMMON_STEP(x) .graphics_step = STEP_##x, .display_step = STEP_##x, .media_step = STEP_##x +#define COMMON_GT_MEDIA_STEP(x) .graphics_step = STEP_##x, .media_step = STEP_##x static const struct intel_step_info skl_revids[] = { [0x6] = { COMMON_STEP(G0) }, @@ -33,13 +34,13 @@ static const struct intel_step_info skl_revids[] = { }; static const struct intel_step_info kbl_revids[] = { - [1] = { .gt_step = STEP_B0, .display_step = STEP_B0 }, - [2] = { .gt_step = STEP_C0, .display_step = STEP_B0 }, - [3] = { .gt_step = STEP_D0, .display_step = STEP_B0 }, - [4] = { .gt_step = STEP_F0, .display_step = STEP_C0 }, - [5] = { .gt_step = STEP_C0, .display_step = STEP_B1 }, - [6] = { .gt_step = STEP_D1, .display_step = STEP_B1 }, - [7] = { .gt_step = STEP_G0, .display_step = STEP_C0 }, + [1] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 }, + [2] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_B0 }, + [3] = { COMMON_GT_MEDIA_STEP(D0), .display_step = STEP_B0 }, + [4] = { COMMON_GT_MEDIA_STEP(F0), .display_step = STEP_C0 }, + [5] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_B1 }, + [6] = { COMMON_GT_MEDIA_STEP(D1), .display_step = STEP_B1 }, + [7] = { COMMON_GT_MEDIA_STEP(G0), .display_step = STEP_C0 }, }; static const struct intel_step_info bxt_revids[] = { @@ -63,16 +64,16 @@ static const struct intel_step_info jsl_ehl_revids[] = { }; static const struct intel_step_info tgl_uy_revids[] = { - [0] = { .gt_step = STEP_A0, .display_step = STEP_A0 }, - [1] = { .gt_step = STEP_B0, .display_step = STEP_C0 }, - [2] = { .gt_step = STEP_B1, .display_step = STEP_C0 }, - [3] = { .gt_step = STEP_C0, .display_step = STEP_D0 }, + [0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 }, + [1] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_C0 }, + [2] = { COMMON_GT_MEDIA_STEP(B1), .display_step = STEP_C0 }, + [3] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_D0 }, }; /* Same GT stepping between tgl_uy_revids and tgl_revids don't mean the same HW */ static const struct intel_step_info tgl_revids[] = { - [0] = { .gt_step = STEP_A0, .display_step = STEP_B0 }, - [1] = { .gt_step = STEP_B0, .display_step = STEP_D0 }, + [0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_B0 }, + [1] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_D0 }, }; static const struct intel_step_info rkl_revids[] = { @@ -87,38 +88,38 @@ static const struct intel_step_info dg1_revids[] = { }; static const struct intel_step_info adls_revids[] = { - [0x0] = { .gt_step = STEP_A0, .display_step = STEP_A0 }, - [0x1] = { .gt_step = STEP_A0, .display_step = STEP_A2 }, - [0x4] = { .gt_step = STEP_B0, .display_step = STEP_B0 }, - [0x8] = { .gt_step = STEP_C0, .display_step = STEP_B0 }, - [0xC] = { .gt_step = STEP_D0, .display_step = STEP_C0 }, + [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 }, + [0x1] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A2 }, + [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 }, + [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_B0 }, + [0xC] = { COMMON_GT_MEDIA_STEP(D0), .display_step = 
STEP_C0 }, }; static const struct intel_step_info adlp_revids[] = { - [0x0] = { .gt_step = STEP_A0, .display_step = STEP_A0 }, - [0x4] = { .gt_step = STEP_B0, .display_step = STEP_B0 }, - [0x8] = { .gt_step = STEP_C0, .display_step = STEP_C0 }, - [0xC] = { .gt_step = STEP_C0, .display_step = STEP_D0 }, + [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 }, + [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 }, + [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_C0 }, + [0xC] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_D0 }, }; static const struct intel_step_info xehpsdv_revids[] = { - [0x0] = { .gt_step = STEP_A0 }, - [0x1] = { .gt_step = STEP_A1 }, - [0x4] = { .gt_step = STEP_B0 }, - [0x8] = { .gt_step = STEP_C0 }, + [0x0] = { COMMON_GT_MEDIA_STEP(A0) }, + [0x1] = { COMMON_GT_MEDIA_STEP(A1) }, + [0x4] = { COMMON_GT_MEDIA_STEP(B0) }, + [0x8] = { COMMON_GT_MEDIA_STEP(C0) }, }; static const struct intel_step_info dg2_g10_revid_step_tbl[] = { - [0x0] = { .gt_step = STEP_A0, .display_step = STEP_A0 }, - [0x1] = { .gt_step = STEP_A1, .display_step = STEP_A0 }, - [0x4] = { .gt_step = STEP_B0, .display_step = STEP_B0 }, - [0x8] = { .gt_step = STEP_C0, .display_step = STEP_C0 }, + [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_A0 }, + [0x1] = { COMMON_GT_MEDIA_STEP(A1), .display_step = STEP_A0 }, + [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_B0 }, + [0x8] = { COMMON_GT_MEDIA_STEP(C0), .display_step = STEP_C0 }, }; static const struct intel_step_info dg2_g11_revid_step_tbl[] = { - [0x0] = { .gt_step = STEP_A0, .display_step = STEP_B0 }, - [0x4] = { .gt_step = STEP_B0, .display_step = STEP_C0 }, - [0x5] = { .gt_step = STEP_B1, .display_step = STEP_C0 }, + [0x0] = { COMMON_GT_MEDIA_STEP(A0), .display_step = STEP_B0 }, + [0x4] = { COMMON_GT_MEDIA_STEP(B0), .display_step = STEP_C0 }, + [0x5] = { COMMON_GT_MEDIA_STEP(B1), .display_step = STEP_C0 }, }; void intel_step_init(struct drm_i915_private *i915) @@ -179,7 +180,7 @@ void intel_step_init(struct drm_i915_private *i915) if (!revids) return; - if (revid < size && revids[revid].gt_step != STEP_NONE) { + if (revid < size && revids[revid].graphics_step != STEP_NONE) { step = revids[revid]; } else { drm_warn(&i915->drm, "Unknown revid 0x%02x\n", revid); @@ -192,7 +193,7 @@ void intel_step_init(struct drm_i915_private *i915) * steppings in the array are not monotonically increasing, but * it's better than defaulting to 0. 
*/ - while (revid < size && revids[revid].gt_step == STEP_NONE) + while (revid < size && revids[revid].graphics_step == STEP_NONE) revid++; if (revid < size) { @@ -201,12 +202,12 @@ void intel_step_init(struct drm_i915_private *i915) step = revids[revid]; } else { drm_dbg(&i915->drm, "Using future steppings\n"); - step.gt_step = STEP_FUTURE; + step.graphics_step = STEP_FUTURE; step.display_step = STEP_FUTURE; } } - if (drm_WARN_ON(&i915->drm, step.gt_step == STEP_NONE)) + if (drm_WARN_ON(&i915->drm, step.graphics_step == STEP_NONE)) return; RUNTIME_INFO(i915)->step = step; diff --git a/drivers/gpu/drm/i915/intel_step.h b/drivers/gpu/drm/i915/intel_step.h index f6641e2a3c77..d71a99bd5179 100644 --- a/drivers/gpu/drm/i915/intel_step.h +++ b/drivers/gpu/drm/i915/intel_step.h @@ -11,8 +11,9 @@ struct drm_i915_private; struct intel_step_info { - u8 gt_step; + u8 graphics_step; u8 display_step; + u8 media_step; }; #define STEP_ENUM_VAL(name) STEP_##name, diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index e072054adac5..abdac78d3976 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -22,11 +22,11 @@ */ #include <linux/pm_runtime.h> -#include <asm/iosf_mbi.h> #include "gt/intel_lrc_reg.h" /* for shadow reg list */ #include "i915_drv.h" +#include "i915_iosf_mbi.h" #include "i915_trace.h" #include "i915_vgpu.h" #include "intel_pm.h" @@ -2020,7 +2020,7 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb, return NOTIFY_OK; } -static int uncore_mmio_setup(struct intel_uncore *uncore) +int intel_uncore_setup_mmio(struct intel_uncore *uncore) { struct drm_i915_private *i915 = uncore->i915; struct pci_dev *pdev = to_pci_dev(i915->drm.dev); @@ -2053,7 +2053,7 @@ static int uncore_mmio_setup(struct intel_uncore *uncore) return 0; } -static void uncore_mmio_cleanup(struct intel_uncore *uncore) +void intel_uncore_cleanup_mmio(struct intel_uncore *uncore) { struct pci_dev *pdev = to_pci_dev(uncore->i915->drm.dev); @@ -2146,10 +2146,6 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore) struct drm_i915_private *i915 = uncore->i915; int ret; - ret = uncore_mmio_setup(uncore); - if (ret) - return ret; - /* * The boot firmware initializes local memory and assesses its health. 
* If memory training fails, the punit will have been instructed to @@ -2170,7 +2166,7 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore) } else { ret = uncore_forcewake_init(uncore); if (ret) - goto out_mmio_cleanup; + return ret; } /* make sure fw funcs are set if and only if we have fw*/ @@ -2192,11 +2188,6 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore) drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n"); return 0; - -out_mmio_cleanup: - uncore_mmio_cleanup(uncore); - - return ret; } /* @@ -2261,8 +2252,6 @@ void intel_uncore_fini_mmio(struct intel_uncore *uncore) intel_uncore_fw_domains_fini(uncore); iosf_mbi_punit_release(); } - - uncore_mmio_cleanup(uncore); } static const struct reg_whitelist { diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index 3248e4e2c540..d1d17b04e29f 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -218,11 +218,13 @@ void intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug); void intel_uncore_init_early(struct intel_uncore *uncore, struct drm_i915_private *i915); +int intel_uncore_setup_mmio(struct intel_uncore *uncore); int intel_uncore_init_mmio(struct intel_uncore *uncore); void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore, struct intel_gt *gt); bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore); bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore); +void intel_uncore_cleanup_mmio(struct intel_uncore *uncore); void intel_uncore_fini_mmio(struct intel_uncore *uncore); void intel_uncore_suspend(struct intel_uncore *uncore); void intel_uncore_resume_early(struct intel_uncore *uncore); diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c index 23fd86de5a24..6a7d4e2ee138 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c @@ -7,26 +7,29 @@ #include "intel_pxp_irq.h" #include "intel_pxp_pm.h" #include "intel_pxp_session.h" +#include "i915_drv.h" -void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime) +void intel_pxp_suspend_prepare(struct intel_pxp *pxp) { if (!intel_pxp_is_enabled(pxp)) return; pxp->arb_is_valid = false; - /* - * Contexts using protected objects keep a runtime PM reference, so we - * can only runtime suspend when all of them have been either closed - * or banned. Therefore, there is no need to invalidate in that - * scenario. 
- */ - if (!runtime) - intel_pxp_invalidate(pxp); + intel_pxp_invalidate(pxp); +} - intel_pxp_fini_hw(pxp); +void intel_pxp_suspend(struct intel_pxp *pxp) +{ + intel_wakeref_t wakeref; - pxp->hw_state_invalidated = false; + if (!intel_pxp_is_enabled(pxp)) + return; + + with_intel_runtime_pm(&pxp_to_gt(pxp)->i915->runtime_pm, wakeref) { + intel_pxp_fini_hw(pxp); + pxp->hw_state_invalidated = false; + } } void intel_pxp_resume(struct intel_pxp *pxp) @@ -44,3 +47,15 @@ void intel_pxp_resume(struct intel_pxp *pxp) intel_pxp_init_hw(pxp); } + +void intel_pxp_runtime_suspend(struct intel_pxp *pxp) +{ + if (!intel_pxp_is_enabled(pxp)) + return; + + pxp->arb_is_valid = false; + + intel_pxp_fini_hw(pxp); + + pxp->hw_state_invalidated = false; +} diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h index c89e97a0c3d0..16990a3f2f85 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h @@ -9,16 +9,29 @@ #include "intel_pxp_types.h" #ifdef CONFIG_DRM_I915_PXP -void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime); +void intel_pxp_suspend_prepare(struct intel_pxp *pxp); +void intel_pxp_suspend(struct intel_pxp *pxp); void intel_pxp_resume(struct intel_pxp *pxp); +void intel_pxp_runtime_suspend(struct intel_pxp *pxp); #else -static inline void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime) +static inline void intel_pxp_suspend_prepare(struct intel_pxp *pxp) +{ +} + +static inline void intel_pxp_suspend(struct intel_pxp *pxp) { } static inline void intel_pxp_resume(struct intel_pxp *pxp) { } -#endif +static inline void intel_pxp_runtime_suspend(struct intel_pxp *pxp) +{ +} +#endif +static inline void intel_pxp_runtime_resume(struct intel_pxp *pxp) +{ + intel_pxp_resume(pxp); +} #endif /* __INTEL_PXP_PM_H__ */ diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c index d02732f04757..598840b73dfa 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c @@ -3,7 +3,8 @@ * Copyright(c) 2020, Intel Corporation. All rights reserved. */ -#include "drm/i915_drm.h" +#include <drm/i915_drm.h> + #include "i915_drv.h" #include "intel_pxp.h" diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c index 49508f31dcb7..5d169624ad60 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c @@ -4,8 +4,10 @@ */ #include <linux/component.h> -#include "drm/i915_pxp_tee_interface.h" -#include "drm/i915_component.h" + +#include <drm/i915_pxp_tee_interface.h> +#include <drm/i915_component.h> + #include "i915_drv.h" #include "intel_pxp.h" #include "intel_pxp_session.h" diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index f99bb0113726..7e0658a77659 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -442,6 +442,7 @@ static int igt_evict_contexts(void *arg) /* Overfill the GGTT with context objects and so try to evict one. 
*/ for_each_engine(engine, gt, id) { struct i915_sw_fence fence; + struct i915_request *last = NULL; count = 0; onstack_fence_init(&fence); @@ -479,6 +480,9 @@ static int igt_evict_contexts(void *arg) i915_request_add(rq); count++; + if (last) + i915_request_put(last); + last = i915_request_get(rq); err = 0; } while(1); onstack_fence_fini(&fence); @@ -486,6 +490,21 @@ static int igt_evict_contexts(void *arg) count, engine->name); if (err) break; + if (last) { + if (i915_request_wait(last, 0, HZ) < 0) { + err = -EIO; + i915_request_put(last); + pr_err("Failed waiting for last request (on %s)", + engine->name); + break; + } + i915_request_put(last); + } + err = intel_gt_wait_for_idle(engine->gt, HZ * 3); + if (err) { + pr_err("Failed to idle GT (on %s)", engine->name); + break; + } } mutex_lock(&ggtt->vm.mutex); diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index d67710d10615..9979ef9197cd 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -209,6 +209,10 @@ static int igt_request_rewind(void *arg) int err = -EINVAL; ctx[0] = mock_context(i915, "A"); + if (!ctx[0]) { + err = -ENOMEM; + goto err_ctx_0; + } ce = i915_gem_context_get_engine(ctx[0], RCS0); GEM_BUG_ON(IS_ERR(ce)); @@ -223,6 +227,10 @@ static int igt_request_rewind(void *arg) i915_request_add(request); ctx[1] = mock_context(i915, "B"); + if (!ctx[1]) { + err = -ENOMEM; + goto err_ctx_1; + } ce = i915_gem_context_get_engine(ctx[1], RCS0); GEM_BUG_ON(IS_ERR(ce)); @@ -261,9 +269,11 @@ err: i915_request_put(vip); err_context_1: mock_context_close(ctx[1]); +err_ctx_1: i915_request_put(request); err_context_0: mock_context_close(ctx[0]); +err_ctx_0: mock_device_flush(i915); return err; } @@ -2805,7 +2815,7 @@ static int p_sync0(void *arg) i915_request_add(rq); err = 0; - if (i915_request_wait(rq, 0, HZ / 5) < 0) + if (i915_request_wait(rq, 0, HZ) < 0) err = -ETIME; i915_request_put(rq); if (err) @@ -2876,7 +2886,7 @@ static int p_sync1(void *arg) i915_request_add(rq); err = 0; - if (prev && i915_request_wait(prev, 0, HZ / 5) < 0) + if (prev && i915_request_wait(prev, 0, HZ) < 0) err = -ETIME; i915_request_put(prev); prev = rq; diff --git a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c index cbf45d85cbff..daa985e5a19b 100644 --- a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c @@ -28,7 +28,7 @@ #include "../i915_selftest.h" -static int __i915_sw_fence_call +static int fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { switch (state) { diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.c b/drivers/gpu/drm/i915/selftests/igt_reset.c index 9f8590b868a9..a2838c65f8a5 100644 --- a/drivers/gpu/drm/i915/selftests/igt_reset.c +++ b/drivers/gpu/drm/i915/selftests/igt_reset.c @@ -36,7 +36,7 @@ void igt_global_reset_unlock(struct intel_gt *gt) enum intel_engine_id id; for_each_engine(engine, gt, id) - clear_bit(I915_RESET_ENGINE + id, >->reset.flags); + clear_and_wake_up_bit(I915_RESET_ENGINE + id, >->reset.flags); clear_bit(I915_RESET_BACKOFF, >->reset.flags); wake_up_all(>->reset.queue); diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c index 418caae84759..0d5df0dc7212 100644 --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c @@ -225,7 +225,7 @@ static int 
igt_mock_reserve(void *arg) out_close: close_objects(mem, &objects); - intel_memory_region_put(mem); + intel_memory_region_destroy(mem); out_free_order: kfree(order); return err; @@ -439,7 +439,7 @@ static int igt_mock_splintered_region(void *arg) out_close: close_objects(mem, &objects); out_put: - intel_memory_region_put(mem); + intel_memory_region_destroy(mem); return err; } @@ -507,7 +507,7 @@ static int igt_mock_max_segment(void *arg) out_close: close_objects(mem, &objects); out_put: - intel_memory_region_put(mem); + intel_memory_region_destroy(mem); return err; } @@ -1196,7 +1196,7 @@ int intel_memory_region_mock_selftests(void) err = i915_subtests(tests, mem); - intel_memory_region_put(mem); + intel_memory_region_destroy(mem); out_unref: mock_destroy_device(i915); return err; diff --git a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c index 080b90b63d16..bf2752cc1e0b 100644 --- a/drivers/gpu/drm/i915/selftests/lib_sw_fence.c +++ b/drivers/gpu/drm/i915/selftests/lib_sw_fence.c @@ -26,7 +26,7 @@ /* Small library of different fence types useful for writing tests */ -static int __i915_sw_fence_call +static int nop_fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { return NOTIFY_DONE; @@ -41,12 +41,12 @@ void __onstack_fence_init(struct i915_sw_fence *fence, __init_waitqueue_head(&fence->wait, name, key); atomic_set(&fence->pending, 1); fence->error = 0; - fence->flags = (unsigned long)nop_fence_notify; + fence->fn = nop_fence_notify; } void onstack_fence_fini(struct i915_sw_fence *fence) { - if (!fence->flags) + if (!fence->fn) return; i915_sw_fence_commit(fence); @@ -89,7 +89,7 @@ struct heap_fence { }; }; -static int __i915_sw_fence_call +static int heap_fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) { struct heap_fence *h = container_of(fence, typeof(*h), fence); diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 4f8180146888..d0e2e61de8d4 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -165,7 +165,7 @@ struct drm_i915_private *mock_gem_device(void) /* Using the global GTT may ask questions about KMS users, so prepare */ drm_mode_config_init(&i915->drm); - mkwrite_device_info(i915)->graphics_ver = -1; + mkwrite_device_info(i915)->graphics.ver = -1; mkwrite_device_info(i915)->page_sizes = I915_GTT_PAGE_SIZE_4K | @@ -177,6 +177,8 @@ struct drm_i915_private *mock_gem_device(void) mock_uncore_init(&i915->uncore, i915); + spin_lock_init(&i915->gpu_error.lock); + i915_gem_init__mm(i915); intel_gt_init_early(&i915->gt, i915); atomic_inc(&i915->gt.wakeref.count); /* disable; no hw support */ diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c index 75793008c4ef..19bff8afcaaa 100644 --- a/drivers/gpu/drm/i915/selftests/mock_region.c +++ b/drivers/gpu/drm/i915/selftests/mock_region.c @@ -15,9 +15,9 @@ static void mock_region_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages) { + i915_refct_sgt_put(obj->mm.rsgt); + obj->mm.rsgt = NULL; intel_region_ttm_resource_free(obj->mm.region, obj->mm.res); - sg_free_table(pages); - kfree(pages); } static int mock_region_get_pages(struct drm_i915_gem_object *obj) @@ -36,12 +36,14 @@ static int mock_region_get_pages(struct drm_i915_gem_object *obj) if (IS_ERR(obj->mm.res)) return PTR_ERR(obj->mm.res); - pages = 
intel_region_ttm_resource_to_st(obj->mm.region, obj->mm.res); - if (IS_ERR(pages)) { - err = PTR_ERR(pages); + obj->mm.rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region, + obj->mm.res); + if (IS_ERR(obj->mm.rsgt)) { + err = PTR_ERR(obj->mm.rsgt); goto err_free_resource; } + pages = &obj->mm.rsgt->table; __i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl)); return 0; @@ -82,13 +84,16 @@ static int mock_object_init(struct intel_memory_region *mem, return 0; } -static void mock_region_fini(struct intel_memory_region *mem) +static int mock_region_fini(struct intel_memory_region *mem) { struct drm_i915_private *i915 = mem->i915; int instance = mem->instance; + int ret; - intel_region_ttm_fini(mem); + ret = intel_region_ttm_fini(mem); ida_free(&i915->selftest.mock_region_instances, instance); + + return ret; } static const struct intel_memory_region_ops mock_region_ops = { diff --git a/drivers/gpu/drm/i915/vlv_sideband.c b/drivers/gpu/drm/i915/vlv_sideband.c index 35380738a951..ed2ac5752ac4 100644 --- a/drivers/gpu/drm/i915/vlv_sideband.c +++ b/drivers/gpu/drm/i915/vlv_sideband.c @@ -3,9 +3,8 @@ * Copyright © 2013-2021 Intel Corporation */ -#include <asm/iosf_mbi.h> - #include "i915_drv.h" +#include "i915_iosf_mbi.h" #include "vlv_sideband.h" /* diff --git a/drivers/gpu/drm/lima/lima_device.c b/drivers/gpu/drm/lima/lima_device.c index 36c990589427..02cef0cea657 100644 --- a/drivers/gpu/drm/lima/lima_device.c +++ b/drivers/gpu/drm/lima/lima_device.c @@ -4,6 +4,7 @@ #include <linux/regulator/consumer.h> #include <linux/reset.h> #include <linux/clk.h> +#include <linux/slab.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index ae11061727ff..39197b4beea7 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -4,8 +4,8 @@ config DRM_MSM tristate "MSM DRM" depends on DRM depends on ARCH_QCOM || SOC_IMX5 || COMPILE_TEST + depends on COMMON_CLK depends on IOMMU_SUPPORT - depends on (OF && COMMON_CLK) || COMPILE_TEST depends on QCOM_OCMEM || QCOM_OCMEM=n depends on QCOM_LLCC || QCOM_LLCC=n depends on QCOM_COMMAND_DB || QCOM_COMMAND_DB=n diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile index 40577f8856d8..093454457545 100644 --- a/drivers/gpu/drm/msm/Makefile +++ b/drivers/gpu/drm/msm/Makefile @@ -23,8 +23,10 @@ msm-y := \ hdmi/hdmi_i2c.o \ hdmi/hdmi_phy.o \ hdmi/hdmi_phy_8960.o \ + hdmi/hdmi_phy_8996.o \ hdmi/hdmi_phy_8x60.o \ hdmi/hdmi_phy_8x74.o \ + hdmi/hdmi_pll_8960.o \ edp/edp.o \ edp/edp_aux.o \ edp/edp_bridge.o \ @@ -37,6 +39,7 @@ msm-y := \ disp/mdp4/mdp4_dtv_encoder.o \ disp/mdp4/mdp4_lcdc_encoder.o \ disp/mdp4/mdp4_lvds_connector.o \ + disp/mdp4/mdp4_lvds_pll.o \ disp/mdp4/mdp4_irq.o \ disp/mdp4/mdp4_kms.o \ disp/mdp4/mdp4_plane.o \ @@ -116,9 +119,6 @@ msm-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \ dp/dp_audio.o msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o -msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o -msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o -msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 267a880811d6..78aad5216a61 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -1424,17 +1424,24 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu) { struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; struct msm_gpu *gpu = 
&adreno_gpu->base; - u32 gpu_scid, cntl1_regval = 0; + u32 cntl1_regval = 0; if (IS_ERR(a6xx_gpu->llc_mmio)) return; if (!llcc_slice_activate(a6xx_gpu->llc_slice)) { - gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice); + u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice); gpu_scid &= 0x1f; cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) | (gpu_scid << 15) | (gpu_scid << 20); + + /* On A660, the SCID programming for UCHE traffic is done in + * A6XX_GBIF_SCACHE_CNTL0[14:10] + */ + if (adreno_is_a660_family(adreno_gpu)) + gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) | + (1 << 8), (gpu_scid << 10) | (1 << 8)); } /* @@ -1471,13 +1478,6 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu) } gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval); - - /* On A660, the SCID programming for UCHE traffic is done in - * A6XX_GBIF_SCACHE_CNTL0[14:10] - */ - if (adreno_is_a660_family(adreno_gpu)) - gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) | - (1 << 8), (gpu_scid << 10) | (1 << 8)); } static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu) @@ -1640,7 +1640,7 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu) return (unsigned long)busy_time; } -void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp) +static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c index 7501849ed15d..6e90209cd543 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c @@ -777,12 +777,12 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu, struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu); a6xx_state->gmu_registers = state_kcalloc(a6xx_state, - 2, sizeof(*a6xx_state->gmu_registers)); + 3, sizeof(*a6xx_state->gmu_registers)); if (!a6xx_state->gmu_registers) return; - a6xx_state->nr_gmu_registers = 2; + a6xx_state->nr_gmu_registers = 3; /* Get the CX GMU registers from AHB */ _a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0], diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c index eb40d8413bca..6d36f63c3338 100644 --- a/drivers/gpu/drm/msm/dp/dp_aux.c +++ b/drivers/gpu/drm/msm/dp/dp_aux.c @@ -33,6 +33,7 @@ struct dp_aux_private { bool read; bool no_send_addr; bool no_send_stop; + bool initted; u32 offset; u32 segment; @@ -331,6 +332,10 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, } mutex_lock(&aux->mutex); + if (!aux->initted) { + ret = -EIO; + goto exit; + } dp_aux_update_offset_and_segment(aux, msg); dp_aux_transfer_helper(aux, msg, true); @@ -380,6 +385,8 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux, } aux->cmd_busy = false; + +exit: mutex_unlock(&aux->mutex); return ret; @@ -431,8 +438,13 @@ void dp_aux_init(struct drm_dp_aux *dp_aux) aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + mutex_lock(&aux->mutex); + dp_catalog_aux_enable(aux->catalog, true); aux->retry_cnt = 0; + aux->initted = true; + + mutex_unlock(&aux->mutex); } void dp_aux_deinit(struct drm_dp_aux *dp_aux) @@ -441,7 +453,12 @@ void dp_aux_deinit(struct drm_dp_aux *dp_aux) aux = container_of(dp_aux, struct dp_aux_private, dp_aux); + mutex_lock(&aux->mutex); + + aux->initted = false; dp_catalog_aux_enable(aux->catalog, false); + + mutex_unlock(&aux->mutex); } int dp_aux_register(struct drm_dp_aux *dp_aux) diff 
--git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index 4c7b6944fc0d..a6893cc45fe4 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -1664,6 +1664,8 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host, if (!prop) { DRM_DEV_DEBUG(dev, "failed to find data lane mapping, using default\n"); + /* Set the number of date lanes to 4 by default. */ + msm_host->num_data_lanes = 4; return 0; } diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index 09d2d279c30a..dee13fedee3b 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c @@ -77,6 +77,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file) goto free_priv; pm_runtime_get_sync(&gpu->pdev->dev); + msm_gpu_hw_init(gpu); show_priv->state = gpu->funcs->gpu_state_get(gpu); pm_runtime_put_sync(&gpu->pdev->dev); diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 7936e8d498dd..892c04365239 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -967,29 +967,18 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data, return ret; } -static int msm_ioctl_wait_fence(struct drm_device *dev, void *data, - struct drm_file *file) +static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id, + ktime_t timeout) { - struct msm_drm_private *priv = dev->dev_private; - struct drm_msm_wait_fence *args = data; - ktime_t timeout = to_ktime(args->timeout); - struct msm_gpu_submitqueue *queue; - struct msm_gpu *gpu = priv->gpu; struct dma_fence *fence; int ret; - if (args->pad) { - DRM_ERROR("invalid pad: %08x\n", args->pad); + if (fence_id > queue->last_fence) { + DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n", + fence_id, queue->last_fence); return -EINVAL; } - if (!gpu) - return 0; - - queue = msm_submitqueue_get(file->driver_priv, args->queueid); - if (!queue) - return -ENOENT; - /* * Map submitqueue scoped "seqno" (which is actually an idr key) * back to underlying dma-fence @@ -1001,7 +990,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data, ret = mutex_lock_interruptible(&queue->lock); if (ret) return ret; - fence = idr_find(&queue->fence_idr, args->fence); + fence = idr_find(&queue->fence_idr, fence_id); if (fence) fence = dma_fence_get_rcu(fence); mutex_unlock(&queue->lock); @@ -1017,6 +1006,32 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data, } dma_fence_put(fence); + + return ret; +} + +static int msm_ioctl_wait_fence(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct msm_drm_private *priv = dev->dev_private; + struct drm_msm_wait_fence *args = data; + struct msm_gpu_submitqueue *queue; + int ret; + + if (args->pad) { + DRM_ERROR("invalid pad: %08x\n", args->pad); + return -EINVAL; + } + + if (!priv->gpu) + return 0; + + queue = msm_submitqueue_get(file->driver_priv, args->queueid); + if (!queue) + return -ENOENT; + + ret = wait_fence(queue, args->fence, to_ktime(args->timeout)); + msm_submitqueue_put(queue); return ret; diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 2916480d9115..02b9ae65a96a 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -1029,8 +1029,7 @@ static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct { struct msm_gem_object *msm_obj = to_msm_bo(obj); - vma->vm_flags &= ~VM_PFNMAP; - vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND; + 
vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP; vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags)); return 0; @@ -1094,7 +1093,7 @@ static int msm_gem_new_impl(struct drm_device *dev, break; fallthrough; default: - DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n", + DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n", (flags & MSM_BO_CACHE_MASK)); return -EINVAL; } diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c index 4a1420b05e97..086dacf2f26a 100644 --- a/drivers/gpu/drm/msm/msm_gem_shrinker.c +++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c @@ -5,6 +5,7 @@ */ #include <linux/vmalloc.h> +#include <linux/sched/mm.h> #include "msm_drv.h" #include "msm_gem.h" diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 3cb029f10925..282628d6b72c 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -772,6 +772,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, args->nr_cmds); if (IS_ERR(submit)) { ret = PTR_ERR(submit); + submit = NULL; goto out_unlock; } @@ -904,6 +905,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, drm_sched_entity_push_job(&submit->base); args->fence = submit->fence_id; + queue->last_fence = submit->fence_id; msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs); msm_process_post_deps(post_deps, args->nr_out_syncobjs, diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 59cdd00b69d0..48ea2de911f1 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -359,6 +359,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio, * @ring_nr: the ringbuffer used by this submitqueue, which is determined * by the submitqueue's priority * @faults: the number of GPU hangs associated with this submitqueue + * @last_fence: the sequence number of the last allocated fence (for error + * checking) * @ctx: the per-drm_file context associated with the submitqueue (ie. 
* which set of pgtables do submits jobs associated with the * submitqueue use) @@ -374,6 +376,7 @@ struct msm_gpu_submitqueue { u32 flags; u32 ring_nr; int faults; + uint32_t last_fence; struct msm_file_private *ctx; struct list_head node; struct idr fence_idr; diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c index 8b7473f69cb8..384e90c4b2a7 100644 --- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c +++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c @@ -20,6 +20,10 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq, struct msm_gpu *gpu = dev_to_gpu(dev); struct dev_pm_opp *opp; + /* + * Note that devfreq_recommended_opp() can modify the freq + * to something that actually is in the opp table: + */ opp = devfreq_recommended_opp(dev, freq, flags); /* @@ -28,6 +32,7 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq, */ if (gpu->devfreq.idle_freq) { gpu->devfreq.idle_freq = *freq; + dev_pm_opp_put(opp); return 0; } @@ -203,9 +208,6 @@ static void msm_devfreq_idle_work(struct kthread_work *work) struct msm_gpu *gpu = container_of(df, struct msm_gpu, devfreq); unsigned long idle_freq, target_freq = 0; - if (!df->devfreq) - return; - /* * Hold devfreq lock to synchronize with get_dev_status()/ * target() callbacks @@ -227,6 +229,9 @@ void msm_devfreq_idle(struct msm_gpu *gpu) { struct msm_gpu_devfreq *df = &gpu->devfreq; + if (!df->devfreq) + return; + msm_hrtimer_queue_work(&df->idle_work, ms_to_ktime(1), - HRTIMER_MODE_ABS); + HRTIMER_MODE_REL); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index b51d690f375f..88d262ba648c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -2627,6 +2627,27 @@ nv174_chipset = { }; static const struct nvkm_device_chip +nv176_chipset = { + .name = "GA106", + .bar = { 0x00000001, tu102_bar_new }, + .bios = { 0x00000001, nvkm_bios_new }, + .devinit = { 0x00000001, ga100_devinit_new }, + .fb = { 0x00000001, ga102_fb_new }, + .gpio = { 0x00000001, ga102_gpio_new }, + .i2c = { 0x00000001, gm200_i2c_new }, + .imem = { 0x00000001, nv50_instmem_new }, + .mc = { 0x00000001, ga100_mc_new }, + .mmu = { 0x00000001, tu102_mmu_new }, + .pci = { 0x00000001, gp100_pci_new }, + .privring = { 0x00000001, gm200_privring_new }, + .timer = { 0x00000001, gk20a_timer_new }, + .top = { 0x00000001, ga100_top_new }, + .disp = { 0x00000001, ga102_disp_new }, + .dma = { 0x00000001, gv100_dma_new }, + .fifo = { 0x00000001, ga102_fifo_new }, +}; + +static const struct nvkm_device_chip nv177_chipset = { .name = "GA107", .bar = { 0x00000001, tu102_bar_new }, @@ -3072,6 +3093,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func, case 0x168: device->chip = &nv168_chipset; break; case 0x172: device->chip = &nv172_chipset; break; case 0x174: device->chip = &nv174_chipset; break; + case 0x176: device->chip = &nv176_chipset; break; case 0x177: device->chip = &nv177_chipset; break; default: if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) { diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c index 6e3c450eaace..3ff49344abc7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigv100.c @@ -62,7 +62,6 @@ gv100_hdmi_ctrl(struct nvkm_ior *ior, int head, bool enable, u8 max_ac_packet, nvkm_wr32(device, 0x6f0108 + hdmi, 
vendor_infoframe.header); nvkm_wr32(device, 0x6f010c + hdmi, vendor_infoframe.subpack0_low); nvkm_wr32(device, 0x6f0110 + hdmi, vendor_infoframe.subpack0_high); - nvkm_wr32(device, 0x6f0110 + hdmi, 0x00000000); nvkm_wr32(device, 0x6f0114 + hdmi, 0x00000000); nvkm_wr32(device, 0x6f0118 + hdmi, 0x00000000); nvkm_wr32(device, 0x6f011c + hdmi, 0x00000000); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c index cdb1ead26d84..82b4c8e1457c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gm200.c @@ -207,11 +207,13 @@ int gm200_acr_wpr_parse(struct nvkm_acr *acr) { const struct wpr_header *hdr = (void *)acr->wpr_fw->data; + struct nvkm_acr_lsfw *lsfw; while (hdr->falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID) { wpr_header_dump(&acr->subdev, hdr); - if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id)) - return -ENOMEM; + lsfw = nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id); + if (IS_ERR(lsfw)) + return PTR_ERR(lsfw); } return 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c index fb9132a39bb1..fd97a935a380 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/gp102.c @@ -161,11 +161,13 @@ int gp102_acr_wpr_parse(struct nvkm_acr *acr) { const struct wpr_header_v1 *hdr = (void *)acr->wpr_fw->data; + struct nvkm_acr_lsfw *lsfw; while (hdr->falcon_id != WPR_HEADER_V1_FALCON_ID_INVALID) { wpr_header_v1_dump(&acr->subdev, hdr); - if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id)) - return -ENOMEM; + lsfw = nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id); + if (IS_ERR(lsfw)) + return PTR_ERR(lsfw); } return 0; diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 482fb0ae6cb5..7afe28408085 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -168,7 +168,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) if (!r) { acpi_status = radeon_acpi_init(rdev); if (acpi_status) - dev_dbg(dev->dev, "Error during ACPI methods call\n"); + dev_dbg(dev->dev, "Error during ACPI methods call\n"); } if (radeon_is_px(dev)) { diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig index 71a4611e1557..f6e6a6d5d987 100644 --- a/drivers/gpu/drm/rcar-du/Kconfig +++ b/drivers/gpu/drm/rcar-du/Kconfig @@ -4,8 +4,6 @@ config DRM_RCAR_DU depends on DRM && OF depends on ARM || ARM64 depends on ARCH_RENESAS || COMPILE_TEST - imply DRM_RCAR_CMM - imply DRM_RCAR_LVDS select DRM_KMS_HELPER select DRM_GEM_CMA_HELPER select VIDEOMODE_HELPERS @@ -13,13 +11,17 @@ config DRM_RCAR_DU Choose this option if you have an R-Car chipset. If M is selected the module will be called rcar-du-drm. -config DRM_RCAR_CMM - tristate "R-Car DU Color Management Module (CMM) Support" - depends on DRM && OF +config DRM_RCAR_USE_CMM + bool "R-Car DU Color Management Module (CMM) Support" depends on DRM_RCAR_DU + default DRM_RCAR_DU help Enable support for R-Car Color Management Module (CMM). +config DRM_RCAR_CMM + def_tristate DRM_RCAR_DU + depends on DRM_RCAR_USE_CMM + config DRM_RCAR_DW_HDMI tristate "R-Car Gen3 and RZ/G2 DU HDMI Encoder Support" depends on DRM && OF @@ -27,15 +29,27 @@ config DRM_RCAR_DW_HDMI help Enable support for R-Car Gen3 or RZ/G2 internal HDMI encoder. 
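The gm200.c and gp102.c ACR parser hunks above stop treating nvkm_acr_lsfw_add() as if it returned NULL on failure and instead propagate its ERR_PTR()-encoded error. A minimal sketch of that kernel idiom, with example_lsfw_add() and example_wpr_parse() as hypothetical stand-ins rather than real nouveau helpers:

#include <linux/err.h>

/* Opaque stand-in for struct nvkm_acr_lsfw; illustrative only. */
struct example_lsfw;

/*
 * Hypothetical lookup helper that, like nvkm_acr_lsfw_add() in the hunks
 * above, returns a valid pointer or an ERR_PTR()-encoded errno, never NULL.
 */
struct example_lsfw *example_lsfw_add(int falcon_id);

static int example_wpr_parse(const int *falcon_ids, int count)
{
	struct example_lsfw *lsfw;
	int i;

	for (i = 0; i < count; i++) {
		lsfw = example_lsfw_add(falcon_ids[i]);
		if (IS_ERR(lsfw))
			/* Propagate the encoded error rather than assuming -ENOMEM. */
			return PTR_ERR(lsfw);
	}

	return 0;
}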
+config DRM_RCAR_USE_LVDS + bool "R-Car DU LVDS Encoder Support" + depends on DRM_BRIDGE && OF + default DRM_RCAR_DU + help + Enable support for the R-Car Display Unit embedded LVDS encoders. + config DRM_RCAR_LVDS - tristate "R-Car DU LVDS Encoder Support" - depends on DRM && DRM_BRIDGE && OF + def_tristate DRM_RCAR_DU + depends on DRM_RCAR_USE_LVDS select DRM_KMS_HELPER select DRM_PANEL select OF_FLATTREE select OF_OVERLAY + +config DRM_RCAR_MIPI_DSI + tristate "R-Car DU MIPI DSI Encoder Support" + depends on DRM && DRM_BRIDGE && OF + select DRM_MIPI_DSI help - Enable support for the R-Car Display Unit embedded LVDS encoders. + Enable support for the R-Car Display Unit embedded MIPI DSI encoders. config DRM_RCAR_VSP bool "R-Car DU VSP Compositor Support" if ARM diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile index 4d1187ccc3e5..286bc81b3e7c 100644 --- a/drivers/gpu/drm/rcar-du/Makefile +++ b/drivers/gpu/drm/rcar-du/Makefile @@ -19,6 +19,7 @@ obj-$(CONFIG_DRM_RCAR_CMM) += rcar_cmm.o obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o obj-$(CONFIG_DRM_RCAR_DW_HDMI) += rcar_dw_hdmi.o obj-$(CONFIG_DRM_RCAR_LVDS) += rcar_lvds.o +obj-$(CONFIG_DRM_RCAR_MIPI_DSI) += rcar_mipi_dsi.o # 'remote-endpoint' is fixed up at run-time DTC_FLAGS_rcar_du_of_lvds_r8a7790 += -Wno-graph_endpoint diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 5672830ca184..f361a604337f 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -215,6 +215,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode; struct rcar_du_device *rcdu = rcrtc->dev; unsigned long mode_clock = mode->clock * 1000; + unsigned int hdse_offset; u32 dsmr; u32 escr; @@ -261,12 +262,13 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) rcar_du_group_write(rcrtc->group, DPLLCR, dpllcr); escr = ESCR_DCLKSEL_DCLKIN | div; - } else if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index)) { + } else if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) || + rcdu->info->dsi_clk_mask & BIT(rcrtc->index)) { /* - * Use the LVDS PLL output as the dot clock when outputting to - * the LVDS encoder on an SoC that supports this clock routing - * option. We use the clock directly in that case, without any - * additional divider. + * Use the external LVDS or DSI PLL output as the dot clock when + * outputting to the LVDS or DSI encoder on an SoC that supports + * this clock routing option. We use the clock directly in that + * case, without any additional divider. 
*/ escr = ESCR_DCLKSEL_DCLKIN; } else { @@ -298,10 +300,15 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc) | DSMR_DIPM_DISP | DSMR_CSPM; rcar_du_crtc_write(rcrtc, DSMR, dsmr); + hdse_offset = 19; + if (rcrtc->group->cmms_mask & BIT(rcrtc->index % 2)) + hdse_offset += 25; + /* Display timings */ - rcar_du_crtc_write(rcrtc, HDSR, mode->htotal - mode->hsync_start - 19); + rcar_du_crtc_write(rcrtc, HDSR, mode->htotal - mode->hsync_start - + hdse_offset); rcar_du_crtc_write(rcrtc, HDER, mode->htotal - mode->hsync_start + - mode->hdisplay - 19); + mode->hdisplay - hdse_offset); rcar_du_crtc_write(rcrtc, HSWR, mode->hsync_end - mode->hsync_start - 1); rcar_du_crtc_write(rcrtc, HCR, mode->htotal - 1); @@ -836,6 +843,7 @@ rcar_du_crtc_mode_valid(struct drm_crtc *crtc, struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc); struct rcar_du_device *rcdu = rcrtc->dev; bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE; + unsigned int min_sync_porch; unsigned int vbp; if (interlaced && !rcar_du_has(rcdu, RCAR_DU_FEATURE_INTERLACED)) @@ -843,9 +851,14 @@ rcar_du_crtc_mode_valid(struct drm_crtc *crtc, /* * The hardware requires a minimum combined horizontal sync and back - * porch of 20 pixels and a minimum vertical back porch of 3 lines. + * porch of 20 pixels (when CMM isn't used) or 45 pixels (when CMM is + * used), and a minimum vertical back porch of 3 lines. */ - if (mode->htotal - mode->hsync_start < 20) + min_sync_porch = 20; + if (rcrtc->group->cmms_mask & BIT(rcrtc->index % 2)) + min_sync_porch += 25; + + if (mode->htotal - mode->hsync_start < min_sync_porch) return MODE_HBLANK_NARROW; vbp = (mode->vtotal - mode->vsync_end) / (interlaced ? 2 : 1); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index 5612a9e7a905..5a8131ef81d5 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -544,10 +544,12 @@ const char *rcar_du_output_name(enum rcar_du_output output) static const char * const names[] = { [RCAR_DU_OUTPUT_DPAD0] = "DPAD0", [RCAR_DU_OUTPUT_DPAD1] = "DPAD1", - [RCAR_DU_OUTPUT_LVDS0] = "LVDS0", - [RCAR_DU_OUTPUT_LVDS1] = "LVDS1", + [RCAR_DU_OUTPUT_DSI0] = "DSI0", + [RCAR_DU_OUTPUT_DSI1] = "DSI1", [RCAR_DU_OUTPUT_HDMI0] = "HDMI0", [RCAR_DU_OUTPUT_HDMI1] = "HDMI1", + [RCAR_DU_OUTPUT_LVDS0] = "LVDS0", + [RCAR_DU_OUTPUT_LVDS1] = "LVDS1", [RCAR_DU_OUTPUT_TCON] = "TCON", }; diff --git a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c new file mode 100644 index 000000000000..891bb956fd61 --- /dev/null +++ b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi.c @@ -0,0 +1,819 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * rcar_mipi_dsi.c -- R-Car MIPI DSI Encoder + * + * Copyright (C) 2020 Renesas Electronics Corporation + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_graph.h> +#include <linux/platform_device.h> +#include <linux/reset.h> +#include <linux/slab.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_of.h> +#include <drm/drm_panel.h> +#include <drm/drm_probe_helper.h> + +#include "rcar_mipi_dsi_regs.h" + +struct rcar_mipi_dsi { + struct device *dev; + const struct rcar_mipi_dsi_device_info *info; + struct reset_control *rstc; + + struct mipi_dsi_host host; + struct drm_bridge bridge; + struct drm_bridge 
*next_bridge; + struct drm_connector connector; + + void __iomem *mmio; + struct { + struct clk *mod; + struct clk *pll; + struct clk *dsi; + } clocks; + + enum mipi_dsi_pixel_format format; + unsigned int num_data_lanes; + unsigned int lanes; +}; + +static inline struct rcar_mipi_dsi * +bridge_to_rcar_mipi_dsi(struct drm_bridge *bridge) +{ + return container_of(bridge, struct rcar_mipi_dsi, bridge); +} + +static inline struct rcar_mipi_dsi * +host_to_rcar_mipi_dsi(struct mipi_dsi_host *host) +{ + return container_of(host, struct rcar_mipi_dsi, host); +} + +static const u32 phtw[] = { + 0x01020114, 0x01600115, /* General testing */ + 0x01030116, 0x0102011d, /* General testing */ + 0x011101a4, 0x018601a4, /* 1Gbps testing */ + 0x014201a0, 0x010001a3, /* 1Gbps testing */ + 0x0101011f, /* 1Gbps testing */ +}; + +static const u32 phtw2[] = { + 0x010c0130, 0x010c0140, /* General testing */ + 0x010c0150, 0x010c0180, /* General testing */ + 0x010c0190, + 0x010a0160, 0x010a0170, + 0x01800164, 0x01800174, /* 1Gbps testing */ +}; + +static const u32 hsfreqrange_table[][2] = { + { 80000000U, 0x00 }, { 90000000U, 0x10 }, { 100000000U, 0x20 }, + { 110000000U, 0x30 }, { 120000000U, 0x01 }, { 130000000U, 0x11 }, + { 140000000U, 0x21 }, { 150000000U, 0x31 }, { 160000000U, 0x02 }, + { 170000000U, 0x12 }, { 180000000U, 0x22 }, { 190000000U, 0x32 }, + { 205000000U, 0x03 }, { 220000000U, 0x13 }, { 235000000U, 0x23 }, + { 250000000U, 0x33 }, { 275000000U, 0x04 }, { 300000000U, 0x14 }, + { 325000000U, 0x25 }, { 350000000U, 0x35 }, { 400000000U, 0x05 }, + { 450000000U, 0x16 }, { 500000000U, 0x26 }, { 550000000U, 0x37 }, + { 600000000U, 0x07 }, { 650000000U, 0x18 }, { 700000000U, 0x28 }, + { 750000000U, 0x39 }, { 800000000U, 0x09 }, { 850000000U, 0x19 }, + { 900000000U, 0x29 }, { 950000000U, 0x3a }, { 1000000000U, 0x0a }, + { 1050000000U, 0x1a }, { 1100000000U, 0x2a }, { 1150000000U, 0x3b }, + { 1200000000U, 0x0b }, { 1250000000U, 0x1b }, { 1300000000U, 0x2b }, + { 1350000000U, 0x3c }, { 1400000000U, 0x0c }, { 1450000000U, 0x1c }, + { 1500000000U, 0x2c }, { 1550000000U, 0x3d }, { 1600000000U, 0x0d }, + { 1650000000U, 0x1d }, { 1700000000U, 0x2e }, { 1750000000U, 0x3e }, + { 1800000000U, 0x0e }, { 1850000000U, 0x1e }, { 1900000000U, 0x2f }, + { 1950000000U, 0x3f }, { 2000000000U, 0x0f }, { 2050000000U, 0x40 }, + { 2100000000U, 0x41 }, { 2150000000U, 0x42 }, { 2200000000U, 0x43 }, + { 2250000000U, 0x44 }, { 2300000000U, 0x45 }, { 2350000000U, 0x46 }, + { 2400000000U, 0x47 }, { 2450000000U, 0x48 }, { 2500000000U, 0x49 }, + { /* sentinel */ }, +}; + +struct vco_cntrl_value { + u32 min_freq; + u32 max_freq; + u16 value; +}; + +static const struct vco_cntrl_value vco_cntrl_table[] = { + { .min_freq = 40000000U, .max_freq = 55000000U, .value = 0x3f }, + { .min_freq = 52500000U, .max_freq = 80000000U, .value = 0x39 }, + { .min_freq = 80000000U, .max_freq = 110000000U, .value = 0x2f }, + { .min_freq = 105000000U, .max_freq = 160000000U, .value = 0x29 }, + { .min_freq = 160000000U, .max_freq = 220000000U, .value = 0x1f }, + { .min_freq = 210000000U, .max_freq = 320000000U, .value = 0x19 }, + { .min_freq = 320000000U, .max_freq = 440000000U, .value = 0x0f }, + { .min_freq = 420000000U, .max_freq = 660000000U, .value = 0x09 }, + { .min_freq = 630000000U, .max_freq = 1149000000U, .value = 0x03 }, + { .min_freq = 1100000000U, .max_freq = 1152000000U, .value = 0x01 }, + { .min_freq = 1150000000U, .max_freq = 1250000000U, .value = 0x01 }, + { /* sentinel */ }, +}; + +static void rcar_mipi_dsi_write(struct rcar_mipi_dsi *dsi, u32 
reg, u32 data) +{ + iowrite32(data, dsi->mmio + reg); +} + +static u32 rcar_mipi_dsi_read(struct rcar_mipi_dsi *dsi, u32 reg) +{ + return ioread32(dsi->mmio + reg); +} + +static void rcar_mipi_dsi_clr(struct rcar_mipi_dsi *dsi, u32 reg, u32 clr) +{ + rcar_mipi_dsi_write(dsi, reg, rcar_mipi_dsi_read(dsi, reg) & ~clr); +} + +static void rcar_mipi_dsi_set(struct rcar_mipi_dsi *dsi, u32 reg, u32 set) +{ + rcar_mipi_dsi_write(dsi, reg, rcar_mipi_dsi_read(dsi, reg) | set); +} + +static int rcar_mipi_dsi_phtw_test(struct rcar_mipi_dsi *dsi, u32 phtw) +{ + u32 status; + int ret; + + rcar_mipi_dsi_write(dsi, PHTW, phtw); + + ret = read_poll_timeout(rcar_mipi_dsi_read, status, + !(status & (PHTW_DWEN | PHTW_CWEN)), + 2000, 10000, false, dsi, PHTW); + if (ret < 0) { + dev_err(dsi->dev, "PHY test interface write timeout (0x%08x)\n", + phtw); + return ret; + } + + return ret; +} + +/* ----------------------------------------------------------------------------- + * Hardware Setup + */ + +struct dsi_setup_info { + unsigned long fout; + u16 vco_cntrl; + u16 prop_cntrl; + u16 hsfreqrange; + u16 div; + unsigned int m; + unsigned int n; +}; + +static void rcar_mipi_dsi_parameters_calc(struct rcar_mipi_dsi *dsi, + struct clk *clk, unsigned long target, + struct dsi_setup_info *setup_info) +{ + + const struct vco_cntrl_value *vco_cntrl; + unsigned long fout_target; + unsigned long fin, fout; + unsigned long hsfreq; + unsigned int best_err = -1; + unsigned int divider; + unsigned int n; + unsigned int i; + unsigned int err; + + /* + * Calculate Fout = dot clock * ColorDepth / (2 * Lane Count) + * The range out Fout is [40 - 1250] Mhz + */ + fout_target = target * mipi_dsi_pixel_format_to_bpp(dsi->format) + / (2 * dsi->lanes); + if (fout_target < 40000000 || fout_target > 1250000000) + return; + + /* Find vco_cntrl */ + for (vco_cntrl = vco_cntrl_table; vco_cntrl->min_freq != 0; vco_cntrl++) { + if (fout_target > vco_cntrl->min_freq && + fout_target <= vco_cntrl->max_freq) { + setup_info->vco_cntrl = vco_cntrl->value; + if (fout_target >= 1150000000) + setup_info->prop_cntrl = 0x0c; + else + setup_info->prop_cntrl = 0x0b; + break; + } + } + + /* Add divider */ + setup_info->div = (setup_info->vco_cntrl & 0x30) >> 4; + + /* Find hsfreqrange */ + hsfreq = fout_target * 2; + for (i = 0; i < ARRAY_SIZE(hsfreqrange_table); i++) { + if (hsfreqrange_table[i][0] >= hsfreq) { + setup_info->hsfreqrange = hsfreqrange_table[i][1]; + break; + } + } + + /* + * Calculate n and m for PLL clock + * Following the HW manual the ranges of n and m are + * n = [3-8] and m = [64-625] + */ + fin = clk_get_rate(clk); + divider = 1 << setup_info->div; + for (n = 3; n < 9; n++) { + unsigned long fpfd; + unsigned int m; + + fpfd = fin / n; + + for (m = 64; m < 626; m++) { + fout = fpfd * m / divider; + err = abs((long)(fout - fout_target) * 10000 / + (long)fout_target); + if (err < best_err) { + setup_info->m = m - 2; + setup_info->n = n - 1; + setup_info->fout = fout; + best_err = err; + if (err == 0) + goto done; + } + } + } + +done: + dev_dbg(dsi->dev, + "%pC %lu Hz -> Fout %lu Hz (target %lu Hz, error %d.%02u%%), PLL M/N/DIV %u/%u/%u\n", + clk, fin, setup_info->fout, fout_target, best_err / 100, + best_err % 100, setup_info->m, setup_info->n, setup_info->div); + dev_dbg(dsi->dev, + "vco_cntrl = 0x%x\tprop_cntrl = 0x%x\thsfreqrange = 0x%x\n", + setup_info->vco_cntrl, setup_info->prop_cntrl, + setup_info->hsfreqrange); +} + +static void rcar_mipi_dsi_set_display_timing(struct rcar_mipi_dsi *dsi, + const struct drm_display_mode *mode) +{ 
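/*
 * Editor's note, illustrative only (not part of the patch above): for a
 * hypothetical CEA 1920x1080@60 mode (pixel clock 148500 kHz, hdisplay 1920,
 * hsync_start 2008, hsync_end 2052, htotal 2200, vdisplay 1080, vsync_start
 * 1084, vsync_end 1089, vtotal 1125), the fields programmed below work out
 * to HACTIVE = 1920, HSA = 2052 - 2008 = 44, HFP = 2008 - 1920 = 88,
 * HBP = 2200 - 2052 = 148, VACTIVE = 1080, VSA = 5, VFP = 4 and VBP = 36.
 * With four data lanes and RGB888, rcar_mipi_dsi_parameters_calc() above
 * targets Fout = 148500000 * 24 / (2 * 4) = 445.5 MHz, which selects
 * vco_cntrl 0x09 (div 0) and prop_cntrl 0x0b, and via hsfreq = 891 MHz picks
 * hsfreqrange 0x29 from the lookup tables.
 */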
+ u32 setr; + u32 vprmset0r; + u32 vprmset1r; + u32 vprmset2r; + u32 vprmset3r; + u32 vprmset4r; + + /* Configuration for Pixel Stream and Packet Header */ + if (mipi_dsi_pixel_format_to_bpp(dsi->format) == 24) + rcar_mipi_dsi_write(dsi, TXVMPSPHSETR, TXVMPSPHSETR_DT_RGB24); + else if (mipi_dsi_pixel_format_to_bpp(dsi->format) == 18) + rcar_mipi_dsi_write(dsi, TXVMPSPHSETR, TXVMPSPHSETR_DT_RGB18); + else if (mipi_dsi_pixel_format_to_bpp(dsi->format) == 16) + rcar_mipi_dsi_write(dsi, TXVMPSPHSETR, TXVMPSPHSETR_DT_RGB16); + else { + dev_warn(dsi->dev, "unsupported format"); + return; + } + + /* Configuration for Blanking sequence and Input Pixel */ + setr = TXVMSETR_HSABPEN_EN | TXVMSETR_HBPBPEN_EN + | TXVMSETR_HFPBPEN_EN | TXVMSETR_SYNSEQ_PULSES + | TXVMSETR_PIXWDTH | TXVMSETR_VSTPM; + rcar_mipi_dsi_write(dsi, TXVMSETR, setr); + + /* Configuration for Video Parameters */ + vprmset0r = (mode->flags & DRM_MODE_FLAG_PVSYNC ? + TXVMVPRMSET0R_VSPOL_HIG : TXVMVPRMSET0R_VSPOL_LOW) + | (mode->flags & DRM_MODE_FLAG_PHSYNC ? + TXVMVPRMSET0R_HSPOL_HIG : TXVMVPRMSET0R_HSPOL_LOW) + | TXVMVPRMSET0R_CSPC_RGB | TXVMVPRMSET0R_BPP_24; + + vprmset1r = TXVMVPRMSET1R_VACTIVE(mode->vdisplay) + | TXVMVPRMSET1R_VSA(mode->vsync_end - mode->vsync_start); + + vprmset2r = TXVMVPRMSET2R_VFP(mode->vsync_start - mode->vdisplay) + | TXVMVPRMSET2R_VBP(mode->vtotal - mode->vsync_end); + + vprmset3r = TXVMVPRMSET3R_HACTIVE(mode->hdisplay) + | TXVMVPRMSET3R_HSA(mode->hsync_end - mode->hsync_start); + + vprmset4r = TXVMVPRMSET4R_HFP(mode->hsync_start - mode->hdisplay) + | TXVMVPRMSET4R_HBP(mode->htotal - mode->hsync_end); + + rcar_mipi_dsi_write(dsi, TXVMVPRMSET0R, vprmset0r); + rcar_mipi_dsi_write(dsi, TXVMVPRMSET1R, vprmset1r); + rcar_mipi_dsi_write(dsi, TXVMVPRMSET2R, vprmset2r); + rcar_mipi_dsi_write(dsi, TXVMVPRMSET3R, vprmset3r); + rcar_mipi_dsi_write(dsi, TXVMVPRMSET4R, vprmset4r); +} + +static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi, + const struct drm_display_mode *mode) +{ + struct dsi_setup_info setup_info = {}; + unsigned int timeout; + int ret, i; + int dsi_format; + u32 phy_setup; + u32 clockset2, clockset3; + u32 ppisetr; + u32 vclkset; + + /* Checking valid format */ + dsi_format = mipi_dsi_pixel_format_to_bpp(dsi->format); + if (dsi_format < 0) { + dev_warn(dsi->dev, "invalid format"); + return -EINVAL; + } + + /* Parameters Calculation */ + rcar_mipi_dsi_parameters_calc(dsi, dsi->clocks.pll, + mode->clock * 1000, &setup_info); + + /* LPCLK enable */ + rcar_mipi_dsi_set(dsi, LPCLKSET, LPCLKSET_CKEN); + + /* CFGCLK enabled */ + rcar_mipi_dsi_set(dsi, CFGCLKSET, CFGCLKSET_CKEN); + + rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_RSTZ); + rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_SHUTDOWNZ); + + rcar_mipi_dsi_set(dsi, PHTC, PHTC_TESTCLR); + rcar_mipi_dsi_clr(dsi, PHTC, PHTC_TESTCLR); + + /* PHY setting */ + phy_setup = rcar_mipi_dsi_read(dsi, PHYSETUP); + phy_setup &= ~PHYSETUP_HSFREQRANGE_MASK; + phy_setup |= PHYSETUP_HSFREQRANGE(setup_info.hsfreqrange); + rcar_mipi_dsi_write(dsi, PHYSETUP, phy_setup); + + for (i = 0; i < ARRAY_SIZE(phtw); i++) { + ret = rcar_mipi_dsi_phtw_test(dsi, phtw[i]); + if (ret < 0) + return ret; + } + + /* PLL Clock Setting */ + rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_SHADOW_CLEAR); + rcar_mipi_dsi_set(dsi, CLOCKSET1, CLOCKSET1_SHADOW_CLEAR); + rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_SHADOW_CLEAR); + + clockset2 = CLOCKSET2_M(setup_info.m) | CLOCKSET2_N(setup_info.n) + | CLOCKSET2_VCO_CNTRL(setup_info.vco_cntrl); + clockset3 = CLOCKSET3_PROP_CNTRL(setup_info.prop_cntrl) + 
| CLOCKSET3_INT_CNTRL(0) + | CLOCKSET3_CPBIAS_CNTRL(0x10) + | CLOCKSET3_GMP_CNTRL(1); + rcar_mipi_dsi_write(dsi, CLOCKSET2, clockset2); + rcar_mipi_dsi_write(dsi, CLOCKSET3, clockset3); + + rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_UPDATEPLL); + rcar_mipi_dsi_set(dsi, CLOCKSET1, CLOCKSET1_UPDATEPLL); + udelay(10); + rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_UPDATEPLL); + + ppisetr = PPISETR_DLEN_3 | PPISETR_CLEN; + rcar_mipi_dsi_write(dsi, PPISETR, ppisetr); + + rcar_mipi_dsi_set(dsi, PHYSETUP, PHYSETUP_SHUTDOWNZ); + rcar_mipi_dsi_set(dsi, PHYSETUP, PHYSETUP_RSTZ); + usleep_range(400, 500); + + /* Checking PPI clock status register */ + for (timeout = 10; timeout > 0; --timeout) { + if ((rcar_mipi_dsi_read(dsi, PPICLSR) & PPICLSR_STPST) && + (rcar_mipi_dsi_read(dsi, PPIDLSR) & PPIDLSR_STPST) && + (rcar_mipi_dsi_read(dsi, CLOCKSET1) & CLOCKSET1_LOCK)) + break; + + usleep_range(1000, 2000); + } + + if (!timeout) { + dev_err(dsi->dev, "failed to enable PPI clock\n"); + return -ETIMEDOUT; + } + + for (i = 0; i < ARRAY_SIZE(phtw2); i++) { + ret = rcar_mipi_dsi_phtw_test(dsi, phtw2[i]); + if (ret < 0) + return ret; + } + + /* Enable DOT clock */ + vclkset = VCLKSET_CKEN; + rcar_mipi_dsi_set(dsi, VCLKSET, vclkset); + + if (dsi_format == 24) + vclkset |= VCLKSET_BPP_24; + else if (dsi_format == 18) + vclkset |= VCLKSET_BPP_18; + else if (dsi_format == 16) + vclkset |= VCLKSET_BPP_16; + else { + dev_warn(dsi->dev, "unsupported format"); + return -EINVAL; + } + vclkset |= VCLKSET_COLOR_RGB | VCLKSET_DIV(setup_info.div) + | VCLKSET_LANE(dsi->lanes - 1); + + rcar_mipi_dsi_set(dsi, VCLKSET, vclkset); + + /* After setting VCLKSET register, enable VCLKEN */ + rcar_mipi_dsi_set(dsi, VCLKEN, VCLKEN_CKEN); + + dev_dbg(dsi->dev, "DSI device is started\n"); + + return 0; +} + +static void rcar_mipi_dsi_shutdown(struct rcar_mipi_dsi *dsi) +{ + rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_RSTZ); + rcar_mipi_dsi_clr(dsi, PHYSETUP, PHYSETUP_SHUTDOWNZ); + + dev_dbg(dsi->dev, "DSI device is shutdown\n"); +} + +static int rcar_mipi_dsi_clk_enable(struct rcar_mipi_dsi *dsi) +{ + int ret; + + reset_control_deassert(dsi->rstc); + + ret = clk_prepare_enable(dsi->clocks.mod); + if (ret < 0) + goto err_reset; + + ret = clk_prepare_enable(dsi->clocks.dsi); + if (ret < 0) + goto err_clock; + + return 0; + +err_clock: + clk_disable_unprepare(dsi->clocks.mod); +err_reset: + reset_control_assert(dsi->rstc); + return ret; +} + +static void rcar_mipi_dsi_clk_disable(struct rcar_mipi_dsi *dsi) +{ + clk_disable_unprepare(dsi->clocks.dsi); + clk_disable_unprepare(dsi->clocks.mod); + + reset_control_assert(dsi->rstc); +} + +static int rcar_mipi_dsi_start_hs_clock(struct rcar_mipi_dsi *dsi) +{ + /* + * In HW manual, we need to check TxDDRClkHS-Q Stable? but it dont + * write how to check. So we skip this check in this patch + */ + u32 status; + int ret; + + /* Start HS clock. */ + rcar_mipi_dsi_set(dsi, PPICLCR, PPICLCR_TXREQHS); + + ret = read_poll_timeout(rcar_mipi_dsi_read, status, + status & PPICLSR_TOHS, + 2000, 10000, false, dsi, PPICLSR); + if (ret < 0) { + dev_err(dsi->dev, "failed to enable HS clock\n"); + return ret; + } + + rcar_mipi_dsi_set(dsi, PPICLSCR, PPICLSCR_TOHS); + + return 0; +} + +static int rcar_mipi_dsi_start_video(struct rcar_mipi_dsi *dsi) +{ + u32 status; + int ret; + + /* Wait for the link to be ready. 
*/ + ret = read_poll_timeout(rcar_mipi_dsi_read, status, + !(status & (LINKSR_LPBUSY | LINKSR_HSBUSY)), + 2000, 10000, false, dsi, LINKSR); + if (ret < 0) { + dev_err(dsi->dev, "Link failed to become ready\n"); + return ret; + } + + /* De-assert video FIFO clear. */ + rcar_mipi_dsi_clr(dsi, TXVMCR, TXVMCR_VFCLR); + + ret = read_poll_timeout(rcar_mipi_dsi_read, status, + status & TXVMSR_VFRDY, + 2000, 10000, false, dsi, TXVMSR); + if (ret < 0) { + dev_err(dsi->dev, "Failed to de-assert video FIFO clear\n"); + return ret; + } + + /* Enable transmission in video mode. */ + rcar_mipi_dsi_set(dsi, TXVMCR, TXVMCR_EN_VIDEO); + + ret = read_poll_timeout(rcar_mipi_dsi_read, status, + status & TXVMSR_RDY, + 2000, 10000, false, dsi, TXVMSR); + if (ret < 0) { + dev_err(dsi->dev, "Failed to enable video transmission\n"); + return ret; + } + + return 0; +} + +/* ----------------------------------------------------------------------------- + * Bridge + */ + +static int rcar_mipi_dsi_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge); + + return drm_bridge_attach(bridge->encoder, dsi->next_bridge, bridge, + flags); +} + +static void rcar_mipi_dsi_atomic_enable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) +{ + struct drm_atomic_state *state = old_bridge_state->base.state; + struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge); + const struct drm_display_mode *mode; + struct drm_connector *connector; + struct drm_crtc *crtc; + int ret; + + connector = drm_atomic_get_new_connector_for_encoder(state, + bridge->encoder); + crtc = drm_atomic_get_new_connector_state(state, connector)->crtc; + mode = &drm_atomic_get_new_crtc_state(state, crtc)->adjusted_mode; + + ret = rcar_mipi_dsi_clk_enable(dsi); + if (ret < 0) { + dev_err(dsi->dev, "failed to enable DSI clocks\n"); + return; + } + + ret = rcar_mipi_dsi_startup(dsi, mode); + if (ret < 0) + goto err_dsi_startup; + + rcar_mipi_dsi_set_display_timing(dsi, mode); + + ret = rcar_mipi_dsi_start_hs_clock(dsi); + if (ret < 0) + goto err_dsi_start_hs; + + rcar_mipi_dsi_start_video(dsi); + + return; + +err_dsi_start_hs: + rcar_mipi_dsi_shutdown(dsi); +err_dsi_startup: + rcar_mipi_dsi_clk_disable(dsi); +} + +static void rcar_mipi_dsi_atomic_disable(struct drm_bridge *bridge, + struct drm_bridge_state *old_bridge_state) +{ + struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge); + + rcar_mipi_dsi_shutdown(dsi); + rcar_mipi_dsi_clk_disable(dsi); +} + +static enum drm_mode_status +rcar_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + if (mode->clock > 297000) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + +static const struct drm_bridge_funcs rcar_mipi_dsi_bridge_ops = { + .attach = rcar_mipi_dsi_attach, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, + .atomic_enable = rcar_mipi_dsi_atomic_enable, + .atomic_disable = rcar_mipi_dsi_atomic_disable, + .mode_valid = rcar_mipi_dsi_bridge_mode_valid, +}; + +/* ----------------------------------------------------------------------------- + * Host setting + */ + +static int rcar_mipi_dsi_host_attach(struct mipi_dsi_host *host, + struct mipi_dsi_device *device) +{ + struct rcar_mipi_dsi *dsi = host_to_rcar_mipi_dsi(host); + int ret; + + if (device->lanes > dsi->num_data_lanes) + 
return -EINVAL; + + dsi->lanes = device->lanes; + dsi->format = device->format; + + dsi->next_bridge = devm_drm_of_get_bridge(dsi->dev, dsi->dev->of_node, + 1, 0); + if (IS_ERR(dsi->next_bridge)) { + ret = PTR_ERR(dsi->next_bridge); + dev_err(dsi->dev, "failed to get next bridge: %d\n", ret); + return ret; + } + + /* Initialize the DRM bridge. */ + dsi->bridge.funcs = &rcar_mipi_dsi_bridge_ops; + dsi->bridge.of_node = dsi->dev->of_node; + drm_bridge_add(&dsi->bridge); + + return 0; +} + +static int rcar_mipi_dsi_host_detach(struct mipi_dsi_host *host, + struct mipi_dsi_device *device) +{ + struct rcar_mipi_dsi *dsi = host_to_rcar_mipi_dsi(host); + + drm_bridge_remove(&dsi->bridge); + + return 0; +} + +static const struct mipi_dsi_host_ops rcar_mipi_dsi_host_ops = { + .attach = rcar_mipi_dsi_host_attach, + .detach = rcar_mipi_dsi_host_detach, +}; + +/* ----------------------------------------------------------------------------- + * Probe & Remove + */ + +static int rcar_mipi_dsi_parse_dt(struct rcar_mipi_dsi *dsi) +{ + struct device_node *ep; + u32 data_lanes[4]; + int ret; + + ep = of_graph_get_endpoint_by_regs(dsi->dev->of_node, 1, 0); + if (!ep) { + dev_dbg(dsi->dev, "unconnected port@1\n"); + return -ENODEV; + } + + ret = of_property_read_variable_u32_array(ep, "data-lanes", data_lanes, + 1, 4); + of_node_put(ep); + + if (ret < 0) { + dev_err(dsi->dev, "missing or invalid data-lanes property\n"); + return -ENODEV; + } + + dsi->num_data_lanes = ret; + return 0; +} + +static struct clk *rcar_mipi_dsi_get_clock(struct rcar_mipi_dsi *dsi, + const char *name, + bool optional) +{ + struct clk *clk; + + clk = devm_clk_get(dsi->dev, name); + if (!IS_ERR(clk)) + return clk; + + if (PTR_ERR(clk) == -ENOENT && optional) + return NULL; + + dev_err_probe(dsi->dev, PTR_ERR(clk), "failed to get %s clock\n", + name ? name : "module"); + + return clk; +} + +static int rcar_mipi_dsi_get_clocks(struct rcar_mipi_dsi *dsi) +{ + dsi->clocks.mod = rcar_mipi_dsi_get_clock(dsi, NULL, false); + if (IS_ERR(dsi->clocks.mod)) + return PTR_ERR(dsi->clocks.mod); + + dsi->clocks.pll = rcar_mipi_dsi_get_clock(dsi, "pll", true); + if (IS_ERR(dsi->clocks.pll)) + return PTR_ERR(dsi->clocks.pll); + + dsi->clocks.dsi = rcar_mipi_dsi_get_clock(dsi, "dsi", true); + if (IS_ERR(dsi->clocks.dsi)) + return PTR_ERR(dsi->clocks.dsi); + + if (!dsi->clocks.pll && !dsi->clocks.dsi) { + dev_err(dsi->dev, "no input clock (pll, dsi)\n"); + return -EINVAL; + } + + return 0; +} + +static int rcar_mipi_dsi_probe(struct platform_device *pdev) +{ + struct rcar_mipi_dsi *dsi; + struct resource *mem; + int ret; + + dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL); + if (dsi == NULL) + return -ENOMEM; + + platform_set_drvdata(pdev, dsi); + + dsi->dev = &pdev->dev; + dsi->info = of_device_get_match_data(&pdev->dev); + + ret = rcar_mipi_dsi_parse_dt(dsi); + if (ret < 0) + return ret; + + /* Acquire resources. */ + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + dsi->mmio = devm_ioremap_resource(dsi->dev, mem); + if (IS_ERR(dsi->mmio)) + return PTR_ERR(dsi->mmio); + + ret = rcar_mipi_dsi_get_clocks(dsi); + if (ret < 0) + return ret; + + dsi->rstc = devm_reset_control_get(dsi->dev, NULL); + if (IS_ERR(dsi->rstc)) { + dev_err(dsi->dev, "failed to get cpg reset\n"); + return PTR_ERR(dsi->rstc); + } + + /* Initialize the DSI host. 
*/ + dsi->host.dev = dsi->dev; + dsi->host.ops = &rcar_mipi_dsi_host_ops; + ret = mipi_dsi_host_register(&dsi->host); + if (ret < 0) + return ret; + + return 0; +} + +static int rcar_mipi_dsi_remove(struct platform_device *pdev) +{ + struct rcar_mipi_dsi *dsi = platform_get_drvdata(pdev); + + mipi_dsi_host_unregister(&dsi->host); + + return 0; +} + +static const struct of_device_id rcar_mipi_dsi_of_table[] = { + { .compatible = "renesas,r8a779a0-dsi-csi2-tx" }, + { } +}; + +MODULE_DEVICE_TABLE(of, rcar_mipi_dsi_of_table); + +static struct platform_driver rcar_mipi_dsi_platform_driver = { + .probe = rcar_mipi_dsi_probe, + .remove = rcar_mipi_dsi_remove, + .driver = { + .name = "rcar-mipi-dsi", + .of_match_table = rcar_mipi_dsi_of_table, + }, +}; + +module_platform_driver(rcar_mipi_dsi_platform_driver); + +MODULE_DESCRIPTION("Renesas R-Car MIPI DSI Encoder Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h new file mode 100644 index 000000000000..0e7a9274749f --- /dev/null +++ b/drivers/gpu/drm/rcar-du/rcar_mipi_dsi_regs.h @@ -0,0 +1,172 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * rcar_mipi_dsi_regs.h -- R-Car MIPI DSI Interface Registers Definitions + * + * Copyright (C) 2020 Renesas Electronics Corporation + */ + +#ifndef __RCAR_MIPI_DSI_REGS_H__ +#define __RCAR_MIPI_DSI_REGS_H__ + +#define LINKSR 0x010 +#define LINKSR_LPBUSY (1 << 1) +#define LINKSR_HSBUSY (1 << 0) + +/* + * Video Mode Register + */ +#define TXVMSETR 0x180 +#define TXVMSETR_SYNSEQ_PULSES (0 << 16) +#define TXVMSETR_SYNSEQ_EVENTS (1 << 16) +#define TXVMSETR_VSTPM (1 << 15) +#define TXVMSETR_PIXWDTH (1 << 8) +#define TXVMSETR_VSEN_EN (1 << 4) +#define TXVMSETR_VSEN_DIS (0 << 4) +#define TXVMSETR_HFPBPEN_EN (1 << 2) +#define TXVMSETR_HFPBPEN_DIS (0 << 2) +#define TXVMSETR_HBPBPEN_EN (1 << 1) +#define TXVMSETR_HBPBPEN_DIS (0 << 1) +#define TXVMSETR_HSABPEN_EN (1 << 0) +#define TXVMSETR_HSABPEN_DIS (0 << 0) + +#define TXVMCR 0x190 +#define TXVMCR_VFCLR (1 << 12) +#define TXVMCR_EN_VIDEO (1 << 0) + +#define TXVMSR 0x1a0 +#define TXVMSR_STR (1 << 16) +#define TXVMSR_VFRDY (1 << 12) +#define TXVMSR_ACT (1 << 8) +#define TXVMSR_RDY (1 << 0) + +#define TXVMSCR 0x1a4 +#define TXVMSCR_STR (1 << 16) + +#define TXVMPSPHSETR 0x1c0 +#define TXVMPSPHSETR_DT_RGB16 (0x0e << 16) +#define TXVMPSPHSETR_DT_RGB18 (0x1e << 16) +#define TXVMPSPHSETR_DT_RGB18_LS (0x2e << 16) +#define TXVMPSPHSETR_DT_RGB24 (0x3e << 16) +#define TXVMPSPHSETR_DT_YCBCR16 (0x2c << 16) + +#define TXVMVPRMSET0R 0x1d0 +#define TXVMVPRMSET0R_HSPOL_HIG (0 << 17) +#define TXVMVPRMSET0R_HSPOL_LOW (1 << 17) +#define TXVMVPRMSET0R_VSPOL_HIG (0 << 16) +#define TXVMVPRMSET0R_VSPOL_LOW (1 << 16) +#define TXVMVPRMSET0R_CSPC_RGB (0 << 4) +#define TXVMVPRMSET0R_CSPC_YCbCr (1 << 4) +#define TXVMVPRMSET0R_BPP_16 (0 << 0) +#define TXVMVPRMSET0R_BPP_18 (1 << 0) +#define TXVMVPRMSET0R_BPP_24 (2 << 0) + +#define TXVMVPRMSET1R 0x1d4 +#define TXVMVPRMSET1R_VACTIVE(x) (((x) & 0x7fff) << 16) +#define TXVMVPRMSET1R_VSA(x) (((x) & 0xfff) << 0) + +#define TXVMVPRMSET2R 0x1d8 +#define TXVMVPRMSET2R_VFP(x) (((x) & 0x1fff) << 16) +#define TXVMVPRMSET2R_VBP(x) (((x) & 0x1fff) << 0) + +#define TXVMVPRMSET3R 0x1dc +#define TXVMVPRMSET3R_HACTIVE(x) (((x) & 0x7fff) << 16) +#define TXVMVPRMSET3R_HSA(x) (((x) & 0xfff) << 0) + +#define TXVMVPRMSET4R 0x1e0 +#define TXVMVPRMSET4R_HFP(x) (((x) & 0x1fff) << 16) +#define TXVMVPRMSET4R_HBP(x) (((x) & 0x1fff) << 0) + +/* + * PHY-Protocol Interface (PPI) Registers + */ 
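/*
 * Editor's note, illustrative only (not part of the patch above): the DLEN
 * values below read as lane-enable masks (0x1, 0x3, 0x7, 0xf enable one
 * through four data lanes), which is why rcar_mipi_dsi_startup() writes
 * PPISETR_DLEN_3 | PPISETR_CLEN to bring up all four data lanes plus the
 * clock lane, and why rcar_mipi_dsi_start_hs_clock() sets PPICLCR_TXREQHS,
 * polls PPICLSR for PPICLSR_TOHS and then acknowledges it through PPICLSCR.
 */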
+#define PPISETR 0x700 +#define PPISETR_DLEN_0 (0x1 << 0) +#define PPISETR_DLEN_1 (0x3 << 0) +#define PPISETR_DLEN_2 (0x7 << 0) +#define PPISETR_DLEN_3 (0xf << 0) +#define PPISETR_CLEN (1 << 8) + +#define PPICLCR 0x710 +#define PPICLCR_TXREQHS (1 << 8) +#define PPICLCR_TXULPSEXT (1 << 1) +#define PPICLCR_TXULPSCLK (1 << 0) + +#define PPICLSR 0x720 +#define PPICLSR_HSTOLP (1 << 27) +#define PPICLSR_TOHS (1 << 26) +#define PPICLSR_STPST (1 << 0) + +#define PPICLSCR 0x724 +#define PPICLSCR_HSTOLP (1 << 27) +#define PPICLSCR_TOHS (1 << 26) + +#define PPIDLSR 0x760 +#define PPIDLSR_STPST (0xf << 0) + +/* + * Clocks registers + */ +#define LPCLKSET 0x1000 +#define LPCLKSET_CKEN (1 << 8) +#define LPCLKSET_LPCLKDIV(x) (((x) & 0x3f) << 0) + +#define CFGCLKSET 0x1004 +#define CFGCLKSET_CKEN (1 << 8) +#define CFGCLKSET_CFGCLKDIV(x) (((x) & 0x3f) << 0) + +#define DOTCLKDIV 0x1008 +#define DOTCLKDIV_CKEN (1 << 8) +#define DOTCLKDIV_DOTCLKDIV(x) (((x) & 0x3f) << 0) + +#define VCLKSET 0x100c +#define VCLKSET_CKEN (1 << 16) +#define VCLKSET_COLOR_RGB (0 << 8) +#define VCLKSET_COLOR_YCC (1 << 8) +#define VCLKSET_DIV(x) (((x) & 0x3) << 4) +#define VCLKSET_BPP_16 (0 << 2) +#define VCLKSET_BPP_18 (1 << 2) +#define VCLKSET_BPP_18L (2 << 2) +#define VCLKSET_BPP_24 (3 << 2) +#define VCLKSET_LANE(x) (((x) & 0x3) << 0) + +#define VCLKEN 0x1010 +#define VCLKEN_CKEN (1 << 0) + +#define PHYSETUP 0x1014 +#define PHYSETUP_HSFREQRANGE(x) (((x) & 0x7f) << 16) +#define PHYSETUP_HSFREQRANGE_MASK (0x7f << 16) +#define PHYSETUP_CFGCLKFREQRANGE(x) (((x) & 0x3f) << 8) +#define PHYSETUP_SHUTDOWNZ (1 << 1) +#define PHYSETUP_RSTZ (1 << 0) + +#define CLOCKSET1 0x101c +#define CLOCKSET1_LOCK_PHY (1 << 17) +#define CLOCKSET1_LOCK (1 << 16) +#define CLOCKSET1_CLKSEL (1 << 8) +#define CLOCKSET1_CLKINSEL_EXTAL (0 << 2) +#define CLOCKSET1_CLKINSEL_DIG (1 << 2) +#define CLOCKSET1_CLKINSEL_DU (1 << 3) +#define CLOCKSET1_SHADOW_CLEAR (1 << 1) +#define CLOCKSET1_UPDATEPLL (1 << 0) + +#define CLOCKSET2 0x1020 +#define CLOCKSET2_M(x) (((x) & 0xfff) << 16) +#define CLOCKSET2_VCO_CNTRL(x) (((x) & 0x3f) << 8) +#define CLOCKSET2_N(x) (((x) & 0xf) << 0) + +#define CLOCKSET3 0x1024 +#define CLOCKSET3_PROP_CNTRL(x) (((x) & 0x3f) << 24) +#define CLOCKSET3_INT_CNTRL(x) (((x) & 0x3f) << 16) +#define CLOCKSET3_CPBIAS_CNTRL(x) (((x) & 0x7f) << 8) +#define CLOCKSET3_GMP_CNTRL(x) (((x) & 0x3) << 0) + +#define PHTW 0x1034 +#define PHTW_DWEN (1 << 24) +#define PHTW_TESTDIN_DATA(x) (((x) & 0xff) << 16) +#define PHTW_CWEN (1 << 8) +#define PHTW_TESTDIN_CODE(x) (((x) & 0xff) << 0) + +#define PHTC 0x103c +#define PHTC_TESTCLR (1 << 0) + +#endif /* __RCAR_MIPI_DSI_REGS_H__ */ diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 94fe51b3caa2..f91fb31ab7a7 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -704,12 +704,13 @@ int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job, int ret; dma_resv_for_each_fence(&cursor, obj->resv, write, fence) { - ret = drm_sched_job_add_dependency(job, fence); - if (ret) - return ret; - /* Make sure to grab an additional ref on the added fence */ dma_fence_get(fence); + ret = drm_sched_job_add_dependency(job, fence); + if (ret) { + dma_fence_put(fence); + return ret; + } } return 0; } diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig index 92651f6a9e7d..befc5a80222d 100644 --- a/drivers/gpu/drm/sun4i/Kconfig +++ b/drivers/gpu/drm/sun4i/Kconfig @@ -45,6 +45,7 @@ config DRM_SUN6I_DSI default 
MACH_SUN8I select CRC_CCITT select DRM_MIPI_DSI + select RESET_CONTROLLER select PHY_SUN6I_MIPI_DPHY help Choose this option if you want have an Allwinner SoC with diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index fc124457ba2f..db3dc7ef5382 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1104,7 +1104,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, * as an indication that we're about to swap out. */ memset(&place, 0, sizeof(place)); - place.mem_type = TTM_PL_SYSTEM; + place.mem_type = bo->resource->mem_type; if (!ttm_bo_evict_swapout_allowable(bo, ctx, &place, &locked, NULL)) return -EBUSY; @@ -1136,6 +1136,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, struct ttm_place hop; memset(&hop, 0, sizeof(hop)); + place.mem_type = TTM_PL_SYSTEM; ret = ttm_resource_alloc(bo, &place, &evict_mem); if (unlikely(ret)) goto out; diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 7e83c00a3f48..79c870a3bef8 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -34,6 +34,7 @@ #include <linux/sched.h> #include <linux/shmem_fs.h> #include <linux/file.h> +#include <linux/module.h> #include <drm/drm_cache.h> #include <drm/ttm/ttm_bo_driver.h> diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index 79d4d9dd1394..24de29bc1cda 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -340,19 +340,19 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state) struct drm_device *dev = state->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_hvs *hvs = vc4->hvs; - struct drm_crtc_state *old_crtc_state; struct drm_crtc_state *new_crtc_state; struct vc4_hvs_state *new_hvs_state; struct drm_crtc *crtc; struct vc4_hvs_state *old_hvs_state; + unsigned int channel; int i; old_hvs_state = vc4_hvs_get_old_global_state(state); - if (WARN_ON(!old_hvs_state)) + if (WARN_ON(IS_ERR(old_hvs_state))) return; new_hvs_state = vc4_hvs_get_new_global_state(state); - if (WARN_ON(!new_hvs_state)) + if (WARN_ON(IS_ERR(new_hvs_state))) return; for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { @@ -365,31 +365,32 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state) vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel); } - if (vc4->hvs->hvs5) { - unsigned long core_rate = max_t(unsigned long, - 500000000, - new_hvs_state->core_clock_rate); - - clk_set_min_rate(hvs->core_clk, core_rate); - } - - for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) { - struct vc4_crtc_state *vc4_crtc_state = - to_vc4_crtc_state(old_crtc_state); - unsigned int channel = vc4_crtc_state->assigned_channel; + for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) { + struct drm_crtc_commit *commit; int ret; - if (channel == VC4_HVS_CHANNEL_DISABLED) + if (!old_hvs_state->fifo_state[channel].in_use) continue; - if (!old_hvs_state->fifo_state[channel].in_use) + commit = old_hvs_state->fifo_state[channel].pending_commit; + if (!commit) continue; - ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[channel].pending_commit); + ret = drm_crtc_commit_wait(commit); if (ret) drm_err(dev, "Timed out waiting for commit\n"); + + drm_crtc_commit_put(commit); + old_hvs_state->fifo_state[channel].pending_commit = NULL; } + if (vc4->hvs->hvs5) { + unsigned long core_rate = max_t(unsigned long, + 500000000, + new_hvs_state->core_clock_rate); + + clk_set_min_rate(hvs->core_clk, 
core_rate); + } drm_atomic_helper_commit_modeset_disables(dev, state); vc4_ctm_commit(vc4, state); @@ -427,8 +428,8 @@ static int vc4_atomic_commit_setup(struct drm_atomic_state *state) unsigned int i; hvs_state = vc4_hvs_get_new_global_state(state); - if (!hvs_state) - return -EINVAL; + if (WARN_ON(IS_ERR(hvs_state))) + return PTR_ERR(hvs_state); for_each_new_crtc_in_state(state, crtc, crtc_state, i) { struct vc4_crtc_state *vc4_crtc_state = @@ -676,12 +677,6 @@ vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj) for (i = 0; i < HVS_NUM_CHANNELS; i++) { state->fifo_state[i].in_use = old_state->fifo_state[i].in_use; state->fifo_state[i].fifo_load = old_state->fifo_state[i].fifo_load; - - if (!old_state->fifo_state[i].pending_commit) - continue; - - state->fifo_state[i].pending_commit = - drm_crtc_commit_get(old_state->fifo_state[i].pending_commit); } state->core_clock_rate = old_state->core_clock_rate; @@ -772,8 +767,8 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev, unsigned int i; hvs_new_state = vc4_hvs_get_global_state(state); - if (!hvs_new_state) - return -EINVAL; + if (IS_ERR(hvs_new_state)) + return PTR_ERR(hvs_new_state); for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++) if (!hvs_new_state->fifo_state[i].in_use) @@ -862,8 +857,8 @@ vc4_core_clock_atomic_check(struct drm_atomic_state *state) load_state = to_vc4_load_tracker_state(priv_state); hvs_new_state = vc4_hvs_get_global_state(state); - if (!hvs_new_state) - return -EINVAL; + if (IS_ERR(hvs_new_state)) + return PTR_ERR(hvs_new_state); for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index c20c96a97a6c..5f25a8d15464 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -156,36 +156,6 @@ static void virtio_gpu_config_changed(struct virtio_device *vdev) schedule_work(&vgdev->config_changed_work); } -static __poll_t virtio_gpu_poll(struct file *filp, - struct poll_table_struct *wait) -{ - struct drm_file *drm_file = filp->private_data; - struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv; - struct drm_device *dev = drm_file->minor->dev; - struct virtio_gpu_device *vgdev = dev->dev_private; - struct drm_pending_event *e = NULL; - __poll_t mask = 0; - - if (!vgdev->has_virgl_3d || !vfpriv || !vfpriv->ring_idx_mask) - return drm_poll(filp, wait); - - poll_wait(filp, &drm_file->event_wait, wait); - - if (!list_empty(&drm_file->event_list)) { - spin_lock_irq(&dev->event_lock); - e = list_first_entry(&drm_file->event_list, - struct drm_pending_event, link); - drm_file->event_space += e->event->length; - list_del(&e->link); - spin_unlock_irq(&dev->event_lock); - - kfree(e); - mask |= EPOLLIN | EPOLLRDNORM; - } - - return mask; -} - static struct virtio_device_id id_table[] = { { VIRTIO_ID_GPU, VIRTIO_DEV_ANY_ID }, { 0 }, @@ -225,17 +195,7 @@ MODULE_AUTHOR("Dave Airlie <airlied@redhat.com>"); MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>"); MODULE_AUTHOR("Alon Levy"); -static const struct file_operations virtio_gpu_driver_fops = { - .owner = THIS_MODULE, - .open = drm_open, - .release = drm_release, - .unlocked_ioctl = drm_ioctl, - .compat_ioctl = drm_compat_ioctl, - .poll = virtio_gpu_poll, - .read = drm_read, - .llseek = noop_llseek, - .mmap = drm_gem_mmap -}; +DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops); static const struct drm_driver driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC, diff --git 
a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index e0265fe74aa5..0a194aaad419 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -138,7 +138,6 @@ struct virtio_gpu_fence_driver { spinlock_t lock; }; -#define VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL 0x10000000 struct virtio_gpu_fence_event { struct drm_pending_event base; struct drm_event event; diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 0007e423d885..c708bab555c6 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -54,7 +54,7 @@ static int virtio_gpu_fence_event_create(struct drm_device *dev, if (!e) return -ENOMEM; - e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL; + e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED; e->event.length = sizeof(e->event); ret = drm_event_reserve_init(dev, file, &e->base, &e->event); diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c index 434064c820e8..e63088c2121d 100644 --- a/drivers/gpu/drm/xen/xen_drm_front.c +++ b/drivers/gpu/drm/xen/xen_drm_front.c @@ -761,6 +761,7 @@ static struct xenbus_driver xen_driver = { .probe = xen_drv_probe, .remove = xen_drv_remove, .otherend_changed = displback_changed, + .not_essential = true, }; static int __init xen_drv_init(void) |
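
Note on the drivers/gpu/drm/scheduler/sched_main.c hunk above: dma_resv_for_each_fence() only lends out each fence, while drm_sched_job_add_dependency() stores the fence in the job and takes ownership of one reference on success. The sketch below is a commented restatement of the resulting loop, not additional behaviour; the function signature and the cursor/fence declarations outside the visible hunk are filled in from context and may differ slightly from the tree.

int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
					    struct drm_gem_object *obj,
					    bool write)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int ret;

	dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
		/* The iterator only lends us this fence; pin it first. */
		dma_fence_get(fence);

		/* On success the job now owns the reference taken above. */
		ret = drm_sched_job_add_dependency(job, fence);
		if (ret) {
			/* Nothing stored the fence, so drop our reference. */
			dma_fence_put(fence);
			return ret;
		}
	}

	return 0;
}

Taking the reference before the call closes the window in which the dependency array held a fence it did not own, and the error path no longer leaks the extra reference.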
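
Note on the vc4_kms.c hunks above: the callers of vc4_hvs_get_global_state() and vc4_hvs_get_new_global_state() are switched from NULL checks to IS_ERR()/PTR_ERR(). A minimal sketch of the calling convention being relied on, assuming these helpers follow the usual drm_atomic_get_private_obj_state() behaviour of returning an ERR_PTR rather than NULL on failure:

	struct vc4_hvs_state *hvs_new_state;

	hvs_new_state = vc4_hvs_get_global_state(state);
	if (IS_ERR(hvs_new_state))
		/* typically -EDEADLK from the atomic locking retry path, or -ENOMEM */
		return PTR_ERR(hvs_new_state);

Propagating the encoded error instead of a blanket -EINVAL lets the atomic core restart the commit on -EDEADLK rather than failing it outright.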
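
Note on the virtgpu_drv.c hunk above: the driver's custom poll implementation is dropped along with the open-coded file_operations table, since fence events now use the regular VIRTGPU_EVENT_FENCE_SIGNALED uapi event and can go through the generic drm_poll(). For reference, DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops) expands to roughly the following (sketch based on include/drm/drm_gem.h of this era, not copied from the patch), i.e. the same table the driver had before except for the generic .poll hook:

static const struct file_operations virtio_gpu_driver_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.compat_ioctl	= drm_compat_ioctl,
	.poll		= drm_poll,		/* replaces the removed virtio_gpu_poll() */
	.read		= drm_read,
	.llseek		= noop_llseek,
	.mmap		= drm_gem_mmap,
};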