Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
53 files changed, 1664 insertions, 408 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index e74cd443063a..13ebb1f71e49 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -56,7 +56,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 	amdgpu_gmc.o amdgpu_mmhub.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
 	amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
 	amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
-	amdgpu_fw_attestation.o
+	amdgpu_fw_attestation.o amdgpu_securedisplay.o

 amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o

@@ -171,7 +171,8 @@ amdgpu-y += \
 # add SMUIO block
 amdgpu-y += \
 	smuio_v9_0.o \
-	smuio_v11_0.o
+	smuio_v11_0.o \
+	smuio_v11_0_6.o

 # add amdkfd interfaces
 amdgpu-y += amdgpu_amdkfd.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index f4ff8ddb52d4..b6879d97c9c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -287,7 +287,7 @@ enum amdgpu_kiq_irq {

 #define MAX_KIQ_REG_WAIT       5000 /* in usecs, 5ms */
 #define MAX_KIQ_REG_BAILOUT_INTERVAL   5 /* in msecs, 5ms */
-#define MAX_KIQ_REG_TRY 80 /* 20 -> 80 */
+#define MAX_KIQ_REG_TRY 1000

 int amdgpu_device_ip_set_clockgating_state(void *dev,
 					   enum amd_ip_block_type block_type,
@@ -579,7 +579,8 @@ enum amd_reset_method {
 	AMD_RESET_METHOD_MODE0,
 	AMD_RESET_METHOD_MODE1,
 	AMD_RESET_METHOD_MODE2,
-	AMD_RESET_METHOD_BACO
+	AMD_RESET_METHOD_BACO,
+	AMD_RESET_METHOD_PCI,
 };

 /*
@@ -891,6 +892,7 @@ struct amdgpu_device {
 	/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
 	struct work_struct		hotplug_work;
 	struct amdgpu_irq_src		crtc_irq;
+	struct amdgpu_irq_src		vline0_irq;
 	struct amdgpu_irq_src		vupdate_irq;
 	struct amdgpu_irq_src		pageflip_irq;
 	struct amdgpu_irq_src		hpd_irq;
@@ -1006,6 +1008,12 @@ struct amdgpu_device {
 	bool                            in_suspend;
 	bool				in_hibernate;

+	/*
+	 * The combination flag in_poweroff_reboot_com is used to identify
+	 * the poweroff and reboot operations during an s0i3 system-wide
+	 * suspend.
+	 */
+	bool				in_poweroff_reboot_com;
+
 	atomic_t			in_gpu_reset;
 	enum pp_mp1_state               mp1_state;
 	struct rw_semaphore reset_sem;
@@ -1227,6 +1235,7 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 			      struct amdgpu_job* job);
 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
+int amdgpu_device_pci_reset(struct amdgpu_device *adev);
 bool amdgpu_device_need_post(struct amdgpu_device *adev);

 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 8155c54392c8..36a741d63ddc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -903,10 +903,11 @@ void amdgpu_acpi_fini(struct amdgpu_device *adev)
  */
 bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev)
 {
+#if defined(CONFIG_AMD_PMC)
 	if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
 		if (adev->flags & AMD_IS_APU)
 			return true;
 	}
-
+#endif
 	return false;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index db96d69eb45e..c5343a5eecbe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -47,12 +47,8 @@ int amdgpu_amdkfd_init(void)
 	amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh;
 	amdgpu_amdkfd_total_mem_size *= si.mem_unit;

-#ifdef CONFIG_HSA_AMD
 	ret = kgd2kfd_init();
 	amdgpu_amdkfd_gpuvm_init_mem_limits();
-#else
-	ret = -ENOENT;
-#endif
 	kfd_initialized = !ret;

 	return ret;
@@ -696,86 +692,3 @@ bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd)

 	return adev->have_atomics_support;
 }
-
-#ifndef CONFIG_HSA_AMD
-bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
-{
-	return false;
-}
-
-void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
-{
-}
-
-int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
-{
-	return 0;
-}
-
-void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
-					struct amdgpu_vm *vm)
-{
-}
-
-struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
-{
-	return NULL;
-}
-
-int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
-{
-	return 0;
-}
-
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
-			      unsigned int asic_type, bool vf)
-{
-	return NULL;
-}
-
-bool kgd2kfd_device_init(struct kfd_dev *kfd,
-			 struct drm_device *ddev,
-			 const struct kgd2kfd_shared_resources *gpu_resources)
-{
-	return false;
-}
-
-void kgd2kfd_device_exit(struct kfd_dev *kfd)
-{
-}
-
-void kgd2kfd_exit(void)
-{
-}
-
-void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
-{
-}
-
-int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
-{
-	return 0;
-}
-
-int kgd2kfd_pre_reset(struct kfd_dev *kfd)
-{
-	return 0;
-}
-
-int kgd2kfd_post_reset(struct kfd_dev *kfd)
-{
-	return 0;
-}
-
-void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
-{
-}
-
-void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
-{
-}
-
-void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask)
-{
-}
-#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index ea391ca7f2f1..a81d9cacf9b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -94,11 +94,6 @@ enum kgd_engine_type {
 	KGD_ENGINE_MAX
 };

-struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
-						struct mm_struct *mm);
-bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
-struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
-int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);

 struct amdkfd_process_info {
 	/* List head of all VMs that belong to a KFD process */
@@ -132,8 +127,6 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
 void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
 void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
 void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev);
-
-int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
 int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
 				uint32_t vmid, uint64_t gpu_addr,
 				uint32_t *ib_cmd, uint32_t ib_len);
@@ -153,6 +146,38 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
 int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
 					int queue_bit);

+struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
+						struct mm_struct *mm);
+#if IS_ENABLED(CONFIG_HSA_AMD)
+bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
+struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
+int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
+int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
+#else
+static inline
+bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
+{
+	return false;
+}
+
+static inline
+struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
+{
+	return NULL;
+}
+
+static inline
+int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
+{
+	return 0;
+}
+
+static inline
+int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm)
+{
+	return 0;
+}
+#endif
 /* Shared API */
 int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
 				void **mem_obj, uint64_t *gpu_addr,
@@ -215,8 +240,6 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
 					struct file *filp, u32 pasid,
 					void **vm, void **process_info,
 					struct dma_fence **ef);
-void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
-					struct amdgpu_vm *vm);
 void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm);
 void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm);
 uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm);
@@ -236,23 +259,43 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
 		struct kgd_mem *mem, void **kptr, uint64_t *size);
 int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
 					    struct dma_fence **ef);
-
 int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
 					      struct kfd_vm_fault_info *info);
-
 int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
 				      struct dma_buf *dmabuf,
 				      uint64_t va, void *vm,
 				      struct kgd_mem **mem, uint64_t *size,
 				      uint64_t *mmap_offset);
-
-void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
-void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
-
 int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
 				struct tile_config *config);
+#if IS_ENABLED(CONFIG_HSA_AMD)
+void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
+void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
+				struct amdgpu_vm *vm);
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
+#else
+static inline
+void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
+{
+}
+
+static inline
+void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
+					struct amdgpu_vm *vm)
+{
+}
+
+static inline
+void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
+{
+}
+#endif
 /* KGD2KFD callbacks */
+int kgd2kfd_quiesce_mm(struct mm_struct *mm);
+int kgd2kfd_resume_mm(struct mm_struct *mm);
+int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
+				struct dma_fence *fence);
+#if IS_ENABLED(CONFIG_HSA_AMD)
 int kgd2kfd_init(void);
 void kgd2kfd_exit(void);
 struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
@@ -266,11 +309,68 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
 int kgd2kfd_pre_reset(struct kfd_dev *kfd);
 int kgd2kfd_post_reset(struct kfd_dev *kfd);
 void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
-int kgd2kfd_quiesce_mm(struct mm_struct *mm);
-int kgd2kfd_resume_mm(struct mm_struct *mm);
-int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
-					       struct dma_fence *fence);
 void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
 void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask);
+#else
+static inline int kgd2kfd_init(void)
+{
+	return -ENOENT;
+}
+
+static inline void kgd2kfd_exit(void)
+{
+}
+
+static inline
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
+				unsigned int asic_type, bool vf)
+{
+	return NULL;
+}
+
+static inline
+bool kgd2kfd_device_init(struct kfd_dev *kfd, struct drm_device *ddev,
+			const struct kgd2kfd_shared_resources *gpu_resources)
+{
+	return false;
+}
+
+static inline void kgd2kfd_device_exit(struct kfd_dev *kfd)
+{
+}
+
+static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
+{
+}
+
+static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
+{
+	return 0;
+}
+
+static inline int kgd2kfd_pre_reset(struct kfd_dev *kfd)
+{
+	return 0;
+}
+
+static inline int kgd2kfd_post_reset(struct kfd_dev *kfd)
+{
+	return 0;
+}
+
+static inline
+void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
+{
+}
+
+static inline
+void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
+{
+}
+
+static inline
+void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask)
+{
+}
+#endif
 #endif /* AMDGPU_AMDKFD_H_INCLUDED */
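[Editor's note] The amdgpu_amdkfd.h rework above is an instance of a common kernel pattern: instead of compiling stub definitions into a .c file under #ifndef CONFIG_HSA_AMD, the header declares the real functions when the option is enabled and provides static inline no-op stubs otherwise, so callers compile and link unchanged either way. A minimal sketch of the idiom, with hypothetical names (CONFIG_FOO, foo_init and foo_exit are illustrative, not part of this patch):

/* foo.h - illustrative sketch of the IS_ENABLED() stub pattern */
#include <linux/errno.h>	/* -ENOENT */
#include <linux/kconfig.h>	/* IS_ENABLED() */

#if IS_ENABLED(CONFIG_FOO)
/* Real implementations live in foo.c, built only when CONFIG_FOO=y/m. */
int foo_init(void);
void foo_exit(void);
#else
/* Stubs let callers build and run with the feature compiled out. */
static inline int foo_init(void)
{
	return -ENOENT;	/* callers see a graceful "not present" */
}
static inline void foo_exit(void)
{
}
#endif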
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 0849b68e784f..ac0a432a9bf7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -26,6 +26,7 @@
 #include <linux/sched/task.h>

 #include "amdgpu_object.h"
+#include "amdgpu_gem.h"
 #include "amdgpu_vm.h"
 #include "amdgpu_amdkfd.h"
 #include "amdgpu_dma_buf.h"
@@ -1152,7 +1153,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 	struct sg_table *sg = NULL;
 	uint64_t user_addr = 0;
 	struct amdgpu_bo *bo;
-	struct amdgpu_bo_param bp;
+	struct drm_gem_object *gobj;
 	u32 domain, alloc_domain;
 	u64 alloc_flags;
 	int ret;
@@ -1220,19 +1221,14 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 	pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n",
 			va, size, domain_string(alloc_domain));

-	memset(&bp, 0, sizeof(bp));
-	bp.size = size;
-	bp.byte_align = 1;
-	bp.domain = alloc_domain;
-	bp.flags = alloc_flags;
-	bp.type = bo_type;
-	bp.resv = NULL;
-	ret = amdgpu_bo_create(adev, &bp, &bo);
+	ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags,
+				       bo_type, NULL, &gobj);
 	if (ret) {
 		pr_debug("Failed to create BO on domain %s. ret %d\n",
-				domain_string(alloc_domain), ret);
+			 domain_string(alloc_domain), ret);
 		goto err_bo_create;
 	}
+	bo = gem_to_amdgpu_bo(gobj);
 	if (bo_type == ttm_bo_type_sg) {
 		bo->tbo.sg = sg;
 		bo->tbo.ttm->sg = sg;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 306077884a67..6107ac91db25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -112,6 +112,7 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
 union igp_info {
 	struct atom_integrated_system_info_v1_11 v11;
 	struct atom_integrated_system_info_v1_12 v12;
+	struct atom_integrated_system_info_v2_1 v21;
 };

 union umc_info {
@@ -209,24 +210,42 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
 	if (adev->flags & AMD_IS_APU) {
 		igp_info = (union igp_info *)
 			(mode_info->atom_context->bios + data_offset);
-		switch (crev) {
-		case 11:
-			mem_channel_number = igp_info->v11.umachannelnumber;
-			/* channel width is 64 */
-			if (vram_width)
-				*vram_width = mem_channel_number * 64;
-			mem_type = igp_info->v11.memorytype;
-			if (vram_type)
-				*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+		switch (frev) {
+		case 1:
+			switch (crev) {
+			case 11:
+			case 12:
+				mem_channel_number = igp_info->v11.umachannelnumber;
+				if (!mem_channel_number)
+					mem_channel_number = 1;
+				/* channel width is 64 */
+				if (vram_width)
+					*vram_width = mem_channel_number * 64;
+				mem_type = igp_info->v11.memorytype;
+				if (vram_type)
+					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+				break;
+			default:
+				return -EINVAL;
+			}
 			break;
-		case 12:
-			mem_channel_number = igp_info->v12.umachannelnumber;
-			/* channel width is 64 */
-			if (vram_width)
-				*vram_width = mem_channel_number * 64;
-			mem_type = igp_info->v12.memorytype;
-			if (vram_type)
-				*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+		case 2:
+			switch (crev) {
+			case 1:
+			case 2:
+				mem_channel_number = igp_info->v21.umachannelnumber;
+				if (!mem_channel_number)
+					mem_channel_number = 1;
+				/* channel width is 64 */
+				if (vram_width)
+					*vram_width = mem_channel_number * 64;
+				mem_type = igp_info->v21.memorytype;
+				if (vram_type)
+					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+				break;
+			default:
+				return -EINVAL;
+			}
 			break;
 		default:
 			return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index efdf639f6593..cfb1a9a04477 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -291,7 +291,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
 			continue;

 		status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
-		if (!ACPI_FAILURE(status)) {
+		if (ACPI_SUCCESS(status)) {
 			found = true;
 			break;
 		}
@@ -304,7 +304,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
 			continue;

 		status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
-		if (!ACPI_FAILURE(status)) {
+		if (ACPI_SUCCESS(status)) {
 			found = true;
 			break;
 		}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 594a0108e90f..3e240b952e79 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -98,8 +98,7 @@ static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
 	return 0;

 error_free:
-	if (info)
-		kvfree(info);
+	kvfree(info);

 	return r;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index c34be9f612c8..43059ead733b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -35,6 +35,7 @@
 #include "amdgpu_dm_debugfs.h"
 #include "amdgpu_ras.h"
 #include "amdgpu_rap.h"
+#include "amdgpu_securedisplay.h"
 #include "amdgpu_fw_attestation.h"

 /**
@@ -356,7 +357,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
 	while (size) {
 		uint32_t value;

-		value = RREG32_PCIE(*pos >> 2);
+		value = RREG32_PCIE(*pos);
 		r = put_user(value, (uint32_t *)buf);
 		if (r) {
 			pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
@@ -423,7 +424,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
 			return r;
 		}

-		WREG32_PCIE(*pos >> 2, value);
+		WREG32_PCIE(*pos, value);

 		result += 4;
 		buf += 4;
@@ -1669,6 +1670,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)

 	amdgpu_rap_debugfs_init(adev);

+	amdgpu_securedisplay_debugfs_init(adev);
+
 	amdgpu_fw_attestation_debugfs_init(adev);

 	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 377b32691881..6447cd6ca5a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -81,7 +81,6 @@ MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
-MODULE_FIRMWARE("amdgpu/green_sardine_gpu_info.bin");

 #define AMDGPU_RESUME_MS		2000

@@ -930,6 +929,18 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
 	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
 }

+/**
+ * amdgpu_device_pci_reset - reset the GPU using generic PCI means
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
+ */
+int amdgpu_device_pci_reset(struct amdgpu_device *adev)
+{
+	return pci_reset_function(adev->pdev);
+}
+
 /*
  * GPU doorbell aperture helpers function.
  */
@@ -1106,8 +1117,7 @@ void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
  */
 int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
 {
-	u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size);
-	u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
+	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
 	struct pci_bus *root;
 	struct resource *res;
 	unsigned i;
@@ -1138,6 +1148,10 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
 	if (!res)
 		return 0;

+	/* Limit the BAR size to what is available */
+	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
+			rbar_size);
+
 	/* Disable memory decoding while we change the BAR addresses and size */
 	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
 	pci_write_config_word(adev->pdev, PCI_COMMAND,
@@ -1423,24 +1437,22 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
 		/* don't suspend or resume card normally */
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

-		pci_set_power_state(dev->pdev, PCI_D0);
-		amdgpu_device_load_pci_state(dev->pdev);
-		r = pci_enable_device(dev->pdev);
+		pci_set_power_state(pdev, PCI_D0);
+		amdgpu_device_load_pci_state(pdev);
+		r = pci_enable_device(pdev);
 		if (r)
 			DRM_WARN("pci_enable_device failed (%d)\n", r);
 		amdgpu_device_resume(dev, true);

 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
-		drm_kms_helper_poll_enable(dev);
 	} else {
 		pr_info("switched off\n");
-		drm_kms_helper_poll_disable(dev);
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 		amdgpu_device_suspend(dev, true);
-		amdgpu_device_cache_pci_state(dev->pdev);
+		amdgpu_device_cache_pci_state(pdev);
 		/* Shut down the device */
-		pci_disable_device(dev->pdev);
-		pci_set_power_state(dev->pdev, PCI_D3cold);
+		pci_disable_device(pdev);
+		pci_set_power_state(pdev, PCI_D3cold);
 		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
 	}
 }
@@ -1703,8 +1715,7 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
 	adev->enable_virtual_display = false;

 	if (amdgpu_virtual_display) {
-		struct drm_device *ddev = adev_to_drm(adev);
-		const char *pci_address_name = pci_name(ddev->pdev);
+		const char *pci_address_name = pci_name(adev->pdev);
 		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

 		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
@@ -2667,7 +2678,8 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
 {
 	int i, r;

-	if (!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) {
+	if (adev->in_poweroff_reboot_com ||
+	    !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) {
 		amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
 		amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
 	}
@@ -3117,7 +3129,10 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
 	 */
 	adev->gfx_timeout = msecs_to_jiffies(10000);
 	adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
-	if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
+	if (amdgpu_sriov_vf(adev))
+		adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
+					msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
+	else if (amdgpu_passthrough(adev))
 		adev->compute_timeout =  msecs_to_jiffies(60000);
 	else
 		adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
@@ -3397,7 +3412,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 		}
 	}

-	pci_enable_pcie_error_reporting(adev->ddev.pdev);
+	pci_enable_pcie_error_reporting(adev->pdev);

 	/* Post card if necessary */
 	if (amdgpu_device_need_post(adev)) {
@@ -3720,14 +3735,15 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)

 	r = amdgpu_device_ip_suspend_phase1(adev);

-	amdgpu_amdkfd_suspend(adev, !fbcon);
+	amdgpu_amdkfd_suspend(adev, adev->in_runpm);

 	/* evict vram memory */
 	amdgpu_bo_evict_vram(adev);

 	amdgpu_fence_driver_suspend(adev);

-	if (!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev))
+	if (adev->in_poweroff_reboot_com ||
+	    !amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev))
 		r = amdgpu_device_ip_suspend_phase2(adev);
 	else
 		amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
@@ -3804,7 +3820,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
 			}
 		}
 	}
-	r = amdgpu_amdkfd_resume(adev, !fbcon);
+	r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
 	if (r)
 		return r;

@@ -4206,6 +4222,8 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
 		case CHIP_NAVI14:
 		case CHIP_NAVI12:
 		case CHIP_SIENNA_CICHLID:
+		case CHIP_NAVY_FLOUNDER:
+		case CHIP_DIMGREY_CAVEFISH:
 			break;
 		default:
 			goto disabled;
@@ -4455,6 +4473,46 @@ static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
 	up_write(&adev->reset_sem);
 }

+/*
+ * Lock a list of amdgpu devices in a hive safely. If this is not a hive
+ * with multiple nodes, it behaves like amdgpu_device_lock_adev.
+ *
+ * unlock won't require roll back.
+ */
+static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgpu_hive_info *hive)
+{
+	struct amdgpu_device *tmp_adev = NULL;
+
+	if (adev->gmc.xgmi.num_physical_nodes > 1) {
+		if (!hive) {
+			dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes");
+			return -ENODEV;
+		}
+		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+			if (!amdgpu_device_lock_adev(tmp_adev, hive))
+				goto roll_back;
+		}
+	} else if (!amdgpu_device_lock_adev(adev, hive))
+		return -EAGAIN;
+
+	return 0;
+roll_back:
+	if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) {
+		/*
+		 * If the lock iteration broke in the middle of a hive,
+		 * there may be a race, or a hive device may have locked
+		 * up independently. We may or may not be in trouble, so
+		 * roll back the locks taken so far and warn.
+		 */
+		dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle. Rolling back to unlock");
+		list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+			amdgpu_device_unlock_adev(tmp_adev);
+		}
+	}
+	return -EAGAIN;
+}
+
 static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
 {
 	struct pci_dev *p = NULL;
@@ -4568,20 +4626,36 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 			DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
 				job ? job->base.id : -1, hive->hive_id);
 			amdgpu_put_xgmi_hive(hive);
+			if (job)
+				drm_sched_increase_karma(&job->base);
 			return 0;
 		}
 		mutex_lock(&hive->hive_lock);
 	}

 	/*
+	 * Lock the device before operating on the linked list; if we did
+	 * not get the device lock, don't touch the linked list, since
+	 * others may be iterating over it.
+	 */
+	r = amdgpu_device_lock_hive_adev(adev, hive);
+	if (r) {
+		dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
+					job ? job->base.id : -1);
+
+		/* even though we skipped this reset, the job is still guilty */
+		if (job)
+			drm_sched_increase_karma(&job->base);
+		goto skip_recovery;
+	}
+
+	/*
 	 * Build list of devices to reset.
 	 * In case we are in XGMI hive mode, resort the device list
 	 * to put adev in the 1st position.
 	 */
 	INIT_LIST_HEAD(&device_list);
 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
-		if (!hive)
-			return -ENODEV;
 		if (!list_is_first(&adev->gmc.xgmi.head, &hive->device_list))
 			list_rotate_to_front(&adev->gmc.xgmi.head, &hive->device_list);
 		device_list_handle = &hive->device_list;
@@ -4592,13 +4666,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,

 	/* block all schedulers and reset given job's ring */
 	list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
-		if (!amdgpu_device_lock_adev(tmp_adev, hive)) {
-			dev_info(tmp_adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
-				  job ? job->base.id : -1);
-			r = 0;
-			goto skip_recovery;
-		}
-
 		/*
 		 * Try to put the audio codec into suspend state
 		 * before gpu reset started.
@@ -4736,7 +4803,7 @@ skip_recovery:
 		amdgpu_put_xgmi_hive(hive);
 	}

-	if (r)
+	if (r && r != -EAGAIN)
 		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
 	return r;
 }
@@ -4786,7 +4853,13 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
 					CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
 					CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
 		} else {
-			if (speed_cap == PCIE_SPEED_16_0GT)
+			if (speed_cap == PCIE_SPEED_32_0GT)
+				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
+							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
+							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
+			else if (speed_cap == PCIE_SPEED_16_0GT)
 				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
 							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
@@ -4806,7 +4879,13 @@ static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
 		} else {
-			if (platform_speed_cap == PCIE_SPEED_16_0GT)
+			if (platform_speed_cap == PCIE_SPEED_32_0GT)
+				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
+							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
+							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
+			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
 				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
 							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
@@ -4950,8 +5029,8 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
 	case pci_channel_io_normal:
 		return PCI_ERS_RESULT_CAN_RECOVER;
 	/* Fatal error, prepare for slot reset */
-	case pci_channel_io_frozen:
-		/*
+	case pci_channel_io_frozen:
+		/*
 		 * Cancel and wait for all TDRs in progress if failing to
 		 * set  adev->in_gpu_reset in amdgpu_device_lock_adev
 		 *
@@ -5042,7 +5121,7 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
 		goto out;
 	}

-	adev->in_pci_err_recovery = true;
+	adev->in_pci_err_recovery = true;
 	r = amdgpu_device_pre_asic_reset(adev, NULL, &need_full_reset);
 	adev->in_pci_err_recovery = false;
 	if (r)
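[Editor's note] The resize_fb_bar() hunk above swaps the open-coded BAR math for pci_rebar_bytes_to_size() and clamps the request against the sizes the endpoint actually advertises: pci_rebar_get_possible_sizes() returns a bitmask where bit n set means a BAR of 2^(n+20) bytes is supported, so fls(mask) - 1 picks the largest supported encoding. A hedged userspace sketch of just the size encoding (rebar_bytes_to_size() here is an illustrative re-implementation, not the kernel symbol):

#include <stdio.h>

/* The PCIe spec encodes a resizable-BAR size of 2^(n+20) bytes as the
 * value n, i.e. 1 MiB -> 0, 256 MiB -> 8, 8 GiB -> 13. */
static int rebar_bytes_to_size(unsigned long long bytes)
{
	int n = 0;

	/* round up to the next power of two, as the kernel helper does */
	while ((1ULL << (n + 20)) < bytes)
		n++;
	return n;
}

int main(void)
{
	/* 8 GiB of VRAM wants encoding 13; if the device only advertises
	 * sizes up to bit 12 (4 GiB), the min() in the patch clamps the
	 * request down to 12. */
	printf("256 MiB -> %d\n", rebar_bytes_to_size(256ULL << 20));
	printf("8 GiB   -> %d\n", rebar_bytes_to_size(8ULL << 30));
	return 0;
}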
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index f764803c53a4..48cb33e5b382 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -926,8 +926,10 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
 			       struct drm_file *file_priv,
 			       const struct drm_mode_fb_cmd2 *mode_cmd)
 {
-	struct drm_gem_object *obj;
 	struct amdgpu_framebuffer *amdgpu_fb;
+	struct drm_gem_object *obj;
+	struct amdgpu_bo *bo;
+	uint32_t domains;
 	int ret;

 	obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
@@ -938,7 +940,9 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
 	}

 	/* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
-	if (obj->import_attach) {
+	bo = gem_to_amdgpu_bo(obj);
+	domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
+	if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
 		drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
 		return ERR_PTR(-EINVAL);
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 72efd579ec5e..4575192d9b08 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -132,8 +132,12 @@ uint amdgpu_pg_mask = 0xffffffff;
 uint amdgpu_sdma_phase_quantum = 32;
 char *amdgpu_disable_cu = NULL;
 char *amdgpu_virtual_display = NULL;
-/* OverDrive(bit 14) disabled by default*/
-uint amdgpu_pp_feature_mask = 0xffffbfff;
+
+/*
+ * OverDrive(bit 14) disabled by default
+ * GFX DCS(bit 19) disabled by default
+ */
+uint amdgpu_pp_feature_mask = 0xfff7bfff;
 uint amdgpu_force_long_training;
 int amdgpu_job_hang_limit;
 int amdgpu_lbpw = -1;
@@ -789,9 +793,9 @@ module_param_named(tmz, amdgpu_tmz, int, 0444);

 /**
  * DOC: reset_method (int)
- * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)
+ * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco, 5 = pci)
  */
-MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco)");
+MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco, 5 = pci)");
 module_param_named(reset_method, amdgpu_reset_method, int, 0444);

 /**
@@ -1085,6 +1089,8 @@ static const struct pci_device_id pciidlist[] = {

 	/* Renoir */
 	{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
+	{0x1002, 0x1638, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
+	{0x1002, 0x164C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},

 	/* Navi12 */
 	{0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},
@@ -1092,6 +1098,7 @@ static const struct pci_device_id pciidlist[] = {

 	/* Sienna_Cichlid */
 	{0x1002, 0x73A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+	{0x1002, 0x73A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
 	{0x1002, 0x73A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
 	{0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
 	{0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
@@ -1204,7 +1211,6 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
 	if (ret)
 		return ret;

-	ddev->pdev = pdev;
 	pci_set_drvdata(pdev, ddev);

 	ret = amdgpu_driver_load_kms(adev, ent->driver_data);
@@ -1264,7 +1270,9 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
 	 */
 	if (!amdgpu_passthrough(adev))
 		adev->mp1_state = PP_MP1_STATE_UNLOAD;
+	adev->in_poweroff_reboot_com = true;
 	amdgpu_device_ip_suspend(adev);
+	adev->in_poweroff_reboot_com = false;
 	adev->mp1_state = PP_MP1_STATE_NONE;
 }

@@ -1306,8 +1314,13 @@ static int amdgpu_pmops_thaw(struct device *dev)
 static int amdgpu_pmops_poweroff(struct device *dev)
 {
 	struct drm_device *drm_dev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = drm_to_adev(drm_dev);
+	int r;

-	return amdgpu_device_suspend(drm_dev, true);
+	adev->in_poweroff_reboot_com = true;
+	r = amdgpu_device_suspend(drm_dev, true);
+	adev->in_poweroff_reboot_com = false;
+	return r;
 }

 static int amdgpu_pmops_restore(struct device *dev)
@@ -1342,11 +1355,12 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
 	adev->in_runpm = true;
 	if (amdgpu_device_supports_atpx(drm_dev))
 		drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
-	drm_kms_helper_poll_disable(drm_dev);

 	ret = amdgpu_device_suspend(drm_dev, false);
-	if (ret)
+	if (ret) {
+		adev->in_runpm = false;
 		return ret;
+	}

 	if (amdgpu_device_supports_atpx(drm_dev)) {
 		/* Only need to handle PCI state in the driver for ATPX
@@ -1399,7 +1413,6 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
 		amdgpu_device_baco_exit(drm_dev);
 	}
 	ret = amdgpu_device_resume(drm_dev, false);
-	drm_kms_helper_poll_enable(drm_dev);
 	if (amdgpu_device_supports_atpx(drm_dev))
 		drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
 	adev->in_runpm = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 0bf7d36c6686..51cd49c6f38f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -271,7 +271,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 	DRM_INFO("fb depth is %d\n", fb->format->depth);
 	DRM_INFO("   pitch is %d\n", fb->pitches[0]);

-	vga_switcheroo_client_fb_set(adev_to_drm(adev)->pdev, info);
+	vga_switcheroo_client_fb_set(adev->pdev, info);
 	return 0;

 out:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
index 7c6e02e35573..8d1ad294cb02 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fw_attestation.c
@@ -47,10 +47,9 @@ typedef struct FW_ATT_RECORD
 	uint16_t AttFwIdV2;			/* V2 FW ID field */
 	uint32_t AttFWVersion;			/* FW Version */
 	uint16_t AttFWActiveFunctionID;	/* The VF ID (only in VF Attestation Table) */
-	uint16_t AttSource;			/* FW source indicator */
-	uint16_t RecordValid;			/* Indicates whether the record is a valid entry */
-	uint8_t  AttFwTaId;			/* Ta ID (only in TA Attestation Table) */
-	uint8_t  Reserved;
+	uint8_t  AttSource;			/* FW source indicator */
+	uint8_t  RecordValid;			/* Indicates whether the record is a valid entry */
+	uint32_t AttFwTaId;			/* Ta ID (only in TA Attestation Table) */
 } FW_ATT_RECORD;

 static ssize_t amdgpu_fw_attestation_debugfs_read(struct file *f,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index d0a1fee1f5f6..b443907afcea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -269,8 +269,8 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 		resv = vm->root.base.bo->tbo.base.resv;
 	}

-retry:
 	initial_domain = (u32)(0xffffffff & args->in.domains);
+retry:
 	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
 				     initial_domain,
 				     flags, ttm_bo_type_device, resv, &gobj);
@@ -619,7 +619,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	int r = 0;

 	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
-		dev_dbg(&dev->pdev->dev,
+		dev_dbg(dev->dev,
 			"va_address 0x%LX is in reserved area 0x%LX\n",
 			args->va_address, AMDGPU_VA_RESERVED_SIZE);
 		return -EINVAL;
@@ -627,7 +627,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,

 	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
 	    args->va_address < AMDGPU_GMC_HOLE_END) {
-		dev_dbg(&dev->pdev->dev,
+		dev_dbg(dev->dev,
 			"va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
 			args->va_address, AMDGPU_GMC_HOLE_START,
 			AMDGPU_GMC_HOLE_END);
@@ -639,14 +639,14 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
 	vm_size -= AMDGPU_VA_RESERVED_SIZE;
 	if (args->va_address + args->map_size > vm_size) {
-		dev_dbg(&dev->pdev->dev,
+		dev_dbg(dev->dev,
 			"va_address 0x%llx is in top reserved area 0x%llx\n",
 			args->va_address + args->map_size, vm_size);
 		return -EINVAL;
 	}

 	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
-		dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
+		dev_dbg(dev->dev, "invalid flags combination 0x%08X\n",
 			args->flags);
 		return -EINVAL;
 	}
@@ -658,7 +658,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	case AMDGPU_VA_OP_REPLACE:
 		break;
 	default:
-		dev_dbg(&dev->pdev->dev, "unsupported operation %d\n",
+		dev_dbg(dev->dev, "unsupported operation %d\n",
 			args->operation);
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index cd2c676a2797..8e0a6c62322e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -193,15 +193,16 @@ static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
 }

 bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
-					       int pipe, int queue)
+					       struct amdgpu_ring *ring)
 {
-	bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev);
-	int cond;
-	/* Policy: alternate between normal and high priority */
-	cond = multipipe_policy ? pipe : queue;
-
-	return ((cond % 2) != 0);
+	/* Policy: use 1st queue as high priority compute queue if we
+	 * have more than one compute queue.
+	 */
+	if (adev->gfx.num_compute_rings > 1 &&
+	    ring == &adev->gfx.compute_ring[0])
+		return true;

+	return false;
 }

 void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 6b5a8f4642cc..72dbcd2bc6a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -380,7 +380,7 @@ void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
 bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec,
 				     int pipe, int queue);
 bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
-					       int pipe, int queue);
+					       struct amdgpu_ring *ring);
 int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me,
 			       int pipe, int queue);
 void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
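[Editor's note] With the helper now keyed off the ring pointer instead of pipe/queue numbers, an IP block can decide a queue's hardware priority at ring-init time. A hedged sketch of a caller (example_compute_ring_init() is illustrative, not from this patch; AMDGPU_GFX_PIPE_PRIO_* and amdgpu_ring_init() are the driver's real symbols, and ring->me/pipe/queue are assumed to be filled in already):

/* Sketch: choosing a hardware priority for a compute ring at init. */
static int example_compute_ring_init(struct amdgpu_device *adev,
				     struct amdgpu_ring *ring,
				     struct amdgpu_irq_src *irq_src,
				     unsigned int irq_type)
{
	unsigned int hw_prio;

	hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
			AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;

	/* amdgpu_ring_init() stores this in ring->hw_prio (see the
	 * amdgpu_ring.c and amdgpu_ring.h hunks further down). */
	return amdgpu_ring_init(adev, ring, 1024, irq_src, irq_type, hw_prio);
}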
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
index 47cad23a6b9e..bca4dddd5a15 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
@@ -176,7 +176,7 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev,
 	i2c->rec = *rec;
 	i2c->adapter.owner = THIS_MODULE;
 	i2c->adapter.class = I2C_CLASS_DDC;
-	i2c->adapter.dev.parent = &dev->pdev->dev;
+	i2c->adapter.dev.parent = dev->dev;
 	i2c->dev = dev;
 	i2c_set_adapdata(&i2c->adapter, i2c);
 	mutex_init(&i2c->mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 024d0a563a65..7645223ea0ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -195,6 +195,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	if ((ib->flags & AMDGPU_IB_FLAG_EMIT_MEM_SYNC) && ring->funcs->emit_mem_sync)
 		ring->funcs->emit_mem_sync(ring);

+	if (ring->funcs->emit_wave_limit &&
+	    ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
+		ring->funcs->emit_wave_limit(ring, true);
+
 	if (ring->funcs->insert_start)
 		ring->funcs->insert_start(ring);

@@ -295,6 +299,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	ring->current_ctx = fence_ctx;
 	if (vm && ring->funcs->emit_switch_buffer)
 		amdgpu_ring_emit_switch_buffer(ring);
+
+	if (ring->funcs->emit_wave_limit &&
+	    ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH)
+		ring->funcs->emit_wave_limit(ring, false);
+
 	amdgpu_ring_commit(ring);
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 725a9c73d51f..dc852af4f3b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -209,6 +209,8 @@ restart_ih:
  * amdgpu_ih_decode_iv_helper - decode an interrupt vector
  *
  * @adev: amdgpu_device pointer
+ * @ih: ih ring to process
+ * @entry: IV entry
  *
  * Decodes the interrupt vector at the current rptr
  * position and also advances the position for Vega10
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index b16b32797624..64beb3399604 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -142,7 +142,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
 	    (amdgpu_is_atpx_hybrid() ||
 	     amdgpu_has_atpx_dgpu_power_cntl()) &&
 	    ((flags & AMD_IS_APU) == 0) &&
-	    !pci_is_thunderbolt_attached(dev->pdev))
+	    !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
 		flags |= AMD_IS_PX;

 	parent = pci_upstream_bridge(adev->pdev);
@@ -156,7 +156,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
 	 */
 	r = amdgpu_device_init(adev, flags);
 	if (r) {
-		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
+		dev_err(dev->dev, "Fatal error during GPU init\n");
 		goto out;
 	}

@@ -173,8 +173,6 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
 		switch (adev->asic_type) {
 		case CHIP_VEGA20:
 		case CHIP_ARCTURUS:
-		case CHIP_SIENNA_CICHLID:
-		case CHIP_NAVY_FLOUNDER:
 			/* enable runpm if runpm=1 */
 			if (amdgpu_runtime_pm > 0)
 				adev->runpm = true;
@@ -199,7 +197,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)

 	acpi_status = amdgpu_acpi_init(adev);
 	if (acpi_status)
-		dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n");
+		dev_dbg(dev->dev, "Error during ACPI methods call\n");

 	if (adev->runpm) {
 		/* only need to skip on ATPX */
@@ -735,10 +733,10 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 		if (!dev_info)
 			return -ENOMEM;

-		dev_info->device_id = dev->pdev->device;
+		dev_info->device_id = adev->pdev->device;
 		dev_info->chip_rev = adev->rev_id;
 		dev_info->external_rev = adev->external_rev_id;
-		dev_info->pci_rev = dev->pdev->revision;
+		dev_info->pci_rev = adev->pdev->revision;
 		dev_info->family = adev->family;
 		dev_info->num_shader_engines = adev->gfx.config.max_shader_engines;
 		dev_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
index 4ba0024aedf1..7c11bce4514b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -88,6 +88,7 @@ struct amdgpu_nbio_funcs {
 	int (*ras_late_init)(struct amdgpu_device *adev);
 	void (*enable_aspm)(struct amdgpu_device *adev,
 			    bool enable);
+	void (*program_aspm)(struct amdgpu_device *adev);
 };

 struct amdgpu_nbio {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 6cc9919b12cc..4b29b8205442 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -897,7 +897,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 		return -EINVAL;

 	/* A shared bo cannot be migrated to VRAM */
-	if (bo->prime_shared_count) {
+	if (bo->prime_shared_count || bo->tbo.base.import_attach) {
 		if (domain & AMDGPU_GEM_DOMAIN_GTT)
 			domain = AMDGPU_GEM_DOMAIN_GTT;
 		else
@@ -911,10 +911,16 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,

 	if (bo->tbo.pin_count) {
 		uint32_t mem_type = bo->tbo.mem.mem_type;
+		uint32_t mem_flags = bo->tbo.mem.placement;

 		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
 			return -EINVAL;

+		if ((mem_type == TTM_PL_VRAM) &&
+		    (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) &&
+		    !(mem_flags & TTM_PL_FLAG_CONTIGUOUS))
+			return -EINVAL;
+
 		ttm_bo_pin(&bo->tbo);

 		if (max_offset != 0) {
@@ -930,7 +936,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 	if (bo->tbo.base.import_attach)
 		dma_buf_pin(bo->tbo.base.import_attach);

-	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 	/* force to pin into visible video ram */
 	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
 		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
@@ -983,6 +988,7 @@ error:
  */
 int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
 {
+	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 	return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index c2d9d072b6fe..839917eb7bc3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -36,6 +36,7 @@
 #include "psp_v12_0.h"

 #include "amdgpu_ras.h"
+#include "amdgpu_securedisplay.h"

 static int psp_sysfs_init(struct amdgpu_device *adev);
 static void psp_sysfs_fini(struct amdgpu_device *adev);
@@ -1652,6 +1653,175 @@ int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 }
 // RAP end

+/* securedisplay start */
+static int psp_securedisplay_init_shared_buf(struct psp_context *psp)
+{
+	int ret;
+
+	/*
+	 * Allocate 16k memory aligned to 4k from Frame Buffer (local
+	 * physical) for sa ta <-> Driver
+	 */
+	ret = amdgpu_bo_create_kernel(psp->adev, PSP_SECUREDISPLAY_SHARED_MEM_SIZE,
+				      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+				      &psp->securedisplay_context.securedisplay_shared_bo,
+				      &psp->securedisplay_context.securedisplay_shared_mc_addr,
+				      &psp->securedisplay_context.securedisplay_shared_buf);
+
+	return ret;
+}
+
+static int psp_securedisplay_load(struct psp_context *psp)
+{
+	int ret;
+	struct psp_gfx_cmd_resp *cmd;
+
+	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+	memcpy(psp->fw_pri_buf, psp->ta_securedisplay_start_addr, psp->ta_securedisplay_ucode_size);
+
+	psp_prep_ta_load_cmd_buf(cmd,
+				 psp->fw_pri_mc_addr,
+				 psp->ta_securedisplay_ucode_size,
+				 psp->securedisplay_context.securedisplay_shared_mc_addr,
+				 PSP_SECUREDISPLAY_SHARED_MEM_SIZE);
+
+	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+	if (ret)
+		goto failed;
+
+	psp->securedisplay_context.securedisplay_initialized = true;
+	psp->securedisplay_context.session_id = cmd->resp.session_id;
+	mutex_init(&psp->securedisplay_context.mutex);
+
+failed:
+	kfree(cmd);
+	return ret;
+}
+
+static int psp_securedisplay_unload(struct psp_context *psp)
+{
+	int ret;
+	struct psp_gfx_cmd_resp *cmd;
+
+	cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	psp_prep_ta_unload_cmd_buf(cmd, psp->securedisplay_context.session_id);
+
+	ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+	kfree(cmd);
+
+	return ret;
+}
+
+static int psp_securedisplay_initialize(struct psp_context *psp)
+{
+	int ret;
+	struct securedisplay_cmd *securedisplay_cmd;
+
+	/*
+	 * TODO: bypass the initialize in sriov for now
+	 */
+	if (amdgpu_sriov_vf(psp->adev))
+		return 0;
+
+	if (!psp->adev->psp.ta_securedisplay_ucode_size ||
+	    !psp->adev->psp.ta_securedisplay_start_addr) {
+		dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
+		return 0;
+	}
+
+	if (!psp->securedisplay_context.securedisplay_initialized) {
+		ret = psp_securedisplay_init_shared_buf(psp);
+		if (ret)
+			return ret;
+	}
+
+	ret = psp_securedisplay_load(psp);
+	if (ret)
+		return ret;
+
+	psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
+			TA_SECUREDISPLAY_COMMAND__QUERY_TA);
+
+	ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
+	if (ret) {
+		psp_securedisplay_unload(psp);
+
+		amdgpu_bo_free_kernel(&psp->securedisplay_context.securedisplay_shared_bo,
+			      &psp->securedisplay_context.securedisplay_shared_mc_addr,
+			      &psp->securedisplay_context.securedisplay_shared_buf);
+
+		psp->securedisplay_context.securedisplay_initialized = false;
+
+		dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
+		return -EINVAL;
+	}
+
+	if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) {
+		psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
+		dev_err(psp->adev->dev, "SECUREDISPLAY: query securedisplay TA failed. ret 0x%x\n",
+			securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret);
+	}
+
+	return 0;
+}
+
+static int psp_securedisplay_terminate(struct psp_context *psp)
+{
+	int ret;
+
+	/*
+	 * TODO: bypass the terminate in sriov for now
+	 */
+	if (amdgpu_sriov_vf(psp->adev))
+		return 0;
+
+	if (!psp->securedisplay_context.securedisplay_initialized)
+		return 0;
+
+	ret = psp_securedisplay_unload(psp);
+	if (ret)
+		return ret;
+
+	psp->securedisplay_context.securedisplay_initialized = false;
+
+	/* free securedisplay shared memory */
+	amdgpu_bo_free_kernel(&psp->securedisplay_context.securedisplay_shared_bo,
+			      &psp->securedisplay_context.securedisplay_shared_mc_addr,
+			      &psp->securedisplay_context.securedisplay_shared_buf);
+
+	return ret;
+}
+
+int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+	int ret;
+
+	if (!psp->securedisplay_context.securedisplay_initialized)
+		return -EINVAL;
+
+	if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
+	    ta_cmd_id != TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC)
+		return -EINVAL;
+
+	mutex_lock(&psp->securedisplay_context.mutex);
+
+	ret = psp_ta_invoke(psp, ta_cmd_id, psp->securedisplay_context.session_id);
+
+	mutex_unlock(&psp->securedisplay_context.mutex);
+
+	return ret;
+}
+/* SECUREDISPLAY end */
+
 static int psp_hw_start(struct psp_context *psp)
 {
 	struct amdgpu_device *adev = psp->adev;
@@ -2126,6 +2296,11 @@ skip_memalloc:
 		if (ret)
 			dev_err(psp->adev->dev,
 				"RAP: Failed to initialize RAP\n");
+
+		ret = psp_securedisplay_initialize(psp);
+		if (ret)
+			dev_err(psp->adev->dev,
+				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
 	}

 	return 0;
@@ -2176,6 +2351,7 @@ static int psp_hw_fini(void *handle)

 	if (psp->adev->psp.ta_fw) {
 		psp_ras_terminate(psp);
+		psp_securedisplay_terminate(psp);
 		psp_rap_terminate(psp);
 		psp_dtm_terminate(psp);
 		psp_hdcp_terminate(psp);
@@ -2240,6 +2416,11 @@ static int psp_suspend(void *handle)
 			DRM_ERROR("Failed to terminate rap ta\n");
 			return ret;
 		}
+		ret = psp_securedisplay_terminate(psp);
+		if (ret) {
+			DRM_ERROR("Failed to terminate securedisplay ta\n");
+			return ret;
+		}
 	}

 	ret = psp_asd_unload(psp);
@@ -2323,6 +2504,11 @@ static int psp_resume(void *handle)
 		if (ret)
 			dev_err(psp->adev->dev,
 				"RAP: Failed to initialize RAP\n");
+
+		ret = psp_securedisplay_initialize(psp);
+		if (ret)
+			dev_err(psp->adev->dev,
+				"SECUREDISPLAY: Failed to initialize SECUREDISPLAY\n");
 	}

 	mutex_unlock(&adev->firmware.mutex);
@@ -2629,6 +2815,11 @@ static int parse_ta_bin_descriptor(struct psp_context *psp,
 		psp->ta_rap_ucode_size  = le32_to_cpu(desc->size_bytes);
 		psp->ta_rap_start_addr  = ucode_start_addr;
 		break;
+	case TA_FW_TYPE_PSP_SECUREDISPLAY:
+		psp->ta_securedisplay_ucode_version = le32_to_cpu(desc->fw_version);
+		psp->ta_securedisplay_ucode_size    = le32_to_cpu(desc->size_bytes);
+		psp->ta_securedisplay_start_addr    = ucode_start_addr;
+		break;
 	default:
 		dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
 		break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index da250bc1ac57..cb50ba445f8c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -30,6 +30,7 @@
 #include "ta_xgmi_if.h"
 #include "ta_ras_if.h"
 #include "ta_rap_if.h"
+#include "ta_secureDisplay_if.h"

 #define PSP_FENCE_BUFFER_SIZE	0x1000
 #define PSP_CMD_BUFFER_SIZE	0x1000
@@ -40,6 +41,7 @@
 #define PSP_HDCP_SHARED_MEM_SIZE	0x4000
 #define PSP_DTM_SHARED_MEM_SIZE	0x4000
 #define PSP_RAP_SHARED_MEM_SIZE	0x4000
+#define PSP_SECUREDISPLAY_SHARED_MEM_SIZE	0x4000
 #define PSP_SHARED_MEM_SIZE		0x4000
 #define PSP_FW_NAME_LEN		0x24

@@ -171,6 +173,15 @@ struct psp_rap_context {
 	struct mutex		mutex;
 };

+struct psp_securedisplay_context {
+	bool			securedisplay_initialized;
+	uint32_t		session_id;
+	struct amdgpu_bo	*securedisplay_shared_bo;
+	uint64_t		securedisplay_shared_mc_addr;
+	void			*securedisplay_shared_buf;
+	struct mutex		mutex;
+};
+
 #define MEM_TRAIN_SYSTEM_SIGNATURE		0x54534942
 #define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES	0x1000
 #define GDDR6_MEM_TRAINING_OFFSET		0x8000
@@ -298,12 +309,17 @@ struct psp_context
 	uint32_t			ta_rap_ucode_size;
 	uint8_t				*ta_rap_start_addr;

+	uint32_t			ta_securedisplay_ucode_version;
+	uint32_t			ta_securedisplay_ucode_size;
+	uint8_t				*ta_securedisplay_start_addr;
+
 	struct psp_asd_context		asd_context;
 	struct psp_xgmi_context		xgmi_context;
 	struct psp_ras_context		ras;
 	struct psp_hdcp_context 	hdcp_context;
 	struct psp_dtm_context		dtm_context;
 	struct psp_rap_context		rap_context;
+	struct psp_securedisplay_context	securedisplay_context;
 	struct mutex			mutex;
 	struct psp_memory_training_context mem_train_ctx;
 };
@@ -380,6 +396,7 @@ int psp_ras_trigger_error(struct psp_context *psp,
 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
 int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id);

 int psp_rlc_autoload_start(struct psp_context *psp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 82e952696d24..1fb2a91ad30a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -846,7 +846,7 @@ static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev,
 	if (amdgpu_dpm_allow_xgmi_power_down(adev, true))
 		dev_warn(adev->dev, "Failed to allow XGMI power down");

-	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
 		dev_warn(adev->dev, "Failed to allow df cstate");

 	return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 1a612f51ecd9..b644c78475fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -166,7 +166,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 		     unsigned int max_dw, struct amdgpu_irq_src *irq_src,
 		     unsigned int irq_type, unsigned int hw_prio)
 {
-	int r, i;
+	int r;
 	int sched_hw_submission = amdgpu_sched_hw_submission;
 	u32 *num_sched;
 	u32 hw_ip;
@@ -258,8 +258,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 	}

 	ring->max_dw = max_dw;
-	ring->priority = DRM_SCHED_PRIORITY_NORMAL;
-	mutex_init(&ring->priority_mutex);
+	ring->hw_prio = hw_prio;

 	if (!ring->no_scheduler) {
 		hw_ip = ring->funcs->type;
@@ -268,9 +267,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 			&ring->sched;
 	}

-	for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; ++i)
-		atomic_set(&ring->num_jobs[i], 0);
-
 	return 0;
 }
*ring, unsigned vmid); int (*preempt_ib)(struct amdgpu_ring *ring); void (*emit_mem_sync)(struct amdgpu_ring *ring); + void (*emit_wave_limit)(struct amdgpu_ring *ring, bool enable); }; struct amdgpu_ring { @@ -242,11 +243,7 @@ struct amdgpu_ring { struct dma_fence *vmid_wait; bool has_compute_vm_bug; bool no_scheduler; - - atomic_t num_jobs[DRM_SCHED_PRIORITY_COUNT]; - struct mutex priority_mutex; - /* protected by priority_mutex */ - int priority; + int hw_prio; #if defined(CONFIG_DEBUG_FS) struct dentry *ent; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c new file mode 100644 index 000000000000..834440ab9ff7 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c @@ -0,0 +1,176 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * + */ +#include <linux/debugfs.h> +#include <linux/pm_runtime.h> + +#include "amdgpu.h" +#include "amdgpu_securedisplay.h" + +/** + * DOC: AMDGPU SECUREDISPLAY debugfs test interface + * + * how to use? + * echo opcode <value> > <debugfs_dir>/dri/xxx/securedisplay_test + * eg. echo 1 > <debugfs_dir>/dri/xxx/securedisplay_test + * eg. echo 2 phy_id > <debugfs_dir>/dri/xxx/securedisplay_test + * + * opcode: + * 1:Query whether TA is responding used only for validation pupose + * 2: Send region of Interest and CRC value to I2C. (uint32)phy_id is + * send to determine which DIO scratch register should be used to get + * ROI and receive i2c_buf as the output. 
+ * + * See the header file ta_securedisplay_if.h for more details. + * + */ + +void psp_securedisplay_parse_resp_status(struct psp_context *psp, + enum ta_securedisplay_status status) +{ + switch (status) { + case TA_SECUREDISPLAY_STATUS__SUCCESS: + break; + case TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE: + dev_err(psp->adev->dev, "Secure display: Generic Failure."); + break; + case TA_SECUREDISPLAY_STATUS__INVALID_PARAMETER: + dev_err(psp->adev->dev, "Secure display: Invalid Parameter."); + break; + case TA_SECUREDISPLAY_STATUS__NULL_POINTER: + dev_err(psp->adev->dev, "Secure display: Null Pointer."); + break; + case TA_SECUREDISPLAY_STATUS__I2C_WRITE_ERROR: + dev_err(psp->adev->dev, "Secure display: Failed to write to I2C."); + break; + case TA_SECUREDISPLAY_STATUS__READ_DIO_SCRATCH_ERROR: + dev_err(psp->adev->dev, "Secure display: Failed to Read DIO Scratch Register."); + break; + case TA_SECUREDISPLAY_STATUS__READ_CRC_ERROR: + dev_err(psp->adev->dev, "Secure display: Failed to Read CRC."); + break; + default: + dev_err(psp->adev->dev, "Secure display: Failed to parse status: %d\n", status); + } +} + +void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct securedisplay_cmd **cmd, + enum ta_securedisplay_command command_id) +{ + *cmd = (struct securedisplay_cmd *)psp->securedisplay_context.securedisplay_shared_buf; + memset(*cmd, 0, sizeof(struct securedisplay_cmd)); + (*cmd)->status = TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE; + (*cmd)->cmd_id = command_id; +} + +static ssize_t amdgpu_securedisplay_debugfs_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; + struct psp_context *psp = &adev->psp; + struct securedisplay_cmd *securedisplay_cmd; + struct drm_device *dev = adev_to_drm(adev); + uint32_t phy_id; + uint32_t op; + int i; + char str[64]; + char i2c_output[256]; + int ret; + + if (*pos || size > sizeof(str) - 1) + return -EINVAL; + + memset(str, 0, sizeof(str)); + ret = copy_from_user(str, buf, size); + if (ret) + return -EFAULT; + + ret = pm_runtime_get_sync(dev->dev); + if (ret < 0) { + pm_runtime_put_autosuspend(dev->dev); + return ret; + } + + if (size < 3) + sscanf(str, "%u ", &op); + else + sscanf(str, "%u %u", &op, &phy_id); + + switch (op) { + case 1: + psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, + TA_SECUREDISPLAY_COMMAND__QUERY_TA); + ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA); + if (!ret) { + if (securedisplay_cmd->status == TA_SECUREDISPLAY_STATUS__SUCCESS) + dev_info(adev->dev, "SECUREDISPLAY: query securedisplay TA ret is 0x%X\n", + securedisplay_cmd->securedisplay_out_message.query_ta.query_cmd_ret); + else + psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status); + } + break; + case 2: + psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, + TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC); + securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_id; + ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC); + if (!ret) { + if (securedisplay_cmd->status == TA_SECUREDISPLAY_STATUS__SUCCESS) { + memset(i2c_output, 0, sizeof(i2c_output)); + for (i = 0; i < TA_SECUREDISPLAY_I2C_BUFFER_SIZE; i++) + sprintf(i2c_output, "%s 0x%X", i2c_output, + securedisplay_cmd->securedisplay_out_message.send_roi_crc.i2c_buf[i]); + dev_info(adev->dev, "SECUREDISPLAY: I2C buffer output is:%s\n", i2c_output); + } else { + psp_securedisplay_parse_resp_status(psp,
securedisplay_cmd->status); + } + } + break; + default: + dev_err(adev->dev, "Invalid input: %s\n", str); + } + + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); + + return size; +} + +static const struct file_operations amdgpu_securedisplay_debugfs_ops = { + .owner = THIS_MODULE, + .read = NULL, + .write = amdgpu_securedisplay_debugfs_write, + .llseek = default_llseek +}; + +void amdgpu_securedisplay_debugfs_init(struct amdgpu_device *adev) +{ +#if defined(CONFIG_DEBUG_FS) + + if (!adev->psp.securedisplay_context.securedisplay_initialized) + return; + + debugfs_create_file("securedisplay_test", S_IWUSR, adev_to_drm(adev)->primary->debugfs_root, + adev, &amdgpu_securedisplay_debugfs_ops); +#endif +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h new file mode 100644 index 000000000000..fe98574748f4 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.h @@ -0,0 +1,36 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * + */ +#ifndef _AMDGPU_SECUREDISPLAY_H +#define _AMDGPU_SECUREDISPLAY_H + +#include "amdgpu.h" +#include "ta_secureDisplay_if.h" + +void amdgpu_securedisplay_debugfs_init(struct amdgpu_device *adev); +void psp_securedisplay_parse_resp_status(struct psp_context *psp, + enum ta_securedisplay_status status); +void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct securedisplay_cmd **cmd, + enum ta_securedisplay_command command_id); + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index 0e43b46d3ab5..46449e70348b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -122,6 +122,9 @@ struct ta_firmware_header_v1_0 { uint32_t ta_dtm_ucode_version; uint32_t ta_dtm_offset_bytes; uint32_t ta_dtm_size_bytes; + uint32_t ta_securedisplay_ucode_version; + uint32_t ta_securedisplay_offset_bytes; + uint32_t ta_securedisplay_size_bytes; }; enum ta_fw_type { @@ -132,6 +135,7 @@ enum ta_fw_type { TA_FW_TYPE_PSP_HDCP, TA_FW_TYPE_PSP_DTM, TA_FW_TYPE_PSP_RAP, + TA_FW_TYPE_PSP_SECUREDISPLAY, }; struct ta_fw_bin_desc { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 2d51b7694d1f..5da04d45b637 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -560,10 +560,14 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev) static void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work) { struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work); + int ret; - amdgpu_virt_read_pf2vf_data(adev); + ret = amdgpu_virt_read_pf2vf_data(adev); + if (ret) + goto out; amdgpu_virt_write_vf2pf_data(adev); +out: schedule_delayed_work(&(adev->virt.vf2pf_work), adev->virt.vf2pf_update_interval_ms); } @@ -571,8 +575,8 @@ void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev) { if (adev->virt.vf2pf_update_interval_ms != 0) { DRM_INFO("clean up the vf2pf work item\n"); - flush_delayed_work(&adev->virt.vf2pf_work); cancel_delayed_work_sync(&adev->virt.vf2pf_work); + adev->virt.vf2pf_update_interval_ms = 0; } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index d2de2a720a3d..c89b66bb70e2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -473,6 +473,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, for (i = 0; pages_left >= pages_per_node; ++i) { unsigned long pages = rounddown_pow_of_two(pages_left); + /* Limit maximum size to 2GB due to SG table limitations */ + pages = min(pages, (2UL << (30 - PAGE_SHIFT))); + r = drm_mm_insert_node_in_range(mm, &nodes[i], pages, pages_per_node, 0, place->fpfn, lpfn, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 541ef6be390f..659b385b27b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -324,7 +324,7 @@ static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev, struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) { - struct amdgpu_hive_info *hive = NULL, *tmp = NULL; + struct amdgpu_hive_info *hive = NULL; int ret; if (!adev->gmc.xgmi.hive_id) @@ -337,11 +337,9 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev) mutex_lock(&xgmi_mutex); - if (!list_empty(&xgmi_hive_list)) { - 
list_for_each_entry_safe(hive, tmp, &xgmi_hive_list, node) { - if (hive->hive_id == adev->gmc.xgmi.hive_id) - goto pro_end; - } + list_for_each_entry(hive, &xgmi_hive_list, node) { + if (hive->hive_id == adev->gmc.xgmi.hive_id) + goto pro_end; } hive = kzalloc(sizeof(*hive), GFP_KERNEL); diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 13737b317f7c..4d6832cc7fb0 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1251,13 +1251,22 @@ static void kv_restore_regs_for_reset(struct amdgpu_device *adev, WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute); } -static int cik_gpu_pci_config_reset(struct amdgpu_device *adev) +/** + * cik_asic_pci_config_reset - soft reset GPU + * + * @adev: amdgpu_device pointer + * + * Use PCI Config method to reset the GPU. + * + * Returns 0 for success. + */ +static int cik_asic_pci_config_reset(struct amdgpu_device *adev) { struct kv_reset_save_regs kv_save = { 0 }; u32 i; int r = -EINVAL; - dev_info(adev->dev, "GPU pci config reset\n"); + amdgpu_atombios_scratch_regs_engine_hung(adev, true); if (adev->flags & AMD_IS_APU) kv_save_regs_for_reset(adev, &kv_save); @@ -1285,26 +1294,6 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev) if (adev->flags & AMD_IS_APU) kv_restore_regs_for_reset(adev, &kv_save); - return r; -} - -/** - * cik_asic_pci_config_reset - soft reset GPU - * - * @adev: amdgpu_device pointer - * - * Use PCI Config method to reset the GPU. - * - * Returns 0 for success. - */ -static int cik_asic_pci_config_reset(struct amdgpu_device *adev) -{ - int r; - - amdgpu_atombios_scratch_regs_engine_hung(adev, true); - - r = cik_gpu_pci_config_reset(adev); - amdgpu_atombios_scratch_regs_engine_hung(adev, false); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 10aae0abcffb..45d1172b7bff 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -70,6 +70,11 @@ #define GB_ADDR_CONFIG__NUM_PKRS__SHIFT 0x8 #define GB_ADDR_CONFIG__NUM_PKRS_MASK 0x00000700L +#define mmCGTS_TCC_DISABLE_gc_10_3 0x5006 +#define mmCGTS_TCC_DISABLE_gc_10_3_BASE_IDX 1 +#define mmCGTS_USER_TCC_DISABLE_gc_10_3 0x5007 +#define mmCGTS_USER_TCC_DISABLE_gc_10_3_BASE_IDX 1 + #define mmCP_MEC_CNTL_Sienna_Cichlid 0x0f55 #define mmCP_MEC_CNTL_Sienna_Cichlid_BASE_IDX 0 #define mmRLC_SAFE_MODE_Sienna_Cichlid 0x4ca0 @@ -98,6 +103,10 @@ #define mmGCR_GENERAL_CNTL_Sienna_Cichlid 0x1580 #define mmGCR_GENERAL_CNTL_Sienna_Cichlid_BASE_IDX 0 +#define mmGOLDEN_TSC_COUNT_UPPER_Vangogh 0x0025 +#define mmGOLDEN_TSC_COUNT_UPPER_Vangogh_BASE_IDX 1 +#define mmGOLDEN_TSC_COUNT_LOWER_Vangogh 0x0026 +#define mmGOLDEN_TSC_COUNT_LOWER_Vangogh_BASE_IDX 1 #define mmSPI_CONFIG_CNTL_1_Vangogh 0x2441 #define mmSPI_CONFIG_CNTL_1_Vangogh_BASE_IDX 1 #define mmVGT_TF_MEMORY_BASE_HI_Vangogh 0x2261 @@ -114,6 +123,9 @@ #define mmVGT_ESGS_RING_SIZE_Vangogh_BASE_IDX 1 #define mmSPI_CONFIG_CNTL_Vangogh 0x2440 #define mmSPI_CONFIG_CNTL_Vangogh_BASE_IDX 1 +#define mmGCR_GENERAL_CNTL_Vangogh 0x1580 +#define mmGCR_GENERAL_CNTL_Vangogh_BASE_IDX 0 +#define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh 0x0000FFFFL #define mmCP_HYP_PFP_UCODE_ADDR 0x5814 #define mmCP_HYP_PFP_UCODE_ADDR_BASE_IDX 1 @@ -159,6 +171,9 @@ #define mmGCVM_L2_CGTT_CLK_CTRL_Sienna_Cichlid 0x15db #define mmGCVM_L2_CGTT_CLK_CTRL_Sienna_Cichlid_BASE_IDX 0 +#define mmGC_THROTTLE_CTRL_Sienna_Cichlid 0x2030 +#define mmGC_THROTTLE_CTRL_Sienna_Cichlid_BASE_IDX 0 + 
MODULE_FIRMWARE("amdgpu/navi10_ce.bin"); MODULE_FIRMWARE("amdgpu/navi10_pfp.bin"); MODULE_FIRMWARE("amdgpu/navi10_me.bin"); @@ -3236,7 +3251,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000142), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Vangogh, 0x1ff1ffff, 0x00000500), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x000000e4), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210), @@ -3323,6 +3338,7 @@ static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume); static void gfx_v10_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure); static u32 gfx_v10_3_get_disabled_sa(struct amdgpu_device *adev); static void gfx_v10_3_program_pbb_mode(struct amdgpu_device *adev); +static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev); static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask) { @@ -3767,9 +3783,6 @@ static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev) if (!gfx_v10_0_navi10_gfxoff_should_enable(adev)) adev->pm.pp_feature &= ~PP_GFXOFF_MASK; break; - case CHIP_VANGOGH: - adev->pm.pp_feature &= ~PP_GFXOFF_MASK; - break; default: break; } @@ -4479,8 +4492,7 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec) + ring->pipe; - hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe, - ring->queue) ? + hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ? AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL; /* type-2 packets are deprecated on MEC, use type-3 instead */ r = amdgpu_ring_init(adev, ring, 1024, @@ -4925,8 +4937,15 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev) static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev) { /* TCCs are global (not instanced). 
*/ - uint32_t tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) | - RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE); + uint32_t tcc_disable; + + if (adev->asic_type >= CHIP_SIENNA_CICHLID) { + tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE_gc_10_3) | + RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE_gc_10_3); + } else { + tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) | + RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE); + } adev->gfx.config.tcc_disabled_mask = REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) | @@ -6522,8 +6541,7 @@ static void gfx_v10_0_compute_mqd_set_priority(struct amdgpu_ring *ring, struct struct amdgpu_device *adev = ring->adev; if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { - if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe, - ring->queue)) { + if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) { mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH; mqd->cp_hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; @@ -7191,6 +7209,9 @@ static int gfx_v10_0_hw_init(void *handle) if (adev->asic_type == CHIP_SIENNA_CICHLID) gfx_v10_3_program_pbb_mode(adev); + if (adev->asic_type >= CHIP_SIENNA_CICHLID) + gfx_v10_3_set_power_brake_sequence(adev); + return r; } @@ -7376,8 +7397,16 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev) amdgpu_gfx_off_ctrl(adev, false); mutex_lock(&adev->gfx.gpu_clock_mutex); - clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) | - ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL); + switch (adev->asic_type) { + case CHIP_VANGOGH: + clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh) | + ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh) << 32ULL); + break; + default: + clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) | + ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL); + break; + } mutex_unlock(&adev->gfx.gpu_clock_mutex); amdgpu_gfx_off_ctrl(adev, true); return clock; @@ -7811,6 +7840,20 @@ static void gfx_v10_cntl_power_gating(struct amdgpu_device *adev, bool enable) data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; WREG32_SOC15(GC, 0, mmRLC_PG_CNTL, data); + + /* + * CGPG enablement requires programming the hysteresis value into + * RLC_PG_DELAY_3.CGCG_ACTIVE_BEFORE_CGPG, expressed in refclk counts. + * Note that RLC FW is modified to take 16 bits from + * RLC_PG_DELAY_3[15:0] as the hysteresis instead of just 8 bits. + * + * The RLC team recommends setting RLC_PG_DELAY_3 to 200us (0x4E20) + * as the starting point for CGPG enablement.
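+ * + * Worked example: 0x4E20 is 20000 decimal, so assuming a 100MHz + * reference clock (10ns per count; stated here for illustration), the + * hysteresis is 20000 * 10ns = 200us.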
+ */ + if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && adev->asic_type == CHIP_VANGOGH) { + data = 0x4E20 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh; + WREG32_SOC15(GC, 0, mmRLC_PG_DELAY_3, data); + } } static void gfx_v10_cntl_pg(struct amdgpu_device *adev, bool enable) @@ -7872,6 +7915,7 @@ static int gfx_v10_0_set_powergating_state(void *handle, break; case CHIP_VANGOGH: gfx_v10_cntl_pg(adev, enable); + amdgpu_gfx_off_ctrl(adev, enable); break; default: break; @@ -9168,6 +9212,31 @@ static void gfx_v10_3_program_pbb_mode(struct amdgpu_device *adev) } } +static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev) +{ + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, + (0x1 << GRBM_GFX_INDEX__SA_BROADCAST_WRITES__SHIFT) | + (0x1 << GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES__SHIFT) | + (0x1 << GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT)); + + WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, ixPWRBRK_STALL_PATTERN_CTRL); + WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, + (0x1 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_STEP_INTERVAL__SHIFT) | + (0x12 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_BEGIN_STEP__SHIFT) | + (0x13 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_END_STEP__SHIFT) | + (0xf << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_THROTTLE_PATTERN_BIT_NUMS__SHIFT)); + + WREG32_SOC15(GC, 0, mmGC_THROTTLE_CTRL_Sienna_Cichlid, + (0x1 << GC_THROTTLE_CTRL__PWRBRK_STALL_EN__SHIFT) | + (0x1 << GC_THROTTLE_CTRL__PATTERN_MODE__SHIFT) | + (0x5 << GC_THROTTLE_CTRL__RELEASE_STEP_INTERVAL__SHIFT)); + + WREG32_SOC15(GC, 0, mmDIDT_IND_INDEX, ixDIDT_SQ_THROTTLE_CTRL); + + WREG32_SOC15(GC, 0, mmDIDT_IND_DATA, + (0x1 << DIDT_SQ_THROTTLE_CTRL__PWRBRK_STALL_EN__SHIFT)); +} + const struct amdgpu_ip_block_version gfx_v10_0_ip_block = { .type = AMD_IP_BLOCK_TYPE_GFX, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 37639214cbbb..84d2eaa38101 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -29,6 +29,7 @@ #include "amdgpu.h" #include "amdgpu_gfx.h" +#include "amdgpu_ring.h" #include "vi.h" #include "vi_structs.h" #include "vid.h" @@ -1923,8 +1924,7 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec) + ring->pipe; - hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe, - ring->queue) ? + hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ? AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_RING_PRIO_DEFAULT; /* type-2 packets are deprecated on MEC, use type-3 instead */ r = amdgpu_ring_init(adev, ring, 1024, @@ -4442,8 +4442,7 @@ static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *m struct amdgpu_device *adev = ring->adev; if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { - if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe, - ring->queue)) { + if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) { mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH; mqd->cp_hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; @@ -6847,6 +6846,66 @@ static void gfx_v8_0_emit_mem_sync_compute(struct amdgpu_ring *ring) amdgpu_ring_write(ring, 0x0000000A); /* poll interval */ } + +/* The mmSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are all the same */ +#define mmSPI_WCL_PIPE_PERCENT_CS_DEFAULT 0x0000007f +static void gfx_v8_0_emit_wave_limit_cs(struct amdgpu_ring *ring, + uint32_t pipe, bool enable) +{ + uint32_t val; + uint32_t wcl_cs_reg; + + val = enable ?
0x1 : mmSPI_WCL_PIPE_PERCENT_CS_DEFAULT; + + switch (pipe) { + case 0: + wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS0; + break; + case 1: + wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS1; + break; + case 2: + wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS2; + break; + case 3: + wcl_cs_reg = mmSPI_WCL_PIPE_PERCENT_CS3; + break; + default: + DRM_DEBUG("invalid pipe %d\n", pipe); + return; + } + + amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val); + +} + +#define mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT 0x07ffffff +static void gfx_v8_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t val; + int i; + + /* mmSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to + * limit the number of gfx waves. Writing 0x1f (5 bits set out of the + * 7-bit range) leaves gfx with only around 25% of GPU resources. + */ + val = enable ? 0x1f : mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT; + amdgpu_ring_emit_wreg(ring, mmSPI_WCL_PIPE_PERCENT_GFX, val); + + /* Restrict waves for normal/low priority compute queues as well + * to get the best QoS for high priority compute jobs. + * + * amdgpu controls only the 1st ME (CS pipes 0-3). + */ + for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) { + if (i != ring->pipe) + gfx_v8_0_emit_wave_limit_cs(ring, i, enable); + + } + +} + static const struct amd_ip_funcs gfx_v8_0_ip_funcs = { .name = "gfx_v8_0", .early_init = gfx_v8_0_early_init, @@ -6930,7 +6989,9 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { 7 + /* gfx_v8_0_ring_emit_pipeline_sync */ VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */ 7 + 7 + 7 + /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */ - 7, /* gfx_v8_0_emit_mem_sync_compute */ + 7 + /* gfx_v8_0_emit_mem_sync_compute */ + 5 + /* gfx_v8_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */ + 15, /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */ .emit_ib_size = 7, /* gfx_v8_0_ring_emit_ib_compute */ .emit_ib = gfx_v8_0_ring_emit_ib_compute, .emit_fence = gfx_v8_0_ring_emit_fence_compute, @@ -6944,6 +7005,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { .pad_ib = amdgpu_ring_generic_pad_ib, .emit_wreg = gfx_v8_0_ring_emit_wreg, .emit_mem_sync = gfx_v8_0_emit_mem_sync_compute, + .emit_wave_limit = gfx_v8_0_emit_wave_limit, }; static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index a896e3d0fcf8..65db88bb6cbc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -52,6 +52,7 @@ #include "asic_reg/pwr/pwr_10_0_offset.h" #include "asic_reg/pwr/pwr_10_0_sh_mask.h" +#include "asic_reg/gc/gc_9_0_default.h" #define GFX9_NUM_GFX_RINGS 1 #define GFX9_MEC_HPD_SIZE 4096 @@ -2227,8 +2228,7 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec) + ring->pipe; - hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->pipe, - ring->queue) ? + hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring) ?
AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL; /* type-2 packets are deprecated on MEC, use type-3 instead */ return amdgpu_ring_init(adev, ring, 1024, @@ -3390,9 +3390,7 @@ static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *m struct amdgpu_device *adev = ring->adev; if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { - if (amdgpu_gfx_is_high_priority_compute_queue(adev, - ring->pipe, - ring->queue)) { + if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring)) { mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH; mqd->cp_hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM; @@ -6670,6 +6668,65 @@ static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring) amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */ } +static void gfx_v9_0_emit_wave_limit_cs(struct amdgpu_ring *ring, + uint32_t pipe, bool enable) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t val; + uint32_t wcl_cs_reg; + + /* The mmSPI_WCL_PIPE_PERCENT_CS[0-7]_DEFAULT values are all the same */ + val = enable ? 0x1 : mmSPI_WCL_PIPE_PERCENT_CS0_DEFAULT; + + switch (pipe) { + case 0: + wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS0); + break; + case 1: + wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS1); + break; + case 2: + wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS2); + break; + case 3: + wcl_cs_reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_CS3); + break; + default: + DRM_DEBUG("invalid pipe %d\n", pipe); + return; + } + + amdgpu_ring_emit_wreg(ring, wcl_cs_reg, val); + +} +static void gfx_v9_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable) +{ + struct amdgpu_device *adev = ring->adev; + uint32_t val; + int i; + + + /* mmSPI_WCL_PIPE_PERCENT_GFX is a 7-bit multiplier register used to + * limit the number of gfx waves. Writing 0x1f (5 bits set out of the + * 7-bit range) leaves gfx with only around 25% of GPU resources. + */ + val = enable ? 0x1f : mmSPI_WCL_PIPE_PERCENT_GFX_DEFAULT; + amdgpu_ring_emit_wreg(ring, + SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX), + val); + + /* Restrict waves for normal/low priority compute queues as well + * to get the best QoS for high priority compute jobs. + * + * amdgpu controls only the 1st ME (CS pipes 0-3).
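+ * + * For example, with num_pipe_per_mec = 4 and a high priority queue on + * pipe 1, the loop below throttles CS pipes 0, 2 and 3 while leaving + * the high priority pipe itself unrestricted.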
+ */ + for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) { + if (i != ring->pipe) + gfx_v9_0_emit_wave_limit_cs(ring, i, enable); + + } +} + static const struct amd_ip_funcs gfx_v9_0_ip_funcs = { .name = "gfx_v9_0", .early_init = gfx_v9_0_early_init, @@ -6759,7 +6816,9 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = { SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 + 2 + /* gfx_v9_0_ring_emit_vm_flush */ 8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */ - 7, /* gfx_v9_0_emit_mem_sync */ + 7 + /* gfx_v9_0_emit_mem_sync */ + 5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */ + 15, /* for updating 3 mmSPI_WCL_PIPE_PERCENT_CS registers */ .emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */ .emit_ib = gfx_v9_0_ring_emit_ib_compute, .emit_fence = gfx_v9_0_ring_emit_fence, @@ -6775,6 +6834,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = { .emit_reg_wait = gfx_v9_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait, .emit_mem_sync = gfx_v9_0_emit_mem_sync, + .emit_wave_limit = gfx_v9_0_emit_wave_limit, }; static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index aedef9017c4c..3686e777c76c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -239,46 +239,44 @@ static const char *mmhub_client_ids_vega20[][2] = { }; static const char *mmhub_client_ids_arcturus[][2] = { + [0][0] = "DBGU1", + [1][0] = "XDP", [2][0] = "MP1", - [3][0] = "MP0", - [10][0] = "UTCL2", - [13][0] = "OSS", [14][0] = "HDP", - [15][0] = "SDMA0", - [32+15][0] = "SDMA1", - [64+15][0] = "SDMA2", - [96+15][0] = "SDMA3", - [128+15][0] = "SDMA4", - [160+11][0] = "JPEG", - [160+12][0] = "VCN", - [160+13][0] = "VCNU", - [160+15][0] = "SDMA5", - [192+10][0] = "UTCL2", - [192+11][0] = "JPEG1", - [192+12][0] = "VCN1", - [192+13][0] = "VCN1U", - [192+15][0] = "SDMA6", - [224+15][0] = "SDMA7", + [171][0] = "JPEG", + [172][0] = "VCN", + [173][0] = "VCNU", + [203][0] = "JPEG1", + [204][0] = "VCN1", + [205][0] = "VCN1U", + [256][0] = "SDMA0", + [257][0] = "SDMA1", + [258][0] = "SDMA2", + [259][0] = "SDMA3", + [260][0] = "SDMA4", + [261][0] = "SDMA5", + [262][0] = "SDMA6", + [263][0] = "SDMA7", + [384][0] = "OSS", [0][1] = "DBGU1", [1][1] = "XDP", [2][1] = "MP1", - [3][1] = "MP0", - [13][1] = "OSS", [14][1] = "HDP", - [15][1] = "SDMA0", - [32+15][1] = "SDMA1", - [64+15][1] = "SDMA2", - [96+15][1] = "SDMA3", - [128+15][1] = "SDMA4", - [160+11][1] = "JPEG", - [160+12][1] = "VCN", - [160+13][1] = "VCNU", - [160+15][1] = "SDMA5", - [192+11][1] = "JPEG1", - [192+12][1] = "VCN1", - [192+13][1] = "VCN1U", - [192+15][1] = "SDMA6", - [224+15][1] = "SDMA7", + [171][1] = "JPEG", + [172][1] = "VCN", + [173][1] = "VCNU", + [203][1] = "JPEG1", + [204][1] = "VCN1", + [205][1] = "VCN1U", + [256][1] = "SDMA0", + [257][1] = "SDMA1", + [258][1] = "SDMA2", + [259][1] = "SDMA3", + [260][1] = "SDMA4", + [261][1] = "SDMA5", + [262][1] = "SDMA6", + [263][1] = "SDMA7", + [384][1] = "OSS", }; static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] = diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c index 07104a1de308..ab9be5ad5a5f 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c @@ -491,12 +491,11 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev, { uint32_t def, data, 
def1, data1; - def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG); + def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL); def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2); if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) { - data |= MM_ATC_L2_MISC_CG__ENABLE_MASK; - + data &= ~MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK; data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK | DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK | DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK | @@ -505,8 +504,7 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev, DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK); } else { - data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK; - + data |= MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK; data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK | DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK | DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK | @@ -516,7 +514,7 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev, } if (def != data) - WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data); + WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data); if (def1 != data1) WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1); } @@ -525,17 +523,44 @@ static void mmhub_v2_3_update_medium_grain_light_sleep(struct amdgpu_device *adev, bool enable) { - uint32_t def, data; - - def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG); - - if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) - data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK; - else - data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK; + uint32_t def, data, def1, data1, def2, data2; + + def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL); + def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL); + def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL); + + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) { + data &= ~MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK; + data1 &= ~(DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK); + data2 &= ~(DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK); + } else { + data |= MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK; + data1 |= (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK); + data2 |= (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK); + } if (def != data) - WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data); + WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data); + if (def1 != data1) + WREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL, data1); + if (def2 != data2) + WREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL, data2); } static int mmhub_v2_3_set_clockgating(struct amdgpu_device *adev, @@ -554,26 +579,39 @@ static int mmhub_v2_3_set_clockgating(struct amdgpu_device *adev, static void mmhub_v2_3_get_clockgating(struct amdgpu_device *adev, u32 *flags) { - 
int data, data1; + int data, data1, data2, data3; if (amdgpu_sriov_vf(adev)) *flags = 0; - data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG); - data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2); + data = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2); + data1 = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL); + data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL); + data3 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL); /* AMD_CG_SUPPORT_MC_MGCG */ - if ((data & MM_ATC_L2_MISC_CG__ENABLE_MASK) && - !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK | + if (!(data & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK | DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK | DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK | DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK | DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK | - DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK))) - *flags |= AMD_CG_SUPPORT_MC_MGCG; + DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)) + && !(data1 & MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK)) { + *flags |= AMD_CG_SUPPORT_MC_MGCG; + } /* AMD_CG_SUPPORT_MC_LS */ - if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK) + if (!(data1 & MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK) + && !(data2 & (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK)) + && !(data3 & (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK))) *flags |= AMD_CG_SUPPORT_MC_LS; } diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index 7767ccca526b..3ee481557fc9 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -255,6 +255,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) if (!down_read_trylock(&adev->reset_sem)) return; + amdgpu_virt_fini_data_exchange(adev); atomic_set(&adev->in_gpu_reset, 1); do { diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index dd5c1e6ce009..48e588d3c409 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -276,6 +276,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work) if (!down_read_trylock(&adev->reset_sem)) return; + amdgpu_virt_fini_data_exchange(adev); atomic_set(&adev->in_gpu_reset, 1); do { diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c index b860f1c7b5b1..05ddec7ba7e2 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c @@ -34,6 +34,14 @@ #define smnCPM_CONTROL 0x11180460 #define smnPCIE_CNTL2 0x11180070 #define smnPCIE_LC_CNTL 0x11140280 +#define smnPCIE_LC_CNTL3 0x111402d4 +#define smnPCIE_LC_CNTL6 0x111402ec +#define smnPCIE_LC_CNTL7 0x111402f0 +#define smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2 0x1014008c +#define smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL 0x10123538 +#define smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP 0x10140324 +#define smnPSWUSP0_PCIE_LC_CNTL2 0x111402c4 +#define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c #define mmBIF_SDMA2_DOORBELL_RANGE 0x01d6 #define mmBIF_SDMA2_DOORBELL_RANGE_BASE_IDX 2 @@ -350,6 +358,111 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev, WREG32_PCIE(smnPCIE_LC_CNTL, data); } +static void nbio_v2_3_program_ltr(struct 
amdgpu_device *adev) +{ + uint32_t def, data; + + WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, 0x75EB); + + def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP2); + data &= ~RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK; + if (def != data) + WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP2, data); + + def = data = RREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL); + data &= ~EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK; + if (def != data) + WREG32_PCIE(smnRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, data); + + def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2); + data |= BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK; + if (def != data) + WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data); +} + +static void nbio_v2_3_program_aspm(struct amdgpu_device *adev) +{ + uint32_t def, data; + + def = data = RREG32_PCIE(smnPCIE_LC_CNTL); + data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK; + data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; + data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK; + if (def != data) + WREG32_PCIE(smnPCIE_LC_CNTL, data); + + def = data = RREG32_PCIE(smnPCIE_LC_CNTL7); + data |= PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK; + if (def != data) + WREG32_PCIE(smnPCIE_LC_CNTL7, data); + + def = data = RREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK); + data |= NBIF_MGCG_CTRL_LCLK__NBIF_MGCG_REG_DIS_LCLK_MASK; + if (def != data) + WREG32_PCIE(smnNBIF_MGCG_CTRL_LCLK, data); + + def = data = RREG32_PCIE(smnPCIE_LC_CNTL3); + data |= PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK; + if (def != data) + WREG32_PCIE(smnPCIE_LC_CNTL3, data); + + def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3); + data &= ~RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK; + data &= ~RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK; + if (def != data) + WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3, data); + + def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5); + data &= ~RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK; + if (def != data) + WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5, data); + + def = data = RREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2); + data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK; + if (def != data) + WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data); + + WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP, 0x10011001); + + def = data = RREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2); + data |= PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK | + PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK; + data &= ~PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK; + if (def != data) + WREG32_PCIE(smnPSWUSP0_PCIE_LC_CNTL2, data); + + def = data = RREG32_PCIE(smnPCIE_LC_CNTL6); + data |= PCIE_LC_CNTL6__LC_L1_POWERDOWN_MASK | + PCIE_LC_CNTL6__LC_RX_L0S_STANDBY_EN_MASK; + if (def != data) + WREG32_PCIE(smnPCIE_LC_CNTL6, data); + + nbio_v2_3_program_ltr(adev); + + def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3); + data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT; + data |= 0x0010 << RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT; + if (def != data) + WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3, data); + + def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5); + data |= 0x0010 << RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT; + if (def != data) + WREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP5, data); + + def = data = RREG32_PCIE(smnPCIE_LC_CNTL); + data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; + data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT; + data |= 0x1 << PCIE_LC_CNTL__LC_PMI_TO_L1_DIS__SHIFT; + if (def != data) + WREG32_PCIE(smnPCIE_LC_CNTL, data); + + def = data = 
RREG32_PCIE(smnPCIE_LC_CNTL3); + data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK; + if (def != data) + WREG32_PCIE(smnPCIE_LC_CNTL3, data); +} + const struct amdgpu_nbio_funcs nbio_v2_3_funcs = { .get_hdp_flush_req_offset = nbio_v2_3_get_hdp_flush_req_offset, .get_hdp_flush_done_offset = nbio_v2_3_get_hdp_flush_done_offset, @@ -370,4 +483,5 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = { .init_registers = nbio_v2_3_init_registers, .remap_hdp_registers = nbio_v2_3_remap_hdp_registers, .enable_aspm = nbio_v2_3_enable_aspm, + .program_aspm = nbio_v2_3_program_aspm, }; diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 1d785f06c79d..c625c5d8ed89 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -38,7 +38,6 @@ #include "gc/gc_10_1_0_offset.h" #include "gc/gc_10_1_0_sh_mask.h" -#include "smuio/smuio_11_0_0_offset.h" #include "mp/mp_11_0_offset.h" #include "soc15.h" @@ -61,6 +60,8 @@ #include "dce_virtual.h" #include "mes_v10_1.h" #include "mxgpu_nv.h" +#include "smuio_v11_0.h" +#include "smuio_v11_0_6.h" static const struct amd_ip_funcs nv_common_ip_funcs; @@ -202,6 +203,7 @@ static bool nv_read_bios_from_rom(struct amdgpu_device *adev, { u32 *dw_ptr; u32 i, length_dw; + u32 rom_index_offset, rom_data_offset; if (bios == NULL) return false; @@ -214,11 +216,16 @@ static bool nv_read_bios_from_rom(struct amdgpu_device *adev, dw_ptr = (u32 *)bios; length_dw = ALIGN(length_bytes, 4) / 4; + rom_index_offset = + adev->smuio.funcs->get_rom_index_offset(adev); + rom_data_offset = + adev->smuio.funcs->get_rom_data_offset(adev); + /* set rom index to 0 */ - WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0); + WREG32(rom_index_offset, 0); /* read out the rom data */ for (i = 0; i < length_dw; i++) - dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA)); + dw_ptr[i] = RREG32(rom_data_offset); return true; } @@ -335,6 +342,38 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev) return ret; } +static int nv_asic_mode2_reset(struct amdgpu_device *adev) +{ + u32 i; + int ret = 0; + + amdgpu_atombios_scratch_regs_engine_hung(adev, true); + + /* disable BM */ + pci_clear_master(adev->pdev); + + amdgpu_device_cache_pci_state(adev->pdev); + + ret = amdgpu_dpm_mode2_reset(adev); + if (ret) + dev_err(adev->dev, "GPU mode2 reset failed\n"); + + amdgpu_device_load_pci_state(adev->pdev); + + /* wait for asic to come out of reset */ + for (i = 0; i < adev->usec_timeout; i++) { + u32 memsize = adev->nbio.funcs->get_memsize(adev); + + if (memsize != 0xffffffff) + break; + udelay(1); + } + + amdgpu_atombios_scratch_regs_engine_hung(adev, false); + + return ret; +} + static bool nv_asic_supports_baco(struct amdgpu_device *adev) { struct smu_context *smu = &adev->smu; @@ -351,7 +390,9 @@ nv_asic_reset_method(struct amdgpu_device *adev) struct smu_context *smu = &adev->smu; if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 || - amdgpu_reset_method == AMD_RESET_METHOD_BACO) + amdgpu_reset_method == AMD_RESET_METHOD_MODE2 || + amdgpu_reset_method == AMD_RESET_METHOD_BACO || + amdgpu_reset_method == AMD_RESET_METHOD_PCI) return amdgpu_reset_method; if (amdgpu_reset_method != -1) @@ -359,6 +400,8 @@ nv_asic_reset_method(struct amdgpu_device *adev) amdgpu_reset_method); switch (adev->asic_type) { + case CHIP_VANGOGH: + return AMD_RESET_METHOD_MODE2; case CHIP_SIENNA_CICHLID: case CHIP_NAVY_FLOUNDER: case CHIP_DIMGREY_CAVEFISH: @@ -376,7 +419,16 @@ static int nv_asic_reset(struct amdgpu_device *adev) int ret = 0; struct 
smu_context *smu = &adev->smu; - if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { + /* skip reset on vangogh for now */ + if (adev->asic_type == CHIP_VANGOGH) + return 0; + + switch (nv_asic_reset_method(adev)) { + case AMD_RESET_METHOD_PCI: + dev_info(adev->dev, "PCI reset\n"); + ret = amdgpu_device_pci_reset(adev); + break; + case AMD_RESET_METHOD_BACO: dev_info(adev->dev, "BACO reset\n"); ret = smu_baco_enter(smu); @@ -385,9 +437,15 @@ static int nv_asic_reset(struct amdgpu_device *adev) ret = smu_baco_exit(smu); if (ret) return ret; - } else { + break; + case AMD_RESET_METHOD_MODE2: + dev_info(adev->dev, "MODE2 reset\n"); + ret = nv_asic_mode2_reset(adev); + break; + default: dev_info(adev->dev, "MODE1 reset\n"); ret = nv_asic_mode1_reset(adev); + break; } return ret; @@ -422,11 +480,14 @@ static void nv_pcie_gen3_enable(struct amdgpu_device *adev) static void nv_program_aspm(struct amdgpu_device *adev) { - - if (amdgpu_aspm == 0) + if (amdgpu_aspm != 1) return; - /* todo */ + if ((adev->asic_type >= CHIP_SIENNA_CICHLID) && + !(adev->flags & AMD_IS_APU) && + (adev->nbio.funcs->program_aspm)) + adev->nbio.funcs->program_aspm(adev); + } static void nv_enable_doorbell_aperture(struct amdgpu_device *adev, @@ -497,7 +558,8 @@ static bool nv_is_headless_sku(struct pci_dev *pdev) { if ((pdev->device == 0x731E && (pdev->revision == 0xC6 || pdev->revision == 0xC7)) || - (pdev->device == 0x7340 && pdev->revision == 0xC9)) + (pdev->device == 0x7340 && pdev->revision == 0xC9) || + (pdev->device == 0x7360 && pdev->revision == 0xC7)) return true; return false; } @@ -515,6 +577,11 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) } adev->hdp.funcs = &hdp_v5_0_funcs; + if (adev->asic_type >= CHIP_SIENNA_CICHLID) + adev->smuio.funcs = &smuio_v11_0_6_funcs; + else + adev->smuio.funcs = &smuio_v11_0_funcs; + if (adev->asic_type == CHIP_SIENNA_CICHLID) adev->gmc.xgmi.supported = true; @@ -568,7 +635,8 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && !amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); + if (!nv_is_headless_sku(adev->pdev)) + amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); if (!amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block); break; @@ -752,10 +820,10 @@ static int nv_update_umd_stable_pstate(struct amdgpu_device *adev, * The ASPM function is not fully enabled and verified on * Navi yet. Temporarily skip this until ASPM enabled. 
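+ * With this change it is toggled only on Sienna Cichlid and newer + * dGPUs that provide the nbio enable_aspm() callback.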
*/ -#if 0 - if (adev->nbio.funcs->enable_aspm) + if ((adev->asic_type >= CHIP_SIENNA_CICHLID) && + !(adev->flags & AMD_IS_APU) && + (adev->nbio.funcs->enable_aspm)) adev->nbio.funcs->enable_aspm(adev, !enter); -#endif return 0; } @@ -1083,6 +1151,8 @@ static int nv_common_set_clockgating_state(void *handle, state == AMD_CG_STATE_GATE); adev->hdp.funcs->update_clock_gating(adev, state == AMD_CG_STATE_GATE); + adev->smuio.funcs->update_rom_clock_gating(adev, + state == AMD_CG_STATE_GATE); break; default: break; @@ -1108,6 +1178,8 @@ static void nv_common_get_clockgating_state(void *handle, u32 *flags) adev->hdp.funcs->get_clock_gating_state(adev, flags); + adev->smuio.funcs->get_clock_gating_state(adev, flags); + return; } diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index d7f92634eba2..4b1cc5e9ee92 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -92,8 +92,6 @@ static int psp_v10_0_init_microcode(struct psp_context *psp) (uint8_t *)ta_hdr + le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); - adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); - adev->psp.ta_dtm_ucode_version = le32_to_cpu(ta_hdr->ta_dtm_ucode_version); adev->psp.ta_dtm_ucode_size = @@ -101,6 +99,16 @@ static int psp_v10_0_init_microcode(struct psp_context *psp) adev->psp.ta_dtm_start_addr = (uint8_t *)adev->psp.ta_hdcp_start_addr + le32_to_cpu(ta_hdr->ta_dtm_offset_bytes); + + adev->psp.ta_securedisplay_ucode_version = + le32_to_cpu(ta_hdr->ta_securedisplay_ucode_version); + adev->psp.ta_securedisplay_ucode_size = + le32_to_cpu(ta_hdr->ta_securedisplay_size_bytes); + adev->psp.ta_securedisplay_start_addr = + (uint8_t *)adev->psp.ta_hdcp_start_addr + + le32_to_cpu(ta_hdr->ta_securedisplay_offset_bytes); + + adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 3cf0589bfea5..6b5cf7882a12 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -1270,7 +1270,7 @@ static int si_gpu_pci_config_reset(struct amdgpu_device *adev) u32 i; int r = -EINVAL; - dev_info(adev->dev, "GPU pci config reset\n"); + amdgpu_atombios_scratch_regs_engine_hung(adev, true); /* set mclk/sclk to bypass */ si_set_clk_bypass_mode(adev); @@ -1294,20 +1294,6 @@ static int si_gpu_pci_config_reset(struct amdgpu_device *adev) } udelay(1); } - - return r; -} - -static int si_asic_reset(struct amdgpu_device *adev) -{ - int r; - - dev_info(adev->dev, "PCI CONFIG reset\n"); - - amdgpu_atombios_scratch_regs_engine_hung(adev, true); - - r = si_gpu_pci_config_reset(adev); - amdgpu_atombios_scratch_regs_engine_hung(adev, false); return r; @@ -1321,14 +1307,34 @@ static bool si_asic_supports_baco(struct amdgpu_device *adev) static enum amd_reset_method si_asic_reset_method(struct amdgpu_device *adev) { - if (amdgpu_reset_method != AMD_RESET_METHOD_LEGACY && - amdgpu_reset_method != -1) + if (amdgpu_reset_method == AMD_RESET_METHOD_PCI) + return amdgpu_reset_method; + else if (amdgpu_reset_method != AMD_RESET_METHOD_LEGACY && + amdgpu_reset_method != -1) dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n", - amdgpu_reset_method); + amdgpu_reset_method); return AMD_RESET_METHOD_LEGACY; } +static int si_asic_reset(struct amdgpu_device *adev) +{ + int r; + + switch (si_asic_reset_method(adev)) { + case AMD_RESET_METHOD_PCI: + dev_info(adev->dev, "PCI reset\n"); + r = 
amdgpu_device_pci_reset(adev); + break; + default: + dev_info(adev->dev, "PCI CONFIG reset\n"); + r = si_gpu_pci_config_reset(adev); + break; + } + + return r; +} + static u32 si_get_config_memsize(struct amdgpu_device *adev) { return RREG32(mmCONFIG_MEMSIZE); diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.c b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.c new file mode 100644 index 000000000000..3a18dbb55c32 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.c @@ -0,0 +1,77 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "smuio_v11_0_6.h" +#include "smuio/smuio_11_0_6_offset.h" +#include "smuio/smuio_11_0_6_sh_mask.h" + +static u32 smuio_v11_0_6_get_rom_index_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX); +} + +static u32 smuio_v11_0_6_get_rom_data_offset(struct amdgpu_device *adev) +{ + return SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA); +} + +static void smuio_v11_0_6_update_rom_clock_gating(struct amdgpu_device *adev, bool enable) +{ + u32 def, data; + + /* enable/disable ROM CG is not supported on APU */ + if (adev->flags & AMD_IS_APU) + return; + + def = data = RREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0); + + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG)) + data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK | + CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK); + else + data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK | + CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK; + + if (def != data) + WREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0, data); +} + +static void smuio_v11_0_6_get_clock_gating_state(struct amdgpu_device *adev, u32 *flags) +{ + u32 data; + + /* CGTT_ROM_CLK_CTRL0 is not available for APU */ + if (adev->flags & AMD_IS_APU) + return; + + data = RREG32_SOC15(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0); + if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK)) + *flags |= AMD_CG_SUPPORT_ROM_MGCG; +} + +const struct amdgpu_smuio_funcs smuio_v11_0_6_funcs = { + .get_rom_index_offset = smuio_v11_0_6_get_rom_index_offset, + .get_rom_data_offset = smuio_v11_0_6_get_rom_data_offset, + .update_rom_clock_gating = smuio_v11_0_6_update_rom_clock_gating, + .get_clock_gating_state = smuio_v11_0_6_get_clock_gating_state, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.h b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.h new file mode 100644 index 000000000000..3c3f4ab0bc9b --- /dev/null +++ 
b/drivers/gpu/drm/amd/amdgpu/smuio_v11_0_6.h @@ -0,0 +1,30 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __SMUIO_V11_0_6_H__ +#define __SMUIO_V11_0_6_H__ + +#include "soc15_common.h" + +extern const struct amdgpu_smuio_funcs smuio_v11_0_6_funcs; + +#endif /* __SMUIO_V11_0_6_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 9a25accd48a3..1221aa6b40a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -233,6 +233,8 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev) { u32 reference_clock = adev->clock.spll.reference_freq; + if (adev->asic_type == CHIP_RENOIR) + return 10000; if (adev->asic_type == CHIP_RAVEN) return reference_clock / 4; @@ -479,7 +481,8 @@ soc15_asic_reset_method(struct amdgpu_device *adev) if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 || amdgpu_reset_method == AMD_RESET_METHOD_MODE2 || - amdgpu_reset_method == AMD_RESET_METHOD_BACO) + amdgpu_reset_method == AMD_RESET_METHOD_BACO || + amdgpu_reset_method == AMD_RESET_METHOD_PCI) return amdgpu_reset_method; if (amdgpu_reset_method != -1) @@ -524,15 +527,18 @@ static int soc15_asic_reset(struct amdgpu_device *adev) return 0; switch (soc15_asic_reset_method(adev)) { - case AMD_RESET_METHOD_BACO: - dev_info(adev->dev, "BACO reset\n"); - return soc15_asic_baco_reset(adev); - case AMD_RESET_METHOD_MODE2: - dev_info(adev->dev, "MODE2 reset\n"); - return amdgpu_dpm_mode2_reset(adev); - default: - dev_info(adev->dev, "MODE1 reset\n"); - return soc15_asic_mode1_reset(adev); + case AMD_RESET_METHOD_PCI: + dev_info(adev->dev, "PCI reset\n"); + return amdgpu_device_pci_reset(adev); + case AMD_RESET_METHOD_BACO: + dev_info(adev->dev, "BACO reset\n"); + return soc15_asic_baco_reset(adev); + case AMD_RESET_METHOD_MODE2: + dev_info(adev->dev, "MODE2 reset\n"); + return amdgpu_dpm_mode2_reset(adev); + default: + dev_info(adev->dev, "MODE1 reset\n"); + return soc15_asic_mode1_reset(adev); } } @@ -1204,7 +1210,8 @@ static int soc15_common_early_init(void *handle) break; case CHIP_RENOIR: adev->asic_funcs = &soc15_asic_funcs; - if (adev->pdev->device == 0x1636) + if ((adev->pdev->device == 0x1636) || + (adev->pdev->device == 0x164c)) adev->apu_flags |= AMD_APU_IS_RENOIR; else adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE; diff --git a/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h new file mode 
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
new file mode 100644
index 000000000000..5039375bb1d4
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _TA_SECUREDISPLAY_IF_H
+#define _TA_SECUREDISPLAY_IF_H
+
+/** Secure Display related enumerations */
+/**********************************************************/
+
+/** @enum ta_securedisplay_command
+ * Secure Display Command ID
+ */
+enum ta_securedisplay_command {
+	/* Query whether TA is responding; used only for validation purposes */
+	TA_SECUREDISPLAY_COMMAND__QUERY_TA = 1,
+	/* Send Region of Interest and CRC value to I2C */
+	TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC = 2,
+	/* Maximum Command ID */
+	TA_SECUREDISPLAY_COMMAND__MAX_ID = 0x7FFFFFFF,
+};
+
+/** @enum ta_securedisplay_status
+ * Secure Display status returned in the shared buffer status field
+ */
+enum ta_securedisplay_status {
+	TA_SECUREDISPLAY_STATUS__SUCCESS = 0x00,		/* Success */
+	TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE = 0x01,	/* Generic Failure */
+	TA_SECUREDISPLAY_STATUS__INVALID_PARAMETER = 0x02,	/* Invalid Parameter */
+	TA_SECUREDISPLAY_STATUS__NULL_POINTER = 0x03,		/* Null Pointer */
+	TA_SECUREDISPLAY_STATUS__I2C_WRITE_ERROR = 0x04,	/* Fail to Write to I2C */
+	TA_SECUREDISPLAY_STATUS__READ_DIO_SCRATCH_ERROR = 0x05,	/* Fail to Read DIO Scratch Register */
+	TA_SECUREDISPLAY_STATUS__READ_CRC_ERROR = 0x06,		/* Fail to Read CRC */
+
+	TA_SECUREDISPLAY_STATUS__MAX = 0x7FFFFFFF,		/* Maximum Value for status */
+};
+
+/** @enum ta_securedisplay_max_phy
+ * Physical ID number to use for reading the corresponding DIO Scratch register for ROI
+ */
+enum ta_securedisplay_max_phy {
+	TA_SECUREDISPLAY_PHY0 = 0,
+	TA_SECUREDISPLAY_PHY1 = 1,
+	TA_SECUREDISPLAY_PHY2 = 2,
+	TA_SECUREDISPLAY_PHY3 = 3,
+	TA_SECUREDISPLAY_MAX_PHY = 4,
+};
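/*
 * Editor's note: a hypothetical helper showing how a caller might
 * fold the status codes defined above into kernel error numbers.
 * The mapping choices are illustrative assumptions, not part of
 * this patch.
 */
static int example_securedisplay_status_to_errno(enum ta_securedisplay_status s)
{
	switch (s) {
	case TA_SECUREDISPLAY_STATUS__SUCCESS:
		return 0;
	case TA_SECUREDISPLAY_STATUS__INVALID_PARAMETER:
	case TA_SECUREDISPLAY_STATUS__NULL_POINTER:
		return -EINVAL;
	case TA_SECUREDISPLAY_STATUS__I2C_WRITE_ERROR:
	case TA_SECUREDISPLAY_STATUS__READ_DIO_SCRATCH_ERROR:
	case TA_SECUREDISPLAY_STATUS__READ_CRC_ERROR:
		return -EIO;
	default:
		return -EINVAL;
	}
}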
+/** @enum ta_securedisplay_ta_query_cmd_ret
+ * A predefined return value of 0xAB, used only to validate that
+ * communication with the Secure Display TA is functional.
+ * This value is used to validate whether the TA is responding successfully.
+ */
+enum ta_securedisplay_ta_query_cmd_ret {
+	/* This is a value to validate if the TA is loaded successfully */
+	TA_SECUREDISPLAY_QUERY_CMD_RET = 0xAB,
+};
+
+/** @enum ta_securedisplay_buffer_size
+ * I2C buffer size, which contains 8 bytes of ROI (X start, X end, Y start, Y end),
+ * 6 bytes of CRC (R, G, B) and 1 byte for the physical ID
+ */
+enum ta_securedisplay_buffer_size {
+	/* 15 bytes = 8 bytes (ROI) + 6 bytes (CRC) + 1 byte (phy_id) */
+	TA_SECUREDISPLAY_I2C_BUFFER_SIZE = 15,
+};
+
+/** Input/output structures for Secure Display commands */
+/**********************************************************/
+/**
+ * Input structures
+ */
+
+/** @struct ta_securedisplay_send_roi_crc_input
+ * Physical ID to determine which DIO scratch register should be used to get ROI
+ */
+struct ta_securedisplay_send_roi_crc_input {
+	uint32_t  phy_id;  /* Physical ID */
+};
+
+/** @union ta_securedisplay_cmd_input
+ * Input buffer
+ */
+union ta_securedisplay_cmd_input {
+	/* send ROI and CRC input buffer format */
+	struct ta_securedisplay_send_roi_crc_input  send_roi_crc;
+	uint32_t reserved[4];
+};
+
+/**
+ * Output structures
+ */
+
+/** @struct ta_securedisplay_query_ta_output
+ * Output buffer format for the query-TA command (whether the TA is responding);
+ * used only for validation purposes
+ */
+struct ta_securedisplay_query_ta_output {
+	/* return value from the TA when it is queried, for validation purposes only */
+	uint32_t  query_cmd_ret;
+};
+
+/** @struct ta_securedisplay_send_roi_crc_output
+ * Output buffer format for the send ROI CRC command; carries the I2C buffer
+ * created inside the TA and used for the I2C write, for validation purposes only
+ */
+struct ta_securedisplay_send_roi_crc_output {
+	uint8_t  i2c_buf[TA_SECUREDISPLAY_I2C_BUFFER_SIZE];  /* I2C buffer */
+	uint8_t  reserved;
+};
+
+/** @union ta_securedisplay_cmd_output
+ * Output buffer
+ */
+union ta_securedisplay_cmd_output {
+	/* Query TA output buffer format, used only for validation purposes */
+	struct ta_securedisplay_query_ta_output  query_ta;
+	/* Send ROI CRC output buffer format, used only for validation purposes */
+	struct ta_securedisplay_send_roi_crc_output  send_roi_crc;
+	uint32_t reserved[4];
+};
+
+/** @struct securedisplay_cmd
+ * Secure Display Command, which is the shared buffer memory
+ */
+struct securedisplay_cmd {
+	uint32_t                          cmd_id;       /* +0  Bytes: Command ID */
+	enum ta_securedisplay_status      status;       /* +4  Bytes: Status of Secure Display TA */
+	uint32_t                          reserved[2];  /* +8  Bytes: Reserved */
+	union ta_securedisplay_cmd_input  securedisplay_in_message;   /* +16 Bytes: Input Buffer */
+	union ta_securedisplay_cmd_output securedisplay_out_message;  /* +32 Bytes: Output Buffer */
+	/**@note Total 48 Bytes */
+};
+
+#endif //_TA_SECUREDISPLAY_IF_H
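/*
 * Editor's note: an illustrative sketch of populating the shared
 * securedisplay_cmd buffer defined above for a SEND_ROI_CRC request.
 * In the real series the buffer lives in PSP-managed memory and is
 * submitted through the PSP layer (psp_securedisplay_invoke() in
 * amdgpu_securedisplay.c); the helper below only demonstrates the
 * layout, and its name is hypothetical.
 */
static void example_prepare_send_roi_crc(struct securedisplay_cmd *cmd,
					 uint32_t phy_id)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->cmd_id = TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC;
	cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_id;

	/* After the TA runs, cmd->status holds a ta_securedisplay_status
	 * and the 15-byte I2C payload is in
	 * cmd->securedisplay_out_message.send_roi_crc.i2c_buf. */
}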
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index 42032ca380cc..5a3c867d5881 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -88,7 +88,7 @@ static void vega20_ih_init_register_offset(struct amdgpu_device *adev)
  * vega20_ih_toggle_ring_interrupts - toggle the interrupt ring buffer
  *
  * @adev: amdgpu_device pointer
- * @ih: amdgpu_ih_ring pointet
+ * @ih: amdgpu_ih_ring pointer
  * @enable: true - enable the interrupts, false - disable the interrupts
  *
  * Toggle the interrupt ring buffer (VEGA20)
@@ -367,6 +367,7 @@ static void vega20_ih_irq_disable(struct amdgpu_device *adev)
  * vega20_ih_get_wptr - get the IH ring buffer wptr
  *
  * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
  *
  * Get the IH ring buffer wptr from either the register
  * or the writeback memory buffer (VEGA20).  Also check for
@@ -414,6 +415,7 @@ out:
  * vega20_ih_irq_rearm - rearm IRQ if lost
  *
  * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
  *
  */
 static void vega20_ih_irq_rearm(struct amdgpu_device *adev,
@@ -439,6 +441,7 @@ static void vega20_ih_irq_rearm(struct amdgpu_device *adev,
  * vega20_ih_set_rptr - set the IH ring buffer rptr
  *
  * @adev: amdgpu_device pointer
+ * @ih: amdgpu_ih_ring pointer
  *
  * Set the IH ring buffer rptr.
  */
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index d56b474b3a21..eafb76aebd00 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -642,11 +642,21 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
 	return -EINVAL;
 }
 
-static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
+/**
+ * vi_asic_pci_config_reset - soft reset GPU
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Use PCI Config method to reset the GPU.
+ *
+ * Returns 0 for success.
+ */
+static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
 {
 	u32 i;
+	int r = -EINVAL;
 
-	dev_info(adev->dev, "GPU pci config reset\n");
+	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
 
 	/* disable BM */
 	pci_clear_master(adev->pdev);
@@ -661,29 +671,11 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
 			/* enable BM */
 			pci_set_master(adev->pdev);
 			adev->has_hw_reset = true;
-			return 0;
+			r = 0;
+			break;
 		}
 		udelay(1);
 	}
-	return -EINVAL;
-}
-
-/**
- * vi_asic_pci_config_reset - soft reset GPU
- *
- * @adev: amdgpu_device pointer
- *
- * Use PCI Config method to reset the GPU.
- *
- * Returns 0 for success.
- */
-static int vi_asic_pci_config_reset(struct amdgpu_device *adev)
-{
-	int r;
-
-	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
-
-	r = vi_gpu_pci_config_reset(adev);
 
 	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
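/*
 * Editor's note: after this refactor the vi.c reset path reads, in
 * outline, as below -- the ATOM "engine hung" scratch bit now
 * brackets the whole operation instead of wrapping a separate helper,
 * so no early return can leave it set. Outline only; the elided
 * middle is the bus-master disable / reset / poll loop shown in the
 * hunk above.
 */
static int vi_asic_pci_config_reset_outline(struct amdgpu_device *adev)
{
	int r = -EINVAL;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	/* ... disable bus mastering, assert the PCI config reset, and
	 * poll until the ASIC responds again, setting r = 0 and
	 * adev->has_hw_reset = true on success ... */

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return r;
}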