author    James Zhu <James.Zhu@amd.com>              2019-07-10 10:53:34 -0500
committer Alex Deucher <alexander.deucher@amd.com>   2019-07-18 14:18:05 -0500
commit    c01b6a1d38675652199d12b898c1c23b96b5055f (patch)
tree      54dbb93453c26780063265d374a11084475aab1b /drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
parent    989b6a0549977faf0b5b8d7e1c2634e880c579a2 (diff)
drm/amdgpu: modify amdgpu_vcn to support multiple instances
Arcturus has dual VCN. Restructure amdgpu_device::vcn to support multiple VCN instances. There are no logical changes here.

Signed-off-by: James Zhu <James.Zhu@amd.com>
Reviewed-by: Leo Liu <leo.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
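The shape of the restructuring can be read off the renames in the diff below: per-instance state (the VCPU firmware BO, the saved suspend buffer, the rings, and the externally programmed registers) moves into an inst[] array of a new per-instance struct. A minimal sketch, assuming the real definitions land in amdgpu_vcn.h, which this file's diff does not show; AMDGPU_MAX_VCN_INSTANCES and the exact member layout are inferred from the accesses below, not quoted:

/* Sketch only: inferred from the field accesses in the diff below.
 * The real definitions live in amdgpu_vcn.h, which this diff does not show;
 * AMDGPU_MAX_VCN_INSTANCES and the member ordering are assumptions. */
#define AMDGPU_MAX_VCN_INSTANCES 2              /* Arcturus has dual VCN */

struct amdgpu_vcn_inst {
        struct amdgpu_bo        *vcpu_bo;       /* VCPU firmware/stack BO */
        void                    *cpu_addr;
        uint64_t                gpu_addr;
        void                    *saved_bo;      /* suspend-time copy of vcpu_bo */
        struct amdgpu_ring      ring_dec;
        struct amdgpu_ring      ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
        struct amdgpu_ring      ring_jpeg;
        struct amdgpu_vcn_reg   external;       /* scratch9, jpeg_pitch, ... */
};

struct amdgpu_vcn {
        /* shared state (fw, idle_work, num_enc_rings, ...) stays here */
        struct amdgpu_vcn_inst  inst[AMDGPU_MAX_VCN_INSTANCES];
};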
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c')
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 68 ++++++++++++++++----------------
 1 file changed, 34 insertions(+), 34 deletions(-)
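The change itself is mechanical: every access to per-instance state gains an explicit instance index, hard-coded to 0 here so behavior is unchanged. For example:

/* before: single implicit instance */
ptr = adev->vcn.cpu_addr;

/* after: instance 0 selected explicitly; later patches can index others */
ptr = adev->vcn.inst[0].cpu_addr;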
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index a119a7df0305..c102267da85d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -147,8 +147,8 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
- &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
+ AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[0].vcpu_bo,
+ &adev->vcn.inst[0].gpu_addr, &adev->vcn.inst[0].cpu_addr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate vcn bo\n", r);
return r;
@@ -171,7 +171,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
int i;
- kvfree(adev->vcn.saved_bo);
+ kvfree(adev->vcn.inst[0].saved_bo);
if (adev->vcn.indirect_sram) {
amdgpu_bo_free_kernel(&adev->vcn.dpg_sram_bo,
@@ -179,16 +179,16 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
(void **)&adev->vcn.dpg_sram_cpu_addr);
}
- amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
- &adev->vcn.gpu_addr,
- (void **)&adev->vcn.cpu_addr);
+ amdgpu_bo_free_kernel(&adev->vcn.inst[0].vcpu_bo,
+ &adev->vcn.inst[0].gpu_addr,
+ (void **)&adev->vcn.inst[0].cpu_addr);
- amdgpu_ring_fini(&adev->vcn.ring_dec);
+ amdgpu_ring_fini(&adev->vcn.inst[0].ring_dec);
for (i = 0; i < adev->vcn.num_enc_rings; ++i)
- amdgpu_ring_fini(&adev->vcn.ring_enc[i]);
+ amdgpu_ring_fini(&adev->vcn.inst[0].ring_enc[i]);
- amdgpu_ring_fini(&adev->vcn.ring_jpeg);
+ amdgpu_ring_fini(&adev->vcn.inst[0].ring_jpeg);
release_firmware(adev->vcn.fw);
@@ -202,17 +202,17 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
cancel_delayed_work_sync(&adev->vcn.idle_work);
- if (adev->vcn.vcpu_bo == NULL)
+ if (adev->vcn.inst[0].vcpu_bo == NULL)
return 0;
- size = amdgpu_bo_size(adev->vcn.vcpu_bo);
- ptr = adev->vcn.cpu_addr;
+ size = amdgpu_bo_size(adev->vcn.inst[0].vcpu_bo);
+ ptr = adev->vcn.inst[0].cpu_addr;
- adev->vcn.saved_bo = kvmalloc(size, GFP_KERNEL);
- if (!adev->vcn.saved_bo)
+ adev->vcn.inst[0].saved_bo = kvmalloc(size, GFP_KERNEL);
+ if (!adev->vcn.inst[0].saved_bo)
return -ENOMEM;
- memcpy_fromio(adev->vcn.saved_bo, ptr, size);
+ memcpy_fromio(adev->vcn.inst[0].saved_bo, ptr, size);
return 0;
}
@@ -222,16 +222,16 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
unsigned size;
void *ptr;
- if (adev->vcn.vcpu_bo == NULL)
+ if (adev->vcn.inst[0].vcpu_bo == NULL)
return -EINVAL;
- size = amdgpu_bo_size(adev->vcn.vcpu_bo);
- ptr = adev->vcn.cpu_addr;
+ size = amdgpu_bo_size(adev->vcn.inst[0].vcpu_bo);
+ ptr = adev->vcn.inst[0].cpu_addr;
- if (adev->vcn.saved_bo != NULL) {
- memcpy_toio(ptr, adev->vcn.saved_bo, size);
- kvfree(adev->vcn.saved_bo);
- adev->vcn.saved_bo = NULL;
+ if (adev->vcn.inst[0].saved_bo != NULL) {
+ memcpy_toio(ptr, adev->vcn.inst[0].saved_bo, size);
+ kvfree(adev->vcn.inst[0].saved_bo);
+ adev->vcn.inst[0].saved_bo = NULL;
} else {
const struct common_firmware_header *hdr;
unsigned offset;
@@ -239,7 +239,7 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
- memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
+ memcpy_toio(adev->vcn.inst[0].cpu_addr, adev->vcn.fw->data + offset,
le32_to_cpu(hdr->ucode_size_bytes));
size -= le32_to_cpu(hdr->ucode_size_bytes);
ptr += le32_to_cpu(hdr->ucode_size_bytes);
@@ -258,7 +258,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
unsigned int i;
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
+ fences += amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_enc[i]);
}
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
@@ -269,7 +269,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
else
new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
- if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
+ if (amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_jpeg))
new_state.jpeg = VCN_DPG_STATE__PAUSE;
else
new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
@@ -277,8 +277,8 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
adev->vcn.pause_dpg_mode(adev, &new_state);
}
- fences += amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg);
- fences += amdgpu_fence_count_emitted(&adev->vcn.ring_dec);
+ fences += amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_jpeg);
+ fences += amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_dec);
if (fences == 0) {
amdgpu_gfx_off_ctrl(adev, true);
@@ -312,14 +312,14 @@ void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
unsigned int i;
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- fences += amdgpu_fence_count_emitted(&adev->vcn.ring_enc[i]);
+ fences += amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_enc[i]);
}
if (fences)
new_state.fw_based = VCN_DPG_STATE__PAUSE;
else
new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
- if (amdgpu_fence_count_emitted(&adev->vcn.ring_jpeg))
+ if (amdgpu_fence_count_emitted(&adev->vcn.inst[0].ring_jpeg))
new_state.jpeg = VCN_DPG_STATE__PAUSE;
else
new_state.jpeg = VCN_DPG_STATE__UNPAUSE;
@@ -345,7 +345,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
unsigned i;
int r;
- WREG32(adev->vcn.external.scratch9, 0xCAFEDEAD);
+ WREG32(adev->vcn.inst[0].external.scratch9, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
if (r)
return r;
@@ -353,7 +353,7 @@ int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0xDEADBEEF);
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(adev->vcn.external.scratch9);
+ tmp = RREG32(adev->vcn.inst[0].external.scratch9);
if (tmp == 0xDEADBEEF)
break;
udelay(1);
@@ -664,7 +664,7 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
unsigned i;
int r;
- WREG32(adev->vcn.external.jpeg_pitch, 0xCAFEDEAD);
+ WREG32(adev->vcn.inst[0].external.jpeg_pitch, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 3);
if (r)
return r;
@@ -674,7 +674,7 @@ int amdgpu_vcn_jpeg_ring_test_ring(struct amdgpu_ring *ring)
amdgpu_ring_commit(ring);
for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(adev->vcn.external.jpeg_pitch);
+ tmp = RREG32(adev->vcn.inst[0].external.jpeg_pitch);
if (tmp == 0xDEADBEEF)
break;
udelay(1);
@@ -748,7 +748,7 @@ int amdgpu_vcn_jpeg_ring_test_ib(struct amdgpu_ring *ring, long timeout)
}
for (i = 0; i < adev->usec_timeout; i++) {
- tmp = RREG32(adev->vcn.external.jpeg_pitch);
+ tmp = RREG32(adev->vcn.inst[0].external.jpeg_pitch);
if (tmp == 0xDEADBEEF)
break;
udelay(1);
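Since the index stays at 0 throughout, this patch is behavior-neutral; enabling the second instance is left to follow-up patches. A hypothetical sketch of what the teardown in amdgpu_vcn_sw_fini() could become once a per-device instance count exists (num_vcn_inst is an assumed field name, not part of this diff):

/* Hypothetical follow-up: iterate all instances instead of inst[0].
 * num_vcn_inst is an assumed name for the per-device instance count. */
for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
        kvfree(adev->vcn.inst[j].saved_bo);
        amdgpu_bo_free_kernel(&adev->vcn.inst[j].vcpu_bo,
                              &adev->vcn.inst[j].gpu_addr,
                              (void **)&adev->vcn.inst[j].cpu_addr);
        amdgpu_ring_fini(&adev->vcn.inst[j].ring_dec);
        for (i = 0; i < adev->vcn.num_enc_rings; ++i)
                amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
        amdgpu_ring_fini(&adev->vcn.inst[j].ring_jpeg);
}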