author | Dave Airlie <airlied@redhat.com> | 2015-02-24 09:23:58 +1000 |
---|---|---|
committer | Alex Deucher <alexander.deucher@amd.com> | 2015-03-19 12:26:47 -0400 |
commit | 47f2467fffc4e1a070b141bc9d1319dc2c0acea5 (patch) | |
tree | 51d3b0c26ef8e3403db16ad05e452bbc5573c90e /drivers/gpu/drm/radeon/si.c | |
parent | 2bc67b4d9e9f2c8d13782387bbdbb6e1b5e12d30 (diff) | |
download | linux-47f2467fffc4e1a070b141bc9d1319dc2c0acea5.tar.bz2 | |
radeon/si: add support for short HPD irqs
This adds support for processing short HPD (hot plug detect) IRQs on SI GPUs: short pulses signal a DisplayPort sink interrupt rather than a connect/disconnect event, so they are acked separately and dispatched to the DP work handler instead of the full hotplug path.
Signed-off-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
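
For orientation, here is a minimal user-space sketch of the split this patch wires up: each HPD pin can raise a long pulse (connect/disconnect) or a short RX pulse (a DisplayPort sink asking to be serviced), and the interrupt handler acks each kind and queues the matching deferred work. All names below are simplified stand-ins, not the radeon register or struct definitions.

```c
/*
 * Illustrative only: stand-in "registers" and bit names, not radeon's.
 * Long pulses feed the full hotplug path; short RX pulses feed a
 * lighter DP service path, mirroring the queue_hotplug / queue_dp
 * flags in the diff below.
 */
#include <stdbool.h>
#include <stdio.h>

#define NUM_HPD_PINS        6
#define HPD_LONG_PULSE      (1u << 0)  /* stand-in for DC_HPDx_INTERRUPT */
#define HPD_SHORT_PULSE_RX  (1u << 1)  /* stand-in for DC_HPDx_RX_INTERRUPT */

static unsigned int hpd_status[NUM_HPD_PINS];  /* pretend per-pin status registers */

static void process_hpd_irqs(void)
{
	bool queue_hotplug = false;  /* full connector re-detect */
	bool queue_dp = false;       /* lightweight DP short-pulse service */

	for (int pin = 0; pin < NUM_HPD_PINS; pin++) {
		if (hpd_status[pin] & HPD_LONG_PULSE) {
			hpd_status[pin] &= ~HPD_LONG_PULSE;      /* ack long pulse */
			queue_hotplug = true;
			printf("IH: HPD%d\n", pin + 1);
		}
		if (hpd_status[pin] & HPD_SHORT_PULSE_RX) {
			hpd_status[pin] &= ~HPD_SHORT_PULSE_RX;  /* ack short pulse */
			queue_dp = true;
			printf("IH: HPD_RX %d\n", pin + 1);
		}
	}

	/* In the driver these would be schedule_work() calls. */
	if (queue_dp)
		printf("-> run dp_work (DP sink interrupt)\n");
	if (queue_hotplug)
		printf("-> run hotplug_work (connector re-detect)\n");
}

int main(void)
{
	hpd_status[0] = HPD_LONG_PULSE;      /* monitor plugged into pin 1 */
	hpd_status[2] = HPD_SHORT_PULSE_RX;  /* DP sink on pin 3 raised an IRQ */
	process_hpd_irqs();
	return 0;
}
```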
Diffstat (limited to 'drivers/gpu/drm/radeon/si.c')
-rw-r--r-- | drivers/gpu/drm/radeon/si.c | 100 |
1 file changed, 88 insertions(+), 12 deletions(-)
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 0d2295fd2844..86e75798320f 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6085,12 +6085,12 @@ int si_irq_set(struct radeon_device *rdev)
 		(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
 
 	if (!ASIC_IS_NODCE(rdev)) {
-		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
-		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
-		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
-		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
-		hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
-		hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
+		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+		hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
+		hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~(DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN);
 	}
 
 	dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
@@ -6153,27 +6153,27 @@ int si_irq_set(struct radeon_device *rdev)
 	}
 	if (rdev->irq.hpd[0]) {
 		DRM_DEBUG("si_irq_set: hpd 1\n");
-		hpd1 |= DC_HPDx_INT_EN;
+		hpd1 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
 	}
 	if (rdev->irq.hpd[1]) {
 		DRM_DEBUG("si_irq_set: hpd 2\n");
-		hpd2 |= DC_HPDx_INT_EN;
+		hpd2 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
 	}
 	if (rdev->irq.hpd[2]) {
 		DRM_DEBUG("si_irq_set: hpd 3\n");
-		hpd3 |= DC_HPDx_INT_EN;
+		hpd3 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
 	}
 	if (rdev->irq.hpd[3]) {
 		DRM_DEBUG("si_irq_set: hpd 4\n");
-		hpd4 |= DC_HPDx_INT_EN;
+		hpd4 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
 	}
 	if (rdev->irq.hpd[4]) {
 		DRM_DEBUG("si_irq_set: hpd 5\n");
-		hpd5 |= DC_HPDx_INT_EN;
+		hpd5 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
 	}
 	if (rdev->irq.hpd[5]) {
 		DRM_DEBUG("si_irq_set: hpd 6\n");
-		hpd6 |= DC_HPDx_INT_EN;
+		hpd6 |= DC_HPDx_INT_EN | DC_HPDx_RX_INT_EN;
 	}
 
 	WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
@@ -6336,6 +6336,37 @@ static inline void si_irq_ack(struct radeon_device *rdev)
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
+
+	if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
+		tmp = RREG32(DC_HPD1_INT_CONTROL);
+		tmp |= DC_HPDx_RX_INT_ACK;
+		WREG32(DC_HPD1_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
+		tmp = RREG32(DC_HPD2_INT_CONTROL);
+		tmp |= DC_HPDx_RX_INT_ACK;
+		WREG32(DC_HPD2_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
+		tmp = RREG32(DC_HPD3_INT_CONTROL);
+		tmp |= DC_HPDx_RX_INT_ACK;
+		WREG32(DC_HPD3_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
+		tmp = RREG32(DC_HPD4_INT_CONTROL);
+		tmp |= DC_HPDx_RX_INT_ACK;
+		WREG32(DC_HPD4_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
+		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp |= DC_HPDx_RX_INT_ACK;
+		WREG32(DC_HPD5_INT_CONTROL, tmp);
+	}
+	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
+		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp |= DC_HPDx_RX_INT_ACK;
+		WREG32(DC_HPD6_INT_CONTROL, tmp);
+	}
 }
 
 static void si_irq_disable(struct radeon_device *rdev)
@@ -6401,6 +6432,7 @@ int si_irq_process(struct radeon_device *rdev)
 	u32 src_id, src_data, ring_id;
 	u32 ring_index;
 	bool queue_hotplug = false;
+	bool queue_dp = false;
 	bool queue_thermal = false;
 	u32 status, addr;
 
@@ -6641,6 +6673,48 @@ restart_ih:
 				DRM_DEBUG("IH: HPD6\n");
 			}
 			break;
+		case 6:
+			if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_RX_INTERRUPT) {
+				rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 1\n");
+			}
+			break;
+		case 7:
+			if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_RX_INTERRUPT) {
+				rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 2\n");
+			}
+			break;
+		case 8:
+			if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_RX_INTERRUPT) {
+				rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 3\n");
+			}
+			break;
+		case 9:
+			if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_RX_INTERRUPT) {
+				rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 4\n");
+			}
+			break;
+		case 10:
+			if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_RX_INTERRUPT) {
+				rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 5\n");
+			}
+			break;
+		case 11:
+			if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
+				rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_RX_INTERRUPT;
+				queue_dp = true;
+				DRM_DEBUG("IH: HPD_RX 6\n");
+			}
+			break;
 		default:
 			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
 			break;
@@ -6723,6 +6797,8 @@ restart_ih:
 		rptr &= rdev->ih.ptr_mask;
 		WREG32(IH_RB_RPTR, rptr);
 	}
+	if (queue_dp)
+		schedule_work(&rdev->dp_work);
 	if (queue_hotplug)
 		schedule_work(&rdev->hotplug_work);
 	if (queue_thermal && rdev->pm.dpm_enabled)
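
The per-pin blocks added to si_irq_ack() and the case 6-11 arms in si_irq_process() above differ only in which saved status word, which DC_HPDn_RX_INTERRUPT bit, and which DC_HPDn_INT_CONTROL register they touch. As a sketch of the same ack-and-dispatch pattern in table-driven form (simplified stand-in types, fake MMIO accessors, and assumed bit positions and offsets; this is not how si.c is written):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DC_HPDx_RX_INT_ACK  (1u << 20)  /* assumed bit position; see the radeon headers for the real one */

struct hpd_rx_source {
	uint32_t *disp_int;     /* saved status word to test (disp_int, disp_int_cont, ...) */
	uint32_t  rx_int_bit;   /* DC_HPDn_RX_INTERRUPT bit for this pin */
	uint32_t  int_control;  /* offset of DC_HPDn_INT_CONTROL for this pin */
};

/* Fake MMIO accessors so the sketch is self-contained. */
static uint32_t regs[0x10000 / 4];
static uint32_t RREG32(uint32_t off)             { return regs[off / 4]; }
static void     WREG32(uint32_t off, uint32_t v) { regs[off / 4] = v; }

/* Ack every pending RX (short-pulse) interrupt and report whether dp_work is needed. */
static bool ack_and_queue_dp(const struct hpd_rx_source *src, int n)
{
	bool queue_dp = false;

	for (int i = 0; i < n; i++) {
		if (!(*src[i].disp_int & src[i].rx_int_bit))
			continue;
		*src[i].disp_int &= ~src[i].rx_int_bit;  /* clear the cached status bit */
		WREG32(src[i].int_control,
		       RREG32(src[i].int_control) | DC_HPDx_RX_INT_ACK);  /* ack in hardware */
		queue_dp = true;
		printf("IH: HPD_RX %d\n", i + 1);
	}
	return queue_dp;
}

int main(void)
{
	uint32_t disp_int = 0, disp_int_cont = 0;  /* cached status words, as the IH caches them */

	const struct hpd_rx_source srcs[] = {
		{ &disp_int,      1u << 18, 0x6020 },  /* pin 1: made-up bit and register offset */
		{ &disp_int_cont, 1u << 18, 0x602c },  /* pin 2: made-up bit and register offset */
	};

	disp_int |= 1u << 18;  /* pretend pin 1 raised a short pulse */
	if (ack_and_queue_dp(srcs, 2))
		printf("-> schedule dp_work\n");
	return 0;
}
```

The patch itself keeps the explicit per-pin form, which matches the existing style of the surrounding code in si.c.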