Diffstat (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm')
10 files changed, 1577 insertions, 205 deletions
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 55e39b462a5e..00edf78975b1 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -34,6 +34,7 @@ #include "dc/inc/hw/dmcu.h" #include "dc/inc/hw/abm.h" #include "dc/dc_dmub_srv.h" +#include "dc/dc_edid_parser.h" #include "amdgpu_dm_trace.h" #include "vid.h" @@ -75,7 +76,6 @@ #include <drm/drm_edid.h> #include <drm/drm_vblank.h> #include <drm/drm_audio_component.h> -#include <drm/drm_hdcp.h> #if defined(CONFIG_DRM_AMD_DC_DCN) #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" @@ -212,6 +212,9 @@ static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm); static const struct drm_format_info * amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd); +static bool +is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, + struct drm_crtc_state *new_crtc_state); /* * dm_vblank_get_counter * @@ -335,6 +338,17 @@ static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state) dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; } +static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state, + struct dm_crtc_state *new_state) +{ + if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) + return true; + else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state)) + return true; + else + return false; +} + /** * dm_pflip_high_irq() - Handle pageflip interrupt * @interrupt_params: ignored @@ -566,6 +580,31 @@ static void dm_crtc_high_irq(void *interrupt_params) spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); } +#if defined(CONFIG_DRM_AMD_DC_DCN) +/** + * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for + * DCN generation ASICs + * @interrupt params - interrupt parameters + * + * Used to set crc window/read out crc value at vertical line 0 position + */ +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) +static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params) +{ + struct common_irq_params *irq_params = interrupt_params; + struct amdgpu_device *adev = irq_params->adev; + struct amdgpu_crtc *acrtc; + + acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0); + + if (!acrtc) + return; + + amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base); +} +#endif +#endif + static int dm_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -951,9 +990,7 @@ static void event_mall_stutter(struct work_struct *work) else dm->active_vblank_irq_count--; - - dc_allow_idle_optimizations( - dm->dc, dm->active_vblank_irq_count == 0 ? true : false); + dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0); DRM_DEBUG_DRIVER("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0); @@ -1060,6 +1097,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) init_data.flags.power_down_display_on_boot = true; + INIT_LIST_HEAD(&adev->dm.da_list); /* Display Core create. 
*/ adev->dm.dc = dc_create(&init_data); @@ -1139,6 +1177,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) dc_init_callbacks(adev->dm.dc, &init_params); } #endif +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work(); +#endif if (amdgpu_dm_initialize_drm_device(adev)) { DRM_ERROR( "amdgpu: failed to initialize sw for display support.\n"); @@ -1182,6 +1223,13 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) amdgpu_dm_destroy_drm_device(&adev->dm); +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + if (adev->dm.crc_rd_wrk) { + flush_work(&adev->dm.crc_rd_wrk->notify_ta_work); + kfree(adev->dm.crc_rd_wrk); + adev->dm.crc_rd_wrk = NULL; + } +#endif #ifdef CONFIG_DRM_AMD_DC_HDCP if (adev->dm.hdcp_workqueue) { hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue); @@ -1191,6 +1239,15 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) if (adev->dm.dc) dc_deinit_callbacks(adev->dm.dc); #endif + +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (adev->dm.vblank_workqueue) { + adev->dm.vblank_workqueue->dm = NULL; + kfree(adev->dm.vblank_workqueue); + adev->dm.vblank_workqueue = NULL; + } +#endif + if (adev->dm.dc->ctx->dmub_srv) { dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv); adev->dm.dc->ctx->dmub_srv = NULL; @@ -1847,6 +1904,9 @@ static int dm_suspend(void *handle) return ret; } +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY + amdgpu_dm_crtc_secure_display_suspend(adev); +#endif WARN_ON(adev->dm.cached_state); adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev)); @@ -2171,6 +2231,10 @@ static int dm_resume(void *handle) dm->cached_state = NULL; +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY + amdgpu_dm_crtc_secure_display_resume(adev); +#endif + amdgpu_dm_irq_resume_late(adev); amdgpu_dm_smu_write_watermarks_table(adev); @@ -2907,6 +2971,16 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) struct dc_interrupt_params int_params = {0}; int r; int i; +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + static const unsigned int vrtl_int_srcid[] = { + DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL, + DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL, + DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL, + DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL, + DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL, + DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL + }; +#endif int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; @@ -2947,6 +3021,37 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev) adev, &int_params, dm_crtc_high_irq, c_irq_params); } + /* Use otg vertical line interrupt */ +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) { + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, + vrtl_int_srcid[i], &adev->vline0_irq); + + if (r) { + DRM_ERROR("Failed to add vline0 irq id!\n"); + return r; + } + + int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; + int_params.irq_source = + dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0); + + if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) { + DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]); + break; + } + + c_irq_params = &adev->dm.vline0_params[int_params.irq_source + - DC_IRQ_SOURCE_DC1_VLINE0]; + + c_irq_params->adev = adev; + c_irq_params->irq_src = int_params.irq_source; + + amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_dcn_vertical_interrupt0_high_irq, c_irq_params); + } +#endif + /* Use 
VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx * to trigger at end of each vblank, regardless of state of the lock, @@ -5001,19 +5106,16 @@ static void fill_stream_properties_from_drm_display_mode( timing_out->hdmi_vic = hv_frame.vic; } - timing_out->h_addressable = mode_in->crtc_hdisplay; - timing_out->h_total = mode_in->crtc_htotal; - timing_out->h_sync_width = - mode_in->crtc_hsync_end - mode_in->crtc_hsync_start; - timing_out->h_front_porch = - mode_in->crtc_hsync_start - mode_in->crtc_hdisplay; - timing_out->v_total = mode_in->crtc_vtotal; - timing_out->v_addressable = mode_in->crtc_vdisplay; - timing_out->v_front_porch = - mode_in->crtc_vsync_start - mode_in->crtc_vdisplay; - timing_out->v_sync_width = - mode_in->crtc_vsync_end - mode_in->crtc_vsync_start; - timing_out->pix_clk_100hz = mode_in->crtc_clock * 10; + timing_out->h_addressable = mode_in->hdisplay; + timing_out->h_total = mode_in->htotal; + timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start; + timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay; + timing_out->v_total = mode_in->vtotal; + timing_out->v_addressable = mode_in->vdisplay; + timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay; + timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start; + timing_out->pix_clk_100hz = mode_in->clock * 10; + timing_out->aspect_ratio = get_aspect_ratio(mode_in); stream->output_color_space = get_output_color_space(timing_out); @@ -5180,6 +5282,86 @@ static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context) set_master_stream(context->streams, context->stream_count); } +static struct drm_display_mode * +get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector, + bool use_probed_modes) +{ + struct drm_display_mode *m, *m_pref = NULL; + u16 current_refresh, highest_refresh; + struct list_head *list_head = use_probed_modes ? + &aconnector->base.probed_modes : + &aconnector->base.modes; + + if (aconnector->freesync_vid_base.clock != 0) + return &aconnector->freesync_vid_base; + + /* Find the preferred mode */ + list_for_each_entry (m, list_head, head) { + if (m->type & DRM_MODE_TYPE_PREFERRED) { + m_pref = m; + break; + } + } + + if (!m_pref) { + /* Probably an EDID with no preferred mode. Fallback to first entry */ + m_pref = list_first_entry_or_null( + &aconnector->base.modes, struct drm_display_mode, head); + if (!m_pref) { + DRM_DEBUG_DRIVER("No preferred mode found in EDID\n"); + return NULL; + } + } + + highest_refresh = drm_mode_vrefresh(m_pref); + + /* + * Find the mode with highest refresh rate with same resolution. + * For some monitors, preferred mode is not the mode with highest + * supported refresh rate. 
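
For reference, the walk that get_highest_refresh_rate_mode() performs above reduces to the following self-contained sketch: pick the preferred mode, fall back to the first list entry when the EDID marks none, then upgrade to any same-resolution mode with a strictly higher refresh rate. The simplified struct mode below is an illustrative stand-in for struct drm_display_mode, not the driver's type.

    #include <stdbool.h>
    #include <stddef.h>

    struct mode {
        int hdisplay, vdisplay;
        int vrefresh;          /* Hz, as drm_mode_vrefresh() would return */
        bool preferred;        /* DRM_MODE_TYPE_PREFERRED */
    };

    /* Pick the preferred mode, then upgrade to the highest-refresh mode at
     * the same resolution (some EDIDs mark a low-refresh mode preferred).
     * Returns NULL for an empty list. */
    static const struct mode *highest_refresh_same_res(const struct mode *m,
                                                       size_t n)
    {
        const struct mode *pref = NULL;
        size_t i;

        for (i = 0; i < n; i++) {
            if (m[i].preferred) {
                pref = &m[i];
                break;
            }
        }
        if (!pref && n)
            pref = &m[0];      /* EDID with no preferred mode: first entry */
        if (!pref)
            return NULL;

        for (i = 0; i < n; i++) {
            if (m[i].hdisplay == pref->hdisplay &&
                m[i].vdisplay == pref->vdisplay &&
                m[i].vrefresh > pref->vrefresh)
                pref = &m[i];
        }
        return pref;
    }
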
+ */ + list_for_each_entry (m, list_head, head) { + current_refresh = drm_mode_vrefresh(m); + + if (m->hdisplay == m_pref->hdisplay && + m->vdisplay == m_pref->vdisplay && + highest_refresh < current_refresh) { + highest_refresh = current_refresh; + m_pref = m; + } + } + + aconnector->freesync_vid_base = *m_pref; + return m_pref; +} + +static bool is_freesync_video_mode(struct drm_display_mode *mode, + struct amdgpu_dm_connector *aconnector) +{ + struct drm_display_mode *high_mode; + int timing_diff; + + high_mode = get_highest_refresh_rate_mode(aconnector, false); + if (!high_mode || !mode) + return false; + + timing_diff = high_mode->vtotal - mode->vtotal; + + if (high_mode->clock == 0 || high_mode->clock != mode->clock || + high_mode->hdisplay != mode->hdisplay || + high_mode->vdisplay != mode->vdisplay || + high_mode->hsync_start != mode->hsync_start || + high_mode->hsync_end != mode->hsync_end || + high_mode->htotal != mode->htotal || + high_mode->hskew != mode->hskew || + high_mode->vscan != mode->vscan || + high_mode->vsync_start - mode->vsync_start != timing_diff || + high_mode->vsync_end - mode->vsync_end != timing_diff) + return false; + else + return true; +} + static struct dc_stream_state * create_stream_for_sink(struct amdgpu_dm_connector *aconnector, const struct drm_display_mode *drm_mode, @@ -5193,8 +5375,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, dm_state ? &dm_state->base : NULL; struct dc_stream_state *stream = NULL; struct drm_display_mode mode = *drm_mode; + struct drm_display_mode saved_mode; + struct drm_display_mode *freesync_mode = NULL; bool native_mode_found = false; - bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false; + bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false; int mode_refresh; int preferred_refresh = 0; #if defined(CONFIG_DRM_AMD_DC_DCN) @@ -5202,6 +5386,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, uint32_t link_bandwidth_kbps; #endif struct dc_sink *sink = NULL; + + memset(&saved_mode, 0, sizeof(saved_mode)); + if (aconnector == NULL) { DRM_ERROR("aconnector is NULL!\n"); return stream; @@ -5254,25 +5441,38 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, */ DRM_DEBUG_DRIVER("No preferred mode found\n"); } else { - decide_crtc_timing_for_drm_display_mode( + recalculate_timing |= amdgpu_freesync_vid_mode && + is_freesync_video_mode(&mode, aconnector); + if (recalculate_timing) { + freesync_mode = get_highest_refresh_rate_mode(aconnector, false); + saved_mode = mode; + mode = *freesync_mode; + } else { + decide_crtc_timing_for_drm_display_mode( &mode, preferred_mode, dm_state ? 
(dm_state->scaling != RMX_OFF) : false); + } + preferred_refresh = drm_mode_vrefresh(preferred_mode); } - if (!dm_state) + if (recalculate_timing) + drm_mode_set_crtcinfo(&saved_mode, 0); + else drm_mode_set_crtcinfo(&mode, 0); - /* + /* * If scaling is enabled and refresh rate didn't change * we copy the vic and polarities of the old timings */ - if (!scale || mode_refresh != preferred_refresh) - fill_stream_properties_from_drm_display_mode(stream, - &mode, &aconnector->base, con_state, NULL, requested_bpc); + if (!recalculate_timing || mode_refresh != preferred_refresh) + fill_stream_properties_from_drm_display_mode( + stream, &mode, &aconnector->base, con_state, NULL, + requested_bpc); else - fill_stream_properties_from_drm_display_mode(stream, - &mode, &aconnector->base, con_state, old_stream, requested_bpc); + fill_stream_properties_from_drm_display_mode( + stream, &mode, &aconnector->base, con_state, old_stream, + requested_bpc); stream->timing.flags.DSC = 0; @@ -5409,15 +5609,22 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc) state->abm_level = cur->abm_level; state->vrr_supported = cur->vrr_supported; state->freesync_config = cur->freesync_config; - state->crc_src = cur->crc_src; state->cm_has_degamma = cur->cm_has_degamma; state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb; - /* TODO Duplicate dc_stream after objects are stream object is flattened */ return &state->base; } +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY +static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc) +{ + crtc_debugfs_init(crtc); + + return 0; +} +#endif + static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable) { enum dc_irq_source irq_source; @@ -5503,6 +5710,9 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { .enable_vblank = dm_enable_vblank, .disable_vblank = dm_disable_vblank, .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + .late_register = amdgpu_dm_crtc_late_register, +#endif }; static enum drm_connector_status @@ -6488,13 +6698,17 @@ static int dm_plane_helper_check_state(struct drm_plane_state *state, else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay) viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y; - /* If completely outside of screen, viewport_width and/or viewport_height will be negative, - * which is still OK to satisfy the condition below, thereby also covering these cases - * (when plane is completely outside of screen). - * x2 for width is because of pipe-split. - */ - if (viewport_width < MIN_VIEWPORT_SIZE*2 || viewport_height < MIN_VIEWPORT_SIZE) + if (viewport_width < 0 || viewport_height < 0) { + DRM_DEBUG_ATOMIC("Plane completely outside of screen\n"); + return -EINVAL; + } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */ + DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2); return -EINVAL; + } else if (viewport_height < MIN_VIEWPORT_SIZE) { + DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE); + return -EINVAL; + } + } /* Get min/max allowed scaling factors from plane caps. */ @@ -6975,11 +7189,118 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, */ drm_mode_sort(&connector->probed_modes); amdgpu_dm_get_native_mode(connector); + + /* Freesync capabilities are reset by calling + * drm_add_edid_modes() and need to be + * restored here. 
+ */ + amdgpu_dm_update_freesync_caps(connector, edid); } else { amdgpu_dm_connector->num_modes = 0; } } +static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector, + struct drm_display_mode *mode) +{ + struct drm_display_mode *m; + + list_for_each_entry (m, &aconnector->base.probed_modes, head) { + if (drm_mode_equal(m, mode)) + return true; + } + + return false; +} + +static uint add_fs_modes(struct amdgpu_dm_connector *aconnector) +{ + const struct drm_display_mode *m; + struct drm_display_mode *new_mode; + uint i; + uint32_t new_modes_count = 0; + + /* Standard FPS values + * + * 23.976 - TV/NTSC + * 24 - Cinema + * 25 - TV/PAL + * 29.97 - TV/NTSC + * 30 - TV/NTSC + * 48 - Cinema HFR + * 50 - TV/PAL + * 60 - Commonly used + * 48,72,96 - Multiples of 24 + */ + const uint32_t common_rates[] = { 23976, 24000, 25000, 29970, 30000, + 48000, 50000, 60000, 72000, 96000 }; + + /* + * Find mode with highest refresh rate with the same resolution + * as the preferred mode. Some monitors report a preferred mode + * with lower resolution than the highest refresh rate supported. + */ + + m = get_highest_refresh_rate_mode(aconnector, true); + if (!m) + return 0; + + for (i = 0; i < ARRAY_SIZE(common_rates); i++) { + uint64_t target_vtotal, target_vtotal_diff; + uint64_t num, den; + + if (drm_mode_vrefresh(m) * 1000 < common_rates[i]) + continue; + + if (common_rates[i] < aconnector->min_vfreq * 1000 || + common_rates[i] > aconnector->max_vfreq * 1000) + continue; + + num = (unsigned long long)m->clock * 1000 * 1000; + den = common_rates[i] * (unsigned long long)m->htotal; + target_vtotal = div_u64(num, den); + target_vtotal_diff = target_vtotal - m->vtotal; + + /* Check for illegal modes */ + if (m->vsync_start + target_vtotal_diff < m->vdisplay || + m->vsync_end + target_vtotal_diff < m->vsync_start || + m->vtotal + target_vtotal_diff < m->vsync_end) + continue; + + new_mode = drm_mode_duplicate(aconnector->base.dev, m); + if (!new_mode) + goto out; + + new_mode->vtotal += (u16)target_vtotal_diff; + new_mode->vsync_start += (u16)target_vtotal_diff; + new_mode->vsync_end += (u16)target_vtotal_diff; + new_mode->type &= ~DRM_MODE_TYPE_PREFERRED; + new_mode->type |= DRM_MODE_TYPE_DRIVER; + + if (!is_duplicate_mode(aconnector, new_mode)) { + drm_mode_probed_add(&aconnector->base, new_mode); + new_modes_count += 1; + } else + drm_mode_destroy(aconnector->base.dev, new_mode); + } + out: + return new_modes_count; +} + +static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector, + struct edid *edid) +{ + struct amdgpu_dm_connector *amdgpu_dm_connector = + to_amdgpu_dm_connector(connector); + + if (!(amdgpu_freesync_vid_mode && edid)) + return; + + if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) + amdgpu_dm_connector->num_modes += + add_fs_modes(amdgpu_dm_connector); +} + static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) { struct amdgpu_dm_connector *amdgpu_dm_connector = @@ -6995,6 +7316,7 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) } else { amdgpu_dm_connector_ddc_get_modes(connector, edid); amdgpu_dm_connector_add_common_modes(encoder, connector); + amdgpu_dm_connector_add_freesync_modes(connector, edid); } amdgpu_dm_fbc_init(connector); @@ -7299,8 +7621,19 @@ static void manage_dm_interrupts(struct amdgpu_device *adev, adev, &adev->pageflip_irq, irq_type); +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + amdgpu_irq_get( + adev, + &adev->vline0_irq, + irq_type); +#endif } else { - +#if 
defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + amdgpu_irq_put( + adev, + &adev->vline0_irq, + irq_type); +#endif amdgpu_irq_put( adev, &adev->pageflip_irq, @@ -7424,10 +7757,6 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, int x, y; int xorigin = 0, yorigin = 0; - position->enable = false; - position->x = 0; - position->y = 0; - if (!crtc || !plane->state->fb) return 0; @@ -7474,7 +7803,7 @@ static void handle_cursor_update(struct drm_plane *plane, struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL; struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); uint64_t address = afb ? afb->address : 0; - struct dc_cursor_position position; + struct dc_cursor_position position = {0}; struct dc_cursor_attributes attributes; int ret; @@ -7559,6 +7888,7 @@ static void update_freesync_state_on_stream( struct amdgpu_device *adev = dm->adev; struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); unsigned long flags; + bool pack_sdp_v1_3 = false; if (!new_stream) return; @@ -7600,7 +7930,8 @@ static void update_freesync_state_on_stream( &vrr_params, PACKET_TYPE_VRR, TRANSFER_FUNC_UNKNOWN, - &vrr_infopacket); + &vrr_infopacket, + pack_sdp_v1_3); new_crtc_state->freesync_timing_changed |= (memcmp(&acrtc->dm_irq_params.vrr_params.adjust, @@ -7654,9 +7985,22 @@ static void update_stream_irq_parameters( if (new_crtc_state->vrr_supported && config.min_refresh_in_uhz && config.max_refresh_in_uhz) { - config.state = new_crtc_state->base.vrr_enabled ? - VRR_STATE_ACTIVE_VARIABLE : - VRR_STATE_INACTIVE; + /* + * if freesync compatible mode was set, config.state will be set + * in atomic check + */ + if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz && + (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) || + new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) { + vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz; + vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz; + vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz; + vrr_params.state = VRR_STATE_ACTIVE_FIXED; + } else { + config.state = new_crtc_state->base.vrr_enabled ? + VRR_STATE_ACTIVE_VARIABLE : + VRR_STATE_INACTIVE; + } } else { config.state = VRR_STATE_UNSUPPORTED; } @@ -7977,8 +8321,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, * re-adjust the min/max bounds now that DC doesn't handle this * as part of commit. */ - if (amdgpu_dm_vrr_active(dm_old_crtc_state) != - amdgpu_dm_vrr_active(acrtc_state)) { + if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) { spin_lock_irqsave(&pcrtc->dev->event_lock, flags); dc_stream_adjust_vmin_vmax( dm->dc, acrtc_state->stream, @@ -8263,6 +8606,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) /* i.e. reset mode */ if (dm_old_crtc_state->stream) remove_stream(adev, acrtc, dm_old_crtc_state->stream); + mode_set_reset_required = true; } } /* for_each_crtc_in_state() */ @@ -8321,8 +8665,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) hdcp_update_display( adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector, new_con_state->hdcp_content_type, - new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED ? 
true - : false); + new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED); } #endif @@ -8432,7 +8775,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) */ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); - +#ifdef CONFIG_DEBUG_FS + bool configure_crc = false; + enum amdgpu_dm_pipe_crc_source cur_crc_src; +#endif dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); if (new_crtc_state->active && @@ -8448,12 +8794,21 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) * settings for the stream. */ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); + cur_crc_src = acrtc->dm_irq_params.crc_src; + spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); + + if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) { + configure_crc = true; +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + if (amdgpu_dm_crc_window_is_activated(crtc)) + configure_crc = false; +#endif + } - if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) { + if (configure_crc) amdgpu_dm_crtc_configure_crc_source( - crtc, dm_new_crtc_state, - dm_new_crtc_state->crc_src); - } + crtc, dm_new_crtc_state, cur_crc_src); #endif } } @@ -8662,6 +9017,7 @@ static void get_freesync_config_for_crtc( to_amdgpu_dm_connector(new_con_state->base.connector); struct drm_display_mode *mode = &new_crtc_state->base.mode; int vrefresh = drm_mode_vrefresh(mode); + bool fs_vid_mode = false; new_crtc_state->vrr_supported = new_con_state->freesync_capable && vrefresh >= aconnector->min_vfreq && @@ -8669,17 +9025,24 @@ static void get_freesync_config_for_crtc( if (new_crtc_state->vrr_supported) { new_crtc_state->stream->ignore_msa_timing_param = true; - config.state = new_crtc_state->base.vrr_enabled ? 
- VRR_STATE_ACTIVE_VARIABLE : - VRR_STATE_INACTIVE; - config.min_refresh_in_uhz = - aconnector->min_vfreq * 1000000; - config.max_refresh_in_uhz = - aconnector->max_vfreq * 1000000; + fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; + + config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000; + config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000; config.vsif_supported = true; config.btr = true; - } + if (fs_vid_mode) { + config.state = VRR_STATE_ACTIVE_FIXED; + config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz; + goto out; + } else if (new_crtc_state->base.vrr_enabled) { + config.state = VRR_STATE_ACTIVE_VARIABLE; + } else { + config.state = VRR_STATE_INACTIVE; + } + } +out: new_crtc_state->freesync_config = config; } @@ -8692,6 +9055,50 @@ static void reset_freesync_config_for_crtc( sizeof(new_crtc_state->vrr_infopacket)); } +static bool +is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, + struct drm_crtc_state *new_crtc_state) +{ + struct drm_display_mode old_mode, new_mode; + + if (!old_crtc_state || !new_crtc_state) + return false; + + old_mode = old_crtc_state->mode; + new_mode = new_crtc_state->mode; + + if (old_mode.clock == new_mode.clock && + old_mode.hdisplay == new_mode.hdisplay && + old_mode.vdisplay == new_mode.vdisplay && + old_mode.htotal == new_mode.htotal && + old_mode.vtotal != new_mode.vtotal && + old_mode.hsync_start == new_mode.hsync_start && + old_mode.vsync_start != new_mode.vsync_start && + old_mode.hsync_end == new_mode.hsync_end && + old_mode.vsync_end != new_mode.vsync_end && + old_mode.hskew == new_mode.hskew && + old_mode.vscan == new_mode.vscan && + (old_mode.vsync_end - old_mode.vsync_start) == + (new_mode.vsync_end - new_mode.vsync_start)) + return true; + + return false; +} + +static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) { + uint64_t num, den, res; + struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base; + + dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED; + + num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000; + den = (unsigned long long)new_crtc_state->mode.htotal * + (unsigned long long)new_crtc_state->mode.vtotal; + + res = div_u64(num, den); + dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res; +} + static int dm_update_crtc_state(struct amdgpu_display_manager *dm, struct drm_atomic_state *state, struct drm_crtc *crtc, @@ -8782,6 +9189,11 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, * TODO: Refactor this function to allow this check to work * in all conditions. 
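
The set_freesync_fixed_config() computation above converts the mode's pixel clock (mode->clock is in kHz, hence the * 1000) and the total raster size into a refresh rate in microhertz. A compact restatement with a worked example:

    #include <stdint.h>

    /* Fixed refresh rate in uHz: pixel clock divided by total pixels per
     * frame, scaled to microhertz.
     *
     * Example: 1920x1080 with htotal 2200, vtotal 1125, clock 148500 kHz:
     * 148500000 * 1000000 / (2200 * 1125) = 60000000 uHz = 60.000 Hz. */
    static uint64_t fixed_refresh_in_uhz(uint32_t clock_khz,
                                         uint32_t htotal, uint32_t vtotal)
    {
        uint64_t num = (uint64_t)clock_khz * 1000 * 1000000;
        uint64_t den = (uint64_t)htotal * vtotal;

        return num / den;        /* div_u64(num, den) in the kernel code */
    }
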
*/ + if (amdgpu_freesync_vid_mode && + dm_new_crtc_state->stream && + is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) + goto skip_modeset; + if (dm_new_crtc_state->stream && dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) { @@ -8813,6 +9225,24 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, if (!dm_old_crtc_state->stream) goto skip_modeset; + if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream && + is_timing_unchanged_for_freesync(new_crtc_state, + old_crtc_state)) { + new_crtc_state->mode_changed = false; + DRM_DEBUG_DRIVER( + "Mode change not required for front porch change, " + "setting mode_changed to %d", + new_crtc_state->mode_changed); + + set_freesync_fixed_config(dm_new_crtc_state); + + goto skip_modeset; + } else if (amdgpu_freesync_vid_mode && aconnector && + is_freesync_video_mode(&new_crtc_state->mode, + aconnector)) { + set_freesync_fixed_config(dm_new_crtc_state); + } + ret = dm_atomic_get_state(state, &dm_state); if (ret) goto fail; @@ -9390,7 +9820,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, } #if defined(CONFIG_DRM_AMD_DC_DCN) - if (adev->asic_type >= CHIP_NAVI10) { + if (dc_resource_is_dsc_encoding_supported(dc)) { for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (drm_atomic_crtc_needs_modeset(new_crtc_state)) { ret = add_affected_mst_dsc_crtcs(state, crtc); @@ -9696,11 +10126,85 @@ static bool is_dp_capable_without_timing_msa(struct dc *dc, return capable; } + +static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector, + uint8_t *edid_ext, int len, + struct amdgpu_hdmi_vsdb_info *vsdb_info) +{ + int i; + struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev); + struct dc *dc = adev->dm.dc; + + /* send extension block to DMCU for parsing */ + for (i = 0; i < len; i += 8) { + bool res; + int offset; + + /* send 8 bytes a time */ + if (!dc_edid_parser_send_cea(dc, i, len, &edid_ext[i], 8)) + return false; + + if (i+8 == len) { + /* EDID block sent completed, expect result */ + int version, min_rate, max_rate; + + res = dc_edid_parser_recv_amd_vsdb(dc, &version, &min_rate, &max_rate); + if (res) { + /* amd vsdb found */ + vsdb_info->freesync_supported = 1; + vsdb_info->amd_vsdb_version = version; + vsdb_info->min_refresh_rate_hz = min_rate; + vsdb_info->max_refresh_rate_hz = max_rate; + return true; + } + /* not amd vsdb */ + return false; + } + + /* check for ack*/ + res = dc_edid_parser_recv_cea_ack(dc, &offset); + if (!res) + return false; + } + + return false; +} + +static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, + struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) +{ + uint8_t *edid_ext = NULL; + int i; + bool valid_vsdb_found = false; + + /*----- drm_find_cea_extension() -----*/ + /* No EDID or EDID extensions */ + if (edid == NULL || edid->extensions == 0) + return -ENODEV; + + /* Find CEA extension */ + for (i = 0; i < edid->extensions; i++) { + edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1); + if (edid_ext[0] == CEA_EXT) + break; + } + + if (i == edid->extensions) + return -ENODEV; + + /*----- cea_db_offsets() -----*/ + if (edid_ext[0] != CEA_EXT) + return -ENODEV; + + valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info); + + return valid_vsdb_found ? 
i : -ENODEV; +} + void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, struct edid *edid) { - int i; - bool edid_check_required; + int i = 0; struct detailed_timing *timing; struct detailed_non_pixel *data; struct detailed_data_monitor_range *range; @@ -9711,6 +10215,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, struct drm_device *dev = connector->dev; struct amdgpu_device *adev = drm_to_adev(dev); bool freesync_capable = false; + struct amdgpu_hdmi_vsdb_info vsdb_info = {0}; if (!connector->state) { DRM_ERROR("%s - Connector has no state", __func__); @@ -9729,60 +10234,75 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, dm_con_state = to_dm_connector_state(connector->state); - edid_check_required = false; if (!amdgpu_dm_connector->dc_sink) { DRM_ERROR("dc_sink NULL, could not add free_sync module.\n"); goto update; } if (!adev->dm.freesync_module) goto update; - /* - * if edid non zero restrict freesync only for dp and edp - */ - if (edid) { - if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT - || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) { + + + if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT + || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) { + bool edid_check_required = false; + + if (edid) { edid_check_required = is_dp_capable_without_timing_msa( adev->dm.dc, amdgpu_dm_connector); } - } - if (edid_check_required == true && (edid->version > 1 || - (edid->version == 1 && edid->revision > 1))) { - for (i = 0; i < 4; i++) { - timing = &edid->detailed_timings[i]; - data = &timing->data.other_data; - range = &data->data.range; - /* - * Check if monitor has continuous frequency mode - */ - if (data->type != EDID_DETAIL_MONITOR_RANGE) - continue; - /* - * Check for flag range limits only. If flag == 1 then - * no additional timing information provided. - * Default GTF, GTF Secondary curve and CVT are not - * supported - */ - if (range->flags != 1) - continue; + if (edid_check_required == true && (edid->version > 1 || + (edid->version == 1 && edid->revision > 1))) { + for (i = 0; i < 4; i++) { - amdgpu_dm_connector->min_vfreq = range->min_vfreq; - amdgpu_dm_connector->max_vfreq = range->max_vfreq; - amdgpu_dm_connector->pixel_clock_mhz = - range->pixel_clock_mhz * 10; + timing = &edid->detailed_timings[i]; + data = &timing->data.other_data; + range = &data->data.range; + /* + * Check if monitor has continuous frequency mode + */ + if (data->type != EDID_DETAIL_MONITOR_RANGE) + continue; + /* + * Check for flag range limits only. If flag == 1 then + * no additional timing information provided. 
+ * Default GTF, GTF Secondary curve and CVT are not + * supported + */ + if (range->flags != 1) + continue; - connector->display_info.monitor_range.min_vfreq = range->min_vfreq; - connector->display_info.monitor_range.max_vfreq = range->max_vfreq; + amdgpu_dm_connector->min_vfreq = range->min_vfreq; + amdgpu_dm_connector->max_vfreq = range->max_vfreq; + amdgpu_dm_connector->pixel_clock_mhz = + range->pixel_clock_mhz * 10; - break; - } + connector->display_info.monitor_range.min_vfreq = range->min_vfreq; + connector->display_info.monitor_range.max_vfreq = range->max_vfreq; - if (amdgpu_dm_connector->max_vfreq - - amdgpu_dm_connector->min_vfreq > 10) { + break; + } - freesync_capable = true; + if (amdgpu_dm_connector->max_vfreq - + amdgpu_dm_connector->min_vfreq > 10) { + + freesync_capable = true; + } + } + } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) { + i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); + if (i >= 0 && vsdb_info.freesync_supported) { + timing = &edid->detailed_timings[i]; + data = &timing->data.other_data; + + amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz; + amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz; + if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) + freesync_capable = true; + + connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz; + connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz; } } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 8bfe901cf237..8f98d44490aa 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -69,18 +69,6 @@ struct common_irq_params { }; /** - * struct irq_list_head - Linked-list for low context IRQ handlers. - * - * @head: The list_head within &struct handler_data - * @work: A work_struct containing the deferred handler work - */ -struct irq_list_head { - struct list_head head; - /* In case this interrupt needs post-processing, 'work' will be queued*/ - struct work_struct work; -}; - -/** * struct dm_compressor_info - Buffer info used by frame buffer compression * @cpu_addr: MMIO cpu addr * @bo_ptr: Pointer to the buffer object @@ -145,6 +133,16 @@ struct amdgpu_dm_backlight_caps { }; /** + * struct dal_allocation - Tracks mapped FB memory for SMU communication + */ +struct dal_allocation { + struct list_head list; + struct amdgpu_bo *bo; + void *cpu_ptr; + u64 gpu_addr; +}; + +/** * struct amdgpu_display_manager - Central amdgpu display manager device * * @dc: Display Core control structure @@ -257,12 +255,12 @@ struct amdgpu_display_manager { */ struct mutex audio_lock; +#if defined(CONFIG_DRM_AMD_DC_DCN) /** - * @vblank_work_lock: + * @vblank_lock: * * Guards access to deferred vblank work state. */ -#if defined(CONFIG_DRM_AMD_DC_DCN) spinlock_t vblank_lock; #endif @@ -293,7 +291,7 @@ struct amdgpu_display_manager { * Note that handlers are called in the same order as they were * registered (FIFO). */ - struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER]; + struct list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER]; /** * @irq_handler_list_high_tab: @@ -324,6 +322,15 @@ struct amdgpu_display_manager { vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1]; /** + * @vline0_params: + * + * OTG vertical interrupt0 IRQ parameters, passed to registered + * handlers when triggered. 
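
Both the DP/eDP monitor-range path and the new HDMI VSDB path above gate FreeSync on the same condition: the sink reports support and a usable refresh span of more than 10 Hz. A sketch of that test over the new amdgpu_hdmi_vsdb_info fields (struct name shortened here for the example):

    #include <stdbool.h>

    struct vsdb_info {
        bool freesync_supported;
        unsigned int min_refresh_rate_hz;
        unsigned int max_refresh_rate_hz;
    };

    /* Mirror of the capability test above: FreeSync is only advertised
     * when the usable refresh span exceeds 10 Hz. */
    static bool freesync_capable_from_vsdb(const struct vsdb_info *v)
    {
        return v->freesync_supported &&
               v->max_refresh_rate_hz - v->min_refresh_rate_hz > 10;
    }
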
+ */ + struct common_irq_params + vline0_params[DC_IRQ_SOURCE_DC6_VLINE0 - DC_IRQ_SOURCE_DC1_VLINE0 + 1]; + + /** * @vupdate_params: * * Vertical update IRQ parameters, passed to registered handlers when @@ -345,6 +352,11 @@ struct amdgpu_display_manager { #endif #if defined(CONFIG_DRM_AMD_DC_DCN) + /** + * @vblank_workqueue: + * + * amdgpu workqueue during vblank + */ struct vblank_workqueue *vblank_workqueue; #endif @@ -363,12 +375,18 @@ struct amdgpu_display_manager { */ const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box; +#if defined(CONFIG_DRM_AMD_DC_DCN) /** * @active_vblank_irq_count: * * number of currently active vblank irqs */ uint32_t active_vblank_irq_count; +#endif + +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + struct crc_rd_work *crc_rd_wrk; +#endif /** * @mst_encoders: @@ -377,6 +395,13 @@ struct amdgpu_display_manager { */ struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC]; bool force_timing_sync; + bool dmcub_trace_event_en; + /** + * @da_list: + * + * DAL fb memory allocation list, for communication with SMU. + */ + struct list_head da_list; }; enum dsc_clock_force_state { @@ -440,6 +465,8 @@ struct amdgpu_dm_connector { #endif bool force_yuv420_output; struct dsc_preferred_settings dsc_settings; + /* Cached display modes */ + struct drm_display_mode freesync_vid_base; }; #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base) @@ -462,7 +489,6 @@ struct dm_crtc_state { int active_planes; int crc_skip_count; - enum amdgpu_dm_pipe_crc_source crc_src; bool freesync_timing_changed; bool freesync_vrr_info_changed; @@ -501,6 +527,14 @@ struct dm_connector_state { uint64_t pbn; }; +struct amdgpu_hdmi_vsdb_info { + unsigned int amd_vsdb_version; /* VSDB version, should be used to determine which VSIF to send */ + bool freesync_supported; /* FreeSync Supported */ + unsigned int min_refresh_rate_hz; /* FreeSync Minimum Refresh Rate in Hz */ + unsigned int max_refresh_rate_hz; /* FreeSync Maximum Refresh Rate in Hz */ +}; + + #define to_dm_connector_state(x)\ container_of((x), struct dm_connector_state, base) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index 66cb8730586b..c6d6baab106e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -29,6 +29,7 @@ #include "amdgpu.h" #include "amdgpu_dm.h" #include "dc.h" +#include "amdgpu_securedisplay.h" static const char *const pipe_crc_sources[] = { "none", @@ -81,6 +82,73 @@ const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc, return pipe_crc_sources; } +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY +static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc) +{ + struct drm_device *drm_dev = crtc->dev; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + + spin_lock_irq(&drm_dev->event_lock); + acrtc->dm_irq_params.crc_window.x_start = 0; + acrtc->dm_irq_params.crc_window.y_start = 0; + acrtc->dm_irq_params.crc_window.x_end = 0; + acrtc->dm_irq_params.crc_window.y_end = 0; + acrtc->dm_irq_params.crc_window.activated = false; + acrtc->dm_irq_params.crc_window.update_win = false; + acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0; + spin_unlock_irq(&drm_dev->event_lock); +} + +static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work) +{ + struct crc_rd_work *crc_rd_wrk; + struct amdgpu_device *adev; + struct psp_context *psp; + struct securedisplay_cmd *securedisplay_cmd; + struct drm_crtc *crtc; + uint8_t 
phy_id; + int ret; + + crc_rd_wrk = container_of(work, struct crc_rd_work, notify_ta_work); + spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock); + crtc = crc_rd_wrk->crtc; + + if (!crtc) { + spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock); + return; + } + + adev = drm_to_adev(crtc->dev); + psp = &adev->psp; + phy_id = crc_rd_wrk->phy_inst; + spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock); + + psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd, + TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC); + securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = + phy_id; + ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC); + if (!ret) { + if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS) { + psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status); + } + } +} + +bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc) +{ + struct drm_device *drm_dev = crtc->dev; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + bool ret = false; + + spin_lock_irq(&drm_dev->event_lock); + ret = acrtc->dm_irq_params.crc_window.activated; + spin_unlock_irq(&drm_dev->event_lock); + + return ret; +} +#endif + int amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name, size_t *values_cnt) @@ -114,6 +182,20 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc, /* Enable CRTC CRC generation if necessary. */ if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) { +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + if (!enable) { + if (adev->dm.crc_rd_wrk) { + flush_work(&adev->dm.crc_rd_wrk->notify_ta_work); + spin_lock_irq(&adev->dm.crc_rd_wrk->crc_rd_work_lock); + if (adev->dm.crc_rd_wrk->crtc == crtc) { + dc_stream_stop_dmcu_crc_win_update(stream_state->ctx->dc, + dm_crtc_state->stream); + adev->dm.crc_rd_wrk->crtc = NULL; + } + spin_unlock_irq(&adev->dm.crc_rd_wrk->crc_rd_work_lock); + } + } +#endif if (!dc_stream_configure_crc(stream_state->ctx->dc, stream_state, NULL, enable, enable)) { ret = -EINVAL; @@ -142,8 +224,11 @@ unlock: int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) { enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name); + enum amdgpu_dm_pipe_crc_source cur_crc_src; struct drm_crtc_commit *commit; struct dm_crtc_state *crtc_state; + struct drm_device *drm_dev = crtc->dev; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); struct drm_dp_aux *aux = NULL; bool enable = false; bool enabled = false; @@ -182,6 +267,9 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) enable = amdgpu_dm_is_valid_crc_source(source); crtc_state = to_dm_crtc_state(crtc->state); + spin_lock_irq(&drm_dev->event_lock); + cur_crc_src = acrtc->dm_irq_params.crc_src; + spin_unlock_irq(&drm_dev->event_lock); /* * USER REQ SRC | CURRENT SRC | BEHAVIOR @@ -198,7 +286,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) */ if (dm_is_crc_source_dprx(source) || (source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE && - dm_is_crc_source_dprx(crtc_state->crc_src))) { + dm_is_crc_source_dprx(cur_crc_src))) { struct amdgpu_dm_connector *aconn = NULL; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; @@ -228,6 +316,10 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) } } +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + amdgpu_dm_set_crc_window_default(crtc); +#endif + if (amdgpu_dm_crtc_configure_crc_source(crtc, crtc_state, source)) { ret = -EINVAL; goto cleanup; @@ -237,7 +329,7 @@ int 
amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) * Reading the CRC requires the vblank interrupt handler to be * enabled. Keep a reference until CRC capture stops. */ - enabled = amdgpu_dm_is_valid_crc_source(crtc_state->crc_src); + enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src); if (!enabled && enable) { ret = drm_crtc_vblank_get(crtc); if (ret) @@ -261,7 +353,9 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) } } - crtc_state->crc_src = source; + spin_lock_irq(&drm_dev->event_lock); + acrtc->dm_irq_params.crc_src = source; + spin_unlock_irq(&drm_dev->event_lock); /* Reset crc_skipped on dm state */ crtc_state->crc_skip_count = 0; @@ -286,16 +380,26 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc) { struct dm_crtc_state *crtc_state; struct dc_stream_state *stream_state; + struct drm_device *drm_dev = NULL; + enum amdgpu_dm_pipe_crc_source cur_crc_src; + struct amdgpu_crtc *acrtc = NULL; uint32_t crcs[3]; + unsigned long flags; if (crtc == NULL) return; crtc_state = to_dm_crtc_state(crtc->state); stream_state = crtc_state->stream; + acrtc = to_amdgpu_crtc(crtc); + drm_dev = crtc->dev; + + spin_lock_irqsave(&drm_dev->event_lock, flags); + cur_crc_src = acrtc->dm_irq_params.crc_src; + spin_unlock_irqrestore(&drm_dev->event_lock, flags); /* Early return if CRC capture is not enabled. */ - if (!amdgpu_dm_is_valid_crc_source(crtc_state->crc_src)) + if (!amdgpu_dm_is_valid_crc_source(cur_crc_src)) return; /* @@ -309,7 +413,7 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc) return; } - if (dm_is_crc_source_crtc(crtc_state->crc_src)) { + if (dm_is_crc_source_crtc(cur_crc_src)) { if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, &crcs[0], &crcs[1], &crcs[2])) return; @@ -318,3 +422,182 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc) drm_crtc_accurate_vblank_count(crtc), crcs); } } + +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) +void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc) +{ + struct dc_stream_state *stream_state; + struct drm_device *drm_dev = NULL; + enum amdgpu_dm_pipe_crc_source cur_crc_src; + struct amdgpu_crtc *acrtc = NULL; + struct amdgpu_device *adev = NULL; + struct crc_rd_work *crc_rd_wrk = NULL; + struct crc_params *crc_window = NULL, tmp_window; + unsigned long flags1, flags2; + struct crtc_position position; + uint32_t v_blank; + uint32_t v_back_porch; + uint32_t crc_window_latch_up_line; + struct dc_crtc_timing *timing_out; + + if (crtc == NULL) + return; + + acrtc = to_amdgpu_crtc(crtc); + adev = drm_to_adev(crtc->dev); + drm_dev = crtc->dev; + + spin_lock_irqsave(&drm_dev->event_lock, flags1); + stream_state = acrtc->dm_irq_params.stream; + cur_crc_src = acrtc->dm_irq_params.crc_src; + timing_out = &stream_state->timing; + + /* Early return if CRC capture is not enabled. 
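
The CRC rework above moves crc_src out of dm_crtc_state and into acrtc->dm_irq_params, so every reader now snapshots it under the DRM device's event_lock before use. A user-space restatement of that snapshot pattern, with a pthread mutex standing in for the kernel spinlock:

    #include <pthread.h>

    /* Fields shared with the interrupt handler are copied while the lock
     * excludes the IRQ side, then inspected outside the lock. */
    struct irq_params {
        pthread_mutex_t lock;
        int crc_src;
    };

    static int snapshot_crc_src(struct irq_params *p)
    {
        int cur;

        pthread_mutex_lock(&p->lock);
        cur = p->crc_src;         /* copy under the lock */
        pthread_mutex_unlock(&p->lock);
        return cur;               /* safe to use without the lock */
    }
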
*/ + if (!amdgpu_dm_is_valid_crc_source(cur_crc_src)) + goto cleanup; + + if (dm_is_crc_source_crtc(cur_crc_src)) { + if (acrtc->dm_irq_params.crc_window.activated) { + if (acrtc->dm_irq_params.crc_window.update_win) { + if (acrtc->dm_irq_params.crc_window.skip_frame_cnt) { + acrtc->dm_irq_params.crc_window.skip_frame_cnt -= 1; + goto cleanup; + } + crc_window = &tmp_window; + + tmp_window.windowa_x_start = + acrtc->dm_irq_params.crc_window.x_start; + tmp_window.windowa_y_start = + acrtc->dm_irq_params.crc_window.y_start; + tmp_window.windowa_x_end = + acrtc->dm_irq_params.crc_window.x_end; + tmp_window.windowa_y_end = + acrtc->dm_irq_params.crc_window.y_end; + tmp_window.windowb_x_start = + acrtc->dm_irq_params.crc_window.x_start; + tmp_window.windowb_y_start = + acrtc->dm_irq_params.crc_window.y_start; + tmp_window.windowb_x_end = + acrtc->dm_irq_params.crc_window.x_end; + tmp_window.windowb_y_end = + acrtc->dm_irq_params.crc_window.y_end; + + dc_stream_forward_dmcu_crc_window(stream_state->ctx->dc, + stream_state, crc_window); + + acrtc->dm_irq_params.crc_window.update_win = false; + + dc_stream_get_crtc_position(stream_state->ctx->dc, &stream_state, 1, + &position.vertical_count, + &position.nominal_vcount); + + v_blank = timing_out->v_total - timing_out->v_border_top - + timing_out->v_addressable - timing_out->v_border_bottom; + + v_back_porch = v_blank - timing_out->v_front_porch - + timing_out->v_sync_width; + + crc_window_latch_up_line = v_back_porch + timing_out->v_sync_width; + + /* take 3 lines margin*/ + if ((position.vertical_count + 3) >= crc_window_latch_up_line) + acrtc->dm_irq_params.crc_window.skip_frame_cnt = 1; + else + acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0; + } else { + if (acrtc->dm_irq_params.crc_window.skip_frame_cnt == 0) { + if (adev->dm.crc_rd_wrk) { + crc_rd_wrk = adev->dm.crc_rd_wrk; + spin_lock_irqsave(&crc_rd_wrk->crc_rd_work_lock, flags2); + crc_rd_wrk->phy_inst = + stream_state->link->link_enc_hw_inst; + spin_unlock_irqrestore(&crc_rd_wrk->crc_rd_work_lock, flags2); + schedule_work(&crc_rd_wrk->notify_ta_work); + } + } else { + acrtc->dm_irq_params.crc_window.skip_frame_cnt -= 1; + } + } + } + } + +cleanup: + spin_unlock_irqrestore(&drm_dev->event_lock, flags1); +} + +void amdgpu_dm_crtc_secure_display_resume(struct amdgpu_device *adev) +{ + struct drm_crtc *crtc; + enum amdgpu_dm_pipe_crc_source cur_crc_src; + struct crc_rd_work *crc_rd_wrk = adev->dm.crc_rd_wrk; + struct crc_window_parm cur_crc_window; + struct amdgpu_crtc *acrtc = NULL; + + drm_for_each_crtc(crtc, &adev->ddev) { + acrtc = to_amdgpu_crtc(crtc); + + spin_lock_irq(&adev_to_drm(adev)->event_lock); + cur_crc_src = acrtc->dm_irq_params.crc_src; + cur_crc_window = acrtc->dm_irq_params.crc_window; + spin_unlock_irq(&adev_to_drm(adev)->event_lock); + + if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) { + amdgpu_dm_crtc_set_crc_source(crtc, + pipe_crc_sources[cur_crc_src]); + spin_lock_irq(&adev_to_drm(adev)->event_lock); + acrtc->dm_irq_params.crc_window = cur_crc_window; + if (acrtc->dm_irq_params.crc_window.activated) { + acrtc->dm_irq_params.crc_window.update_win = true; + acrtc->dm_irq_params.crc_window.skip_frame_cnt = 1; + spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock); + crc_rd_wrk->crtc = crtc; + spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock); + } + spin_unlock_irq(&adev_to_drm(adev)->event_lock); + } + } +} + +void amdgpu_dm_crtc_secure_display_suspend(struct amdgpu_device *adev) +{ + struct drm_crtc *crtc; + struct crc_window_parm cur_crc_window; + enum 
amdgpu_dm_pipe_crc_source cur_crc_src; + struct amdgpu_crtc *acrtc = NULL; + + drm_for_each_crtc(crtc, &adev->ddev) { + acrtc = to_amdgpu_crtc(crtc); + + spin_lock_irq(&adev_to_drm(adev)->event_lock); + cur_crc_src = acrtc->dm_irq_params.crc_src; + cur_crc_window = acrtc->dm_irq_params.crc_window; + cur_crc_window.update_win = false; + spin_unlock_irq(&adev_to_drm(adev)->event_lock); + + if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) { + amdgpu_dm_crtc_set_crc_source(crtc, NULL); + spin_lock_irq(&adev_to_drm(adev)->event_lock); + /* For resume to set back crc source*/ + acrtc->dm_irq_params.crc_src = cur_crc_src; + acrtc->dm_irq_params.crc_window = cur_crc_window; + spin_unlock_irq(&adev_to_drm(adev)->event_lock); + } + } + +} + +struct crc_rd_work *amdgpu_dm_crtc_secure_display_create_work(void) +{ + struct crc_rd_work *crc_rd_wrk = NULL; + + crc_rd_wrk = kzalloc(sizeof(*crc_rd_wrk), GFP_KERNEL); + + if (!crc_rd_wrk) + return NULL; + + spin_lock_init(&crc_rd_wrk->crc_rd_work_lock); + INIT_WORK(&crc_rd_wrk->notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read); + + return crc_rd_wrk; +} +#endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h index f7d731797d3f..737e701fb0f0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h @@ -39,6 +39,29 @@ enum amdgpu_dm_pipe_crc_source { AMDGPU_DM_PIPE_CRC_SOURCE_INVALID = -1, }; +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY +struct crc_window_parm { + uint16_t x_start; + uint16_t y_start; + uint16_t x_end; + uint16_t y_end; + /* CRC windwo is activated or not*/ + bool activated; + /* Update crc window during vertical blank or not */ + bool update_win; + /* skip reading/writing for few frames */ + int skip_frame_cnt; +}; + +struct crc_rd_work { + struct work_struct notify_ta_work; + /* To protect crc_rd_work carried fields*/ + spinlock_t crc_rd_work_lock; + struct drm_crtc *crtc; + uint8_t phy_inst; +}; +#endif + static inline bool amdgpu_dm_is_valid_crc_source(enum amdgpu_dm_pipe_crc_source source) { return (source > AMDGPU_DM_PIPE_CRC_SOURCE_NONE) && @@ -64,4 +87,18 @@ void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc); #define amdgpu_dm_crtc_handle_crc_irq(x) #endif +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY +bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc); +void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc); +struct crc_rd_work *amdgpu_dm_crtc_secure_display_create_work(void); +void amdgpu_dm_crtc_secure_display_resume(struct amdgpu_device *adev); +void amdgpu_dm_crtc_secure_display_suspend(struct amdgpu_device *adev); +#else +#define amdgpu_dm_crc_window_is_activated(x) +#define amdgpu_dm_crtc_handle_crc_window_irq(x) +#define amdgpu_dm_crtc_secure_display_create_work() +#define amdgpu_dm_crtc_secure_display_resume(x) +#define amdgpu_dm_crtc_secure_display_suspend(x) +#endif + #endif /* AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 360952129b6d..927de7678a4f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -25,8 +25,6 @@ #include <linux/uaccess.h> -#include <drm/drm_debugfs.h> - #include "dc.h" #include "amdgpu.h" #include "amdgpu_dm.h" @@ -36,6 +34,7 @@ #include "resource.h" #include "dsc.h" #include "dc_link_dp.h" +#include "dc/dc_dmub_srv.h" struct 
dmub_debugfs_trace_header { uint32_t entry_count; @@ -2154,6 +2153,149 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, return result; } + +/* + * function description: Read max_requested_bpc property from the connector + * + * Access it with the following command: + * + * cat /sys/kernel/debug/dri/0/DP-X/max_bpc + * + */ +static ssize_t dp_max_bpc_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; + struct drm_connector *connector = &aconnector->base; + struct drm_device *dev = connector->dev; + struct dm_connector_state *state; + ssize_t result = 0; + char *rd_buf = NULL; + char *rd_buf_ptr = NULL; + const uint32_t rd_buf_size = 10; + int r; + + rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); + + if (!rd_buf) + return -ENOMEM; + + mutex_lock(&dev->mode_config.mutex); + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + + if (connector->state == NULL) + goto unlock; + + state = to_dm_connector_state(connector->state); + + rd_buf_ptr = rd_buf; + snprintf(rd_buf_ptr, rd_buf_size, + "%u\n", + state->base.max_requested_bpc); + + while (size) { + if (*pos >= rd_buf_size) + break; + + r = put_user(*(rd_buf + result), buf); + if (r) { + result = r; /* r = -EFAULT */ + goto unlock; + } + buf += 1; + size -= 1; + *pos += 1; + result += 1; + } +unlock: + drm_modeset_unlock(&dev->mode_config.connection_mutex); + mutex_unlock(&dev->mode_config.mutex); + kfree(rd_buf); + return result; +} + + +/* + * function description: Set max_requested_bpc property on the connector + * + * This function will not force the input BPC on connector, it will only + * change the max value. This is equivalent to setting max_bpc through + * xrandr. + * + * The BPC value written must be >= 6 and <= 16. Values outside of this + * range will result in errors. 
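
dp_max_bpc_write() above parses a single integer and rejects anything outside 6..16 before storing it as max_requested_bpc, which caps rather than forces the connector's bpc. The validation reduces to:

    #include <errno.h>

    /* Range check used by dp_max_bpc_write(): only 6..16 is accepted. */
    static int validate_max_bpc(long bpc)
    {
        if (bpc < 6 || bpc > 16)
            return -EINVAL;
        return 0;
    }

So, for example, writing 0xa to the max_bpc file caps the connector at 10 bpc while the actually negotiated depth is still left to the driver.
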
+ * + * BPC values: + * 0x6 - 6 BPC + * 0x8 - 8 BPC + * 0xa - 10 BPC + * 0xc - 12 BPC + * 0x10 - 16 BPC + * + * Write the max_bpc in the following way: + * + * echo 0x6 > /sys/kernel/debug/dri/0/DP-X/max_bpc + * + */ +static ssize_t dp_max_bpc_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; + struct drm_connector *connector = &aconnector->base; + struct dm_connector_state *state; + struct drm_device *dev = connector->dev; + char *wr_buf = NULL; + uint32_t wr_buf_size = 42; + int max_param_num = 1; + long param[1] = {0}; + uint8_t param_nums = 0; + + if (size == 0) + return -EINVAL; + + wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); + + if (!wr_buf) { + DRM_DEBUG_DRIVER("no memory to allocate write buffer\n"); + return -ENOSPC; + } + + if (parse_write_buffer_into_params(wr_buf, size, + (long *)param, buf, + max_param_num, + ¶m_nums)) { + kfree(wr_buf); + return -EINVAL; + } + + if (param_nums <= 0) { + DRM_DEBUG_DRIVER("user data not be read\n"); + kfree(wr_buf); + return -EINVAL; + } + + if (param[0] < 6 || param[0] > 16) { + DRM_DEBUG_DRIVER("bad max_bpc value\n"); + kfree(wr_buf); + return -EINVAL; + } + + mutex_lock(&dev->mode_config.mutex); + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + + if (connector->state == NULL) + goto unlock; + + state = to_dm_connector_state(connector->state); + state->base.max_requested_bpc = param[0]; +unlock: + drm_modeset_unlock(&dev->mode_config.connection_mutex); + mutex_unlock(&dev->mode_config.mutex); + + kfree(wr_buf); + return size; +} + DEFINE_SHOW_ATTRIBUTE(dp_dsc_fec_support); DEFINE_SHOW_ATTRIBUTE(dmub_fw_state); DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer); @@ -2265,6 +2407,13 @@ static const struct file_operations dp_dpcd_data_debugfs_fops = { .llseek = default_llseek }; +static const struct file_operations dp_max_bpc_debugfs_fops = { + .owner = THIS_MODULE, + .read = dp_max_bpc_read, + .write = dp_max_bpc_write, + .llseek = default_llseek +}; + static const struct { char *name; const struct file_operations *fops; @@ -2287,7 +2436,8 @@ static const struct { {"dsc_pic_height", &dp_dsc_pic_height_debugfs_fops}, {"dsc_chunk_size", &dp_dsc_chunk_size_debugfs_fops}, {"dsc_slice_bpg", &dp_dsc_slice_bpg_offset_debugfs_fops}, - {"dp_dsc_fec_support", &dp_dsc_fec_support_fops} + {"dp_dsc_fec_support", &dp_dsc_fec_support_fops}, + {"max_bpc", &dp_max_bpc_debugfs_fops} }; #ifdef CONFIG_DRM_AMD_DC_HDCP @@ -2341,9 +2491,51 @@ static int psr_get(void *data, u64 *val) return 0; } +/* + * Set dmcub trace event IRQ enable or disable. 
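
The dmcub trace-event control above wires a get/set pair through DEFINE_DEBUGFS_ATTRIBUTE; note that the setter silently ignores any value other than 0 or 1. A stricter variant (an editorial sketch, not the patch's behavior) would reject them instead:

    #include <errno.h>
    #include <stdbool.h>

    /* Variant setter that refuses values other than 0/1; in the driver,
     * dc_dmub_trace_event_control(dc, val) performs the actual toggle. */
    static int trace_event_state_set(bool *en, unsigned long long val)
    {
        if (val != 0 && val != 1)
            return -EINVAL;
        *en = (bool)val;
        return 0;
    }
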
+static const struct {
+	char *name;
+	const struct file_operations *fops;
+} connector_debugfs_entries[] = {
+		{"force_yuv420_output", &force_yuv420_output_fops},
+		{"output_bpc", &output_bpc_fops},
+		{"trigger_hotplug", &trigger_hotplug_debugfs_fops}
+};
+
 void connector_debugfs_init(struct amdgpu_dm_connector *connector)
 {
 	int i;
@@ -2360,14 +2552,11 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
 	if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
 		debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops);
 
-	debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector,
-				   &force_yuv420_output_fops);
-
-	debugfs_create_file("output_bpc", 0644, dir, connector,
-			    &output_bpc_fops);
-
-	debugfs_create_file("trigger_hotplug", 0644, dir, connector,
-			    &trigger_hotplug_debugfs_fops);
+	for (i = 0; i < ARRAY_SIZE(connector_debugfs_entries); i++) {
+		debugfs_create_file(connector_debugfs_entries[i].name,
+				    0644, dir, connector,
+				    connector_debugfs_entries[i].fops);
+	}
 
 	connector->debugfs_dpcd_address = 0;
 	connector->debugfs_dpcd_size = 0;
@@ -2383,6 +2572,225 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
 #endif
 }
 
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+/*
+ * Set crc window coordinate x start
+ */
+static int crc_win_x_start_set(void *data, u64 val)
+{
+	struct drm_crtc *crtc = data;
+	struct drm_device *drm_dev = crtc->dev;
+	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+	spin_lock_irq(&drm_dev->event_lock);
+	acrtc->dm_irq_params.crc_window.x_start = (uint16_t) val;
+	acrtc->dm_irq_params.crc_window.update_win = false;
+	spin_unlock_irq(&drm_dev->event_lock);
+
+	return 0;
+}
+
+/*
+ * Get crc window coordinate x start
+ */
+static int crc_win_x_start_get(void *data, u64 *val)
+{
+	struct drm_crtc *crtc = data;
+	struct drm_device *drm_dev = crtc->dev;
+	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+	spin_lock_irq(&drm_dev->event_lock);
+	*val = acrtc->dm_irq_params.crc_window.x_start;
+	spin_unlock_irq(&drm_dev->event_lock);
+
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(crc_win_x_start_fops, crc_win_x_start_get,
+			 crc_win_x_start_set, "%llu\n");
+
+
+/*
+ * Set crc window coordinate y start
+ */
+static int crc_win_y_start_set(void *data, u64 val)
+{
+	struct drm_crtc *crtc = data;
+	struct drm_device *drm_dev = crtc->dev;
+	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+	spin_lock_irq(&drm_dev->event_lock);
+	acrtc->dm_irq_params.crc_window.y_start = (uint16_t) val;
+	acrtc->dm_irq_params.crc_window.update_win = false;
+	spin_unlock_irq(&drm_dev->event_lock);
+
+	return 0;
+}
+
+/*
+ * Get crc window coordinate y start
+ */
+static int crc_win_y_start_get(void *data, u64 *val)
+{
+	struct drm_crtc *crtc = data;
+	struct drm_device *drm_dev = crtc->dev;
+	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+	spin_lock_irq(&drm_dev->event_lock);
+	*val = acrtc->dm_irq_params.crc_window.y_start;
+	spin_unlock_irq(&drm_dev->event_lock);
+
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(crc_win_y_start_fops, crc_win_y_start_get,
+			 crc_win_y_start_set, "%llu\n");
+
+/*
+ * Set crc window coordinate x end
+ */
+static int crc_win_x_end_set(void *data, u64 val)
+{
+	struct drm_crtc *crtc = data;
+	struct drm_device *drm_dev = crtc->dev;
+	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+	spin_lock_irq(&drm_dev->event_lock);
+	acrtc->dm_irq_params.crc_window.x_end = (uint16_t) val;
+	acrtc->dm_irq_params.crc_window.update_win = false;
+	spin_unlock_irq(&drm_dev->event_lock);
+
+	return 0;
+}
+
+/*
+ * Get crc window coordinate x end
+ */
+static int crc_win_x_end_get(void *data, u64 *val)
+{
+	struct drm_crtc *crtc = data;
+	struct drm_device *drm_dev = crtc->dev;
+	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+	spin_lock_irq(&drm_dev->event_lock);
+	*val = acrtc->dm_irq_params.crc_window.x_end;
+	spin_unlock_irq(&drm_dev->event_lock);
+
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(crc_win_x_end_fops, crc_win_x_end_get,
+			 crc_win_x_end_set, "%llu\n");
+
+/*
+ * Set crc window coordinate y end
+ */
+static int crc_win_y_end_set(void *data, u64 val)
+{
+	struct drm_crtc *crtc = data;
+	struct drm_device *drm_dev = crtc->dev;
+	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+	spin_lock_irq(&drm_dev->event_lock);
+	acrtc->dm_irq_params.crc_window.y_end = (uint16_t) val;
+	acrtc->dm_irq_params.crc_window.update_win = false;
+	spin_unlock_irq(&drm_dev->event_lock);
+
+	return 0;
+}
+
+/*
+ * Get crc window coordinate y end
+ */
+static int crc_win_y_end_get(void *data, u64 *val)
+{
+	struct drm_crtc *crtc = data;
+	struct drm_device *drm_dev = crtc->dev;
+	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+	spin_lock_irq(&drm_dev->event_lock);
+	*val = acrtc->dm_irq_params.crc_window.y_end;
+	spin_unlock_irq(&drm_dev->event_lock);
+
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(crc_win_y_end_fops, crc_win_y_end_get,
+			 crc_win_y_end_set, "%llu\n");
+/*
+ * Trigger to commit crc window
+ */
+static int crc_win_update_set(void *data, u64 val)
+{
+	struct drm_crtc *new_crtc = data;
+	struct drm_crtc *old_crtc = NULL;
+	struct amdgpu_crtc *new_acrtc, *old_acrtc;
+	struct amdgpu_device *adev = drm_to_adev(new_crtc->dev);
+	struct crc_rd_work *crc_rd_wrk = adev->dm.crc_rd_wrk;
+
+	if (!crc_rd_wrk)
+		return 0;
+
+	if (val) {
+		spin_lock_irq(&adev_to_drm(adev)->event_lock);
+		spin_lock(&crc_rd_wrk->crc_rd_work_lock);
+		if (crc_rd_wrk->crtc) {
+			old_crtc = crc_rd_wrk->crtc;
+			old_acrtc = to_amdgpu_crtc(old_crtc);
+		}
+		new_acrtc = to_amdgpu_crtc(new_crtc);
+
+		if (old_crtc && old_crtc != new_crtc) {
+			old_acrtc->dm_irq_params.crc_window.activated = false;
+			old_acrtc->dm_irq_params.crc_window.update_win = false;
+			old_acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0;
+
+			new_acrtc->dm_irq_params.crc_window.activated = true;
+			new_acrtc->dm_irq_params.crc_window.update_win = true;
+			new_acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0;
+			crc_rd_wrk->crtc = new_crtc;
+		} else {
+			new_acrtc->dm_irq_params.crc_window.activated = true;
+			new_acrtc->dm_irq_params.crc_window.update_win = true;
+			new_acrtc->dm_irq_params.crc_window.skip_frame_cnt = 0;
+			crc_rd_wrk->crtc = new_crtc;
+		}
+		spin_unlock(&crc_rd_wrk->crc_rd_work_lock);
+		spin_unlock_irq(&adev_to_drm(adev)->event_lock);
+	}
+
+	return 0;
+}
+
+/*
+ * Get crc window update flag
+ */
+static int crc_win_update_get(void *data, u64 *val)
+{
+	*val = 0;
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(crc_win_update_fops, crc_win_update_get,
+			 crc_win_update_set, "%llu\n");
+
+void crtc_debugfs_init(struct drm_crtc *crtc)
+{
+	struct dentry *dir = debugfs_lookup("crc", crtc->debugfs_entry);
+
+	if (!dir)
+		return;
+
+	debugfs_create_file_unsafe("crc_win_x_start", 0644, dir, crtc,
+				   &crc_win_x_start_fops);
+	debugfs_create_file_unsafe("crc_win_y_start", 0644, dir, crtc,
+				   &crc_win_y_start_fops);
+	debugfs_create_file_unsafe("crc_win_x_end", 0644, dir, crtc,
+				   &crc_win_x_end_fops);
+	debugfs_create_file_unsafe("crc_win_y_end", 0644, dir, crtc,
+				   &crc_win_y_end_fops);
+	debugfs_create_file_unsafe("crc_win_update", 0644, dir, crtc,
+				   &crc_win_update_fops);
+
+}
+#endif
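A usage sketch for the secure-display CRC window attributes above (not part of the patch): the coordinates are written first, then a nonzero write to crc_win_update arms the window. The crtc-0 path assumes the standard DRM per-CRTC debugfs layout on card 0.

#include <stdio.h>

static void wr_attr(const char *name, unsigned int v)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/dri/0/crtc-0/crc/%s", name);
	f = fopen(path, "w");
	if (!f)
		return;
	fprintf(f, "%u", v);
	fclose(f);
}

int main(void)
{
	/* coordinates first; each coordinate write clears update_win */
	wr_attr("crc_win_x_start", 0);
	wr_attr("crc_win_y_start", 0);
	wr_attr("crc_win_x_end", 256);
	wr_attr("crc_win_y_end", 256);
	/* commit: activates the window on this CRTC via crc_rd_wrk */
	wr_attr("crc_win_update", 1);
	return 0;
}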
 /*
  * Writes DTN log state to the user supplied buffer.
  * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dtn_log
  */
@@ -2450,11 +2858,9 @@ static ssize_t dtn_log_write(
  * As written to display, taking ABM and backlight lut into account.
  * Ranges from 0x0 to 0x10000 (= 100% PWM)
  */
-static int current_backlight_read(struct seq_file *m, void *data)
+static int current_backlight_show(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *)m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct amdgpu_device *adev = drm_to_adev(dev);
+	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
 	struct amdgpu_display_manager *dm = &adev->dm;
 
 	unsigned int backlight = dc_link_get_backlight_level(dm->backlight_link);
@@ -2468,11 +2874,9 @@ static int current_backlight_read(struct seq_file *m, void *data)
  * As written to display, taking ABM and backlight lut into account.
  * Ranges from 0x0 to 0x10000 (= 100% PWM)
  */
-static int target_backlight_read(struct seq_file *m, void *data)
+static int target_backlight_show(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *)m->private;
-	struct drm_device *dev = node->minor->dev;
-	struct amdgpu_device *adev = drm_to_adev(dev);
+	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
 	struct amdgpu_display_manager *dm = &adev->dm;
 
 	unsigned int backlight = dc_link_get_target_backlight_pwm(dm->backlight_link);
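These _show() conversions pair with the DEFINE_SHOW_ATTRIBUTE() invocations further down. For reference, a simplified sketch of what that macro (from <linux/seq_file.h>) expands to — which is why the show functions can now cast m->private directly:

/* Roughly, DEFINE_SHOW_ATTRIBUTE(foo) generates: */
static int foo_open(struct inode *inode, struct file *file)
{
	/* single_open() stashes inode->i_private in m->private */
	return single_open(file, foo_show, inode->i_private);
}

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.open		= foo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};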
@@ -2481,10 +2885,10 @@ static int target_backlight_read(struct seq_file *m, void *data)
 	return 0;
 }
 
-static int mst_topo(struct seq_file *m, void *unused)
+static int mst_topo_show(struct seq_file *m, void *unused)
 {
-	struct drm_info_node *node = (struct drm_info_node *)m->private;
-	struct drm_device *dev = node->minor->dev;
+	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+	struct drm_device *dev = adev_to_drm(adev);
 	struct drm_connector *connector;
 	struct drm_connector_list_iter conn_iter;
 	struct amdgpu_dm_connector *aconnector;
@@ -2504,12 +2908,6 @@ static int mst_topo(struct seq_file *m, void *unused)
 	return 0;
 }
 
-static const struct drm_info_list amdgpu_dm_debugfs_list[] = {
-	{"amdgpu_current_backlight_pwm", &current_backlight_read},
-	{"amdgpu_target_backlight_pwm", &target_backlight_read},
-	{"amdgpu_mst_topology", &mst_topo},
-};
-
 /*
  * Sets the force_timing_sync debug option from the given string.
  * All connected displays will be force synchronized immediately.
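The write handler itself falls outside the visible hunks; the attribute is registered below as amdgpu_dm_force_timing_sync via debugfs_create_file_unsafe(), so it presumably takes a 0/1 value like the other u64 attributes in this file, i.e. something along the lines of:

 echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_force_timing_sync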
@@ -2568,10 +2966,13 @@ static int visual_confirm_get(void *data, u64 *val)
 	return 0;
 }
 
+DEFINE_SHOW_ATTRIBUTE(current_backlight);
+DEFINE_SHOW_ATTRIBUTE(target_backlight);
+DEFINE_SHOW_ATTRIBUTE(mst_topo);
 DEFINE_DEBUGFS_ATTRIBUTE(visual_confirm_fops, visual_confirm_get,
 			 visual_confirm_set, "%llu\n");
 
-int dtn_debugfs_init(struct amdgpu_device *adev)
+void dtn_debugfs_init(struct amdgpu_device *adev)
 {
 	static const struct file_operations dtn_log_fops = {
 		.owner = THIS_MODULE,
@@ -2582,13 +2983,13 @@ int dtn_debugfs_init(struct amdgpu_device *adev)
 
 	struct drm_minor *minor = adev_to_drm(adev)->primary;
 	struct dentry *root = minor->debugfs_root;
-	int ret;
-
-	ret = amdgpu_debugfs_add_files(adev, amdgpu_dm_debugfs_list,
-				       ARRAY_SIZE(amdgpu_dm_debugfs_list));
-	if (ret)
-		return ret;
+	debugfs_create_file("amdgpu_current_backlight_pwm", 0444,
+			    root, adev, &current_backlight_fops);
+	debugfs_create_file("amdgpu_target_backlight_pwm", 0444,
+			    root, adev, &target_backlight_fops);
+	debugfs_create_file("amdgpu_mst_topology", 0444, root,
+			    adev, &mst_topo_fops);
 
 	debugfs_create_file("amdgpu_dm_dtn_log", 0644, root, adev,
 			    &dtn_log_fops);
@@ -2604,5 +3005,6 @@ int dtn_debugfs_init(struct amdgpu_device *adev)
 	debugfs_create_file_unsafe("amdgpu_dm_force_timing_sync", 0644, root,
 				   adev, &force_timing_sync_ops);
 
-	return 0;
+	debugfs_create_file_unsafe("amdgpu_dm_dmcub_trace_event_en", 0644, root,
+				   adev, &dmcub_trace_event_state_fops);
 }
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
index 5e5b2b2afa31..3366cb644053 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h
@@ -30,6 +30,9 @@
 #include "amdgpu_dm.h"
 
 void connector_debugfs_init(struct amdgpu_dm_connector *connector);
-int dtn_debugfs_init(struct amdgpu_device *adev);
+void dtn_debugfs_init(struct amdgpu_device *adev);
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+void crtc_debugfs_init(struct drm_crtc *crtc);
+#endif
 
 #endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 5750818db8f6..09bdffb3a09e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -652,8 +652,31 @@ void *dm_helpers_allocate_gpu_mem(
 		size_t size,
 		long long *addr)
 {
-	// TODO
-	return NULL;
+	struct amdgpu_device *adev = ctx->driver_context;
+	struct dal_allocation *da;
+	u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ?
+		AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM;
+	int ret;
+
+	da = kzalloc(sizeof(struct dal_allocation), GFP_KERNEL);
+	if (!da)
+		return NULL;
+
+	ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
+				      domain, &da->bo,
+				      &da->gpu_addr, &da->cpu_ptr);
+
+	if (ret) {
+		kfree(da);
+		return NULL;
+	}
+
+	*addr = da->gpu_addr;
+
+	/* add da to list in dm */
+	list_add(&da->list, &adev->dm.da_list);
+
+	return da->cpu_ptr;
 }
 
 void dm_helpers_free_gpu_mem(
@@ -661,5 +684,22 @@ void dm_helpers_free_gpu_mem(
 		enum dc_gpu_mem_alloc_type type,
 		void *pvMem)
 {
+	struct amdgpu_device *adev = ctx->driver_context;
+	struct dal_allocation *da;
+
+	/* walk the da list in DM */
+	list_for_each_entry(da, &adev->dm.da_list, list) {
+		if (pvMem == da->cpu_ptr) {
+			amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr);
+			list_del(&da->list);
+			kfree(da);
+			break;
+		}
+	}
+}
+
+bool dm_helpers_dmub_outbox0_interrupt_control(struct dc_context *ctx, bool enable)
+{
 	// TODO
+	return true;
 }
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index e0000c180ed1..d3c687d07ee6 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -82,6 +82,7 @@ struct amdgpu_dm_irq_handler_data {
 	struct amdgpu_display_manager *dm;
 	/* DAL irq source which registered for this interrupt. */
 	enum dc_irq_source irq_source;
+	struct work_struct work;
 };
 
 #define DM_IRQ_TABLE_LOCK(adev, flags) \
@@ -111,20 +112,10 @@ static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
  */
 static void dm_irq_work_func(struct work_struct *work)
 {
-	struct irq_list_head *irq_list_head =
-		container_of(work, struct irq_list_head, work);
-	struct list_head *handler_list = &irq_list_head->head;
-	struct amdgpu_dm_irq_handler_data *handler_data;
-
-	list_for_each_entry(handler_data, handler_list, list) {
-		DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
-			      handler_data->irq_source);
+	struct amdgpu_dm_irq_handler_data *handler_data =
+		container_of(work, struct amdgpu_dm_irq_handler_data, work);
 
-		DRM_DEBUG_KMS("DM_IRQ: schedule_work: for dal_src=%d\n",
-			      handler_data->irq_source);
-
-		handler_data->handler(handler_data->handler_arg);
-	}
+	handler_data->handler(handler_data->handler_arg);
 
 	/* Call a DAL subcomponent which registered for interrupt notification
 	 * at INTERRUPT_LOW_IRQ_CONTEXT.
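Returning briefly to the helpers.c hunk above, a caller-side sketch of the new allocator pair (not part of the patch; the function name and the 4 KiB size are illustrative, and dc_ctx stands in for a real struct dc_context):

static void example_dmub_scratch(struct dc_context *dc_ctx)
{
	long long gpu_addr;
	void *cpu_ptr;

	/* GART-backed allocation; tracked on adev->dm.da_list until freed */
	cpu_ptr = dm_helpers_allocate_gpu_mem(dc_ctx, DC_MEM_ALLOC_TYPE_GART,
					      4096, &gpu_addr);
	if (!cpu_ptr)
		return;

	memset(cpu_ptr, 0, 4096);	/* CPU-visible kernel mapping */
	/* ... hand gpu_addr to firmware here ... */

	dm_helpers_free_gpu_mem(dc_ctx, DC_MEM_ALLOC_TYPE_GART, cpu_ptr);
}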
@@ -156,7 +147,7 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
 		break;
 	case INTERRUPT_LOW_IRQ_CONTEXT:
 	default:
-		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
+		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
 		break;
 	}
 
@@ -290,7 +281,8 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
 		break;
 	case INTERRUPT_LOW_IRQ_CONTEXT:
 	default:
-		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
+		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
+		INIT_WORK(&handler_data->work, dm_irq_work_func);
 		break;
 	}
 
@@ -372,7 +364,7 @@ void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
 int amdgpu_dm_irq_init(struct amdgpu_device *adev)
 {
 	int src;
-	struct irq_list_head *lh;
+	struct list_head *lh;
 
 	DRM_DEBUG_KMS("DM_IRQ\n");
 
@@ -381,9 +373,7 @@ int amdgpu_dm_irq_init(struct amdgpu_device *adev)
 	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
 		/* low context handler list init */
 		lh = &adev->dm.irq_handler_list_low_tab[src];
-		INIT_LIST_HEAD(&lh->head);
-		INIT_WORK(&lh->work, dm_irq_work_func);
-
+		INIT_LIST_HEAD(lh);
 		/* high context handler init */
 		INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
 	}
@@ -400,8 +390,11 @@ int amdgpu_dm_irq_init(struct amdgpu_device *adev)
 void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
 {
 	int src;
-	struct irq_list_head *lh;
+	struct list_head *lh;
+	struct list_head *entry, *tmp;
+	struct amdgpu_dm_irq_handler_data *handler;
 	unsigned long irq_table_flags;
+
 	DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
 	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
 		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
@@ -410,7 +403,16 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
 		 * (because no code can schedule a new one). */
 		lh = &adev->dm.irq_handler_list_low_tab[src];
 		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
-		flush_work(&lh->work);
+
+		if (!list_empty(lh)) {
+			list_for_each_safe(entry, tmp, lh) {
+				handler = list_entry(
+					entry,
+					struct amdgpu_dm_irq_handler_data,
+					list);
+				flush_work(&handler->work);
+			}
+		}
 	}
 }
 
@@ -420,6 +422,8 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
 	struct list_head *hnd_list_h;
 	struct list_head *hnd_list_l;
 	unsigned long irq_table_flags;
+	struct list_head *entry, *tmp;
+	struct amdgpu_dm_irq_handler_data *handler;
 
 	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
 
@@ -430,14 +434,22 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
 	 * will be disabled from manage_dm_interrupts on disable CRTC.
 	 */
 	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
-		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
+		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
 		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
 		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
 			dc_interrupt_set(adev->dm.dc, src, false);
 
 		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
-		flush_work(&adev->dm.irq_handler_list_low_tab[src].work);
+
+		if (!list_empty(hnd_list_l)) {
+			list_for_each_safe(entry, tmp, hnd_list_l) {
+				handler = list_entry(
+					entry,
+					struct amdgpu_dm_irq_handler_data,
+					list);
+				flush_work(&handler->work);
+			}
+		}
 
 		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
 	}
@@ -457,7 +469,7 @@ int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
 
 	/* re-enable short pulse interrupts HW interrupt */
 	for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
-		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
+		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
 		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
 		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
 			dc_interrupt_set(adev->dm.dc, src, true);
@@ -483,7 +495,7 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
 	 * will be enabled from manage_dm_interrupts on enable CRTC.
 	 */
 	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) {
-		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
+		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
 		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
 		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
 			dc_interrupt_set(adev->dm.dc, src, true);
@@ -500,22 +512,51 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
 static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
 					enum dc_irq_source irq_source)
 {
-	unsigned long irq_table_flags;
-	struct work_struct *work = NULL;
+	struct list_head *handler_list = &adev->dm.irq_handler_list_low_tab[irq_source];
+	struct amdgpu_dm_irq_handler_data *handler_data;
+	bool work_queued = false;
 
-	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+	if (list_empty(handler_list))
+		return;
 
-	if (!list_empty(&adev->dm.irq_handler_list_low_tab[irq_source].head))
-		work = &adev->dm.irq_handler_list_low_tab[irq_source].work;
+	list_for_each_entry(handler_data, handler_list, list) {
+		if (queue_work(system_highpri_wq, &handler_data->work)) {
+			work_queued = true;
+			break;
+		}
+	}
 
-	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+	if (!work_queued) {
+		struct amdgpu_dm_irq_handler_data *handler_data_add;
+		/* get the amdgpu_dm_irq_handler_data of the first item in handler_list */
+		handler_data = container_of(handler_list->next, struct amdgpu_dm_irq_handler_data, list);
 
-	if (work) {
-		if (!schedule_work(work))
-			DRM_INFO("amdgpu_dm_irq_schedule_work FAILED src %d\n",
-						irq_source);
-	}
+		/* allocate a new amdgpu_dm_irq_handler_data */
+		handler_data_add = kzalloc(sizeof(*handler_data), GFP_KERNEL);
+		if (!handler_data_add) {
+			DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
+			return;
+		}
+
+		/* copy the members of handler_data into the new handler_data_add */
+		handler_data_add->handler = handler_data->handler;
+		handler_data_add->handler_arg = handler_data->handler_arg;
+		handler_data_add->dm = handler_data->dm;
+		handler_data_add->irq_source = irq_source;
+
+		list_add_tail(&handler_data_add->list, handler_list);
+
+		INIT_WORK(&handler_data_add->work, dm_irq_work_func);
+
+		if (queue_work(system_highpri_wq, &handler_data_add->work))
+			DRM_DEBUG("Queued work for handling interrupt from "
+				  "display for IRQ source %d\n",
+				  irq_source);
+		else
+			DRM_ERROR("Failed to queue work for handling interrupt "
+				  "from display for IRQ source %d\n",
+				  irq_source);
+	}
 }
 
 /*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
index 45825a34f8eb..f3b93ba69a27 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
@@ -26,12 +26,21 @@
 #ifndef __AMDGPU_DM_IRQ_PARAMS_H__
 #define __AMDGPU_DM_IRQ_PARAMS_H__
 
+#include "amdgpu_dm_crc.h"
+
 struct dm_irq_params {
 	u32 last_flip_vblank;
 	struct mod_vrr_params vrr_params;
 	struct dc_stream_state *stream;
 	int active_planes;
 	struct mod_freesync_config freesync_config;
+
+#ifdef CONFIG_DEBUG_FS
+	enum amdgpu_dm_pipe_crc_source crc_src;
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+	struct crc_window_parm crc_window;
+#endif
+#endif
 };
 
 #endif /* __AMDGPU_DM_IRQ_PARAMS_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 41b09ab22233..73cdb9fe981a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -38,6 +38,7 @@
 #include "dc_link_ddc.h"
 #include "i2caux_interface.h"
+#include "dmub_cmd.h"
 #if defined(CONFIG_DEBUG_FS)
 #include "amdgpu_dm_debugfs.h"
 #endif
@@ -51,7 +52,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
 {
 	ssize_t result = 0;
 	struct aux_payload payload;
-	enum aux_channel_operation_result operation_result;
+	enum aux_return_code_type operation_result;
 
 	if (WARN_ON(msg->size > 16))
 		return -E2BIG;
@@ -73,17 +74,19 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
 
 	if (result < 0)
 		switch (operation_result) {
-		case AUX_CHANNEL_OPERATION_SUCCEEDED:
+		case AUX_RET_SUCCESS:
 			break;
-		case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
-		case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
+		case AUX_RET_ERROR_HPD_DISCON:
+		case AUX_RET_ERROR_UNKNOWN:
+		case AUX_RET_ERROR_INVALID_OPERATION:
+		case AUX_RET_ERROR_PROTOCOL_ERROR:
 			result = -EIO;
 			break;
-		case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
-		case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE:
+		case AUX_RET_ERROR_INVALID_REPLY:
+		case AUX_RET_ERROR_ENGINE_ACQUIRE:
			result = -EBUSY;
 			break;
-		case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+		case AUX_RET_ERROR_TIMEOUT:
 			result = -ETIMEDOUT;
 			break;
 		}
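The errno values mapped above propagate to userspace when the AUX channel is driven through the DP AUX character device (CONFIG_DRM_DP_AUX_CHARDEV). A small sketch of that, not part of the patch — the /dev node number is hypothetical:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char dpcd[16];
	int fd = open("/dev/drm_dp_aux0", O_RDONLY);

	if (fd < 0)
		return 1;
	/* Read the DPCD receiver capability field at offset 0x000. */
	if (pread(fd, dpcd, sizeof(dpcd), 0x000) < 0)
		perror("aux read");	/* e.g. ETIMEDOUT for AUX_RET_ERROR_TIMEOUT */
	else
		printf("DPCD rev: %02x\n", dpcd[0]);
	close(fd);
	return 0;
}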