author     Linus Torvalds <torvalds@linux-foundation.org>   2021-06-04 09:30:23 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2021-06-04 09:30:23 -0700
commit     3a3c5ab3d6988afdcd63f3fc8e33d157ca1d9c67 (patch)
tree       c1463af1c55682106f17dd0a19b2dbabd54c6b62
parent     f88cd3fb9df228e5ce4e13ec3dbad671ddb2146e (diff)
parent     37e2f2e800dc6d65aa77f9d4dbc4512d841e2f0b (diff)
Merge tag 'drm-fixes-2021-06-04-1' of git://anongit.freedesktop.org/drm/drm
Pull drm fixes from Dave Airlie:
"Two big regression reverts in here, one for fbdev and one i915.
Otherwise it's mostly amdgpu display fixes, and tegra fixes.
fb:
- revert broken fb_defio patch
amdgpu:
- Display fixes
- FRU EEPROM error handling fix
- RAS fix
- PSP fix
- Releasing pinned BO fix
i915:
- Revert conversion to io_mapping_map_user() which led to BUG_ON()
- Fix check for error valued returns in a selftest
tegra:
- SOR power domain race condition fix
- build warning fix
- runtime pm ref leak fix
- modifier fix"
* tag 'drm-fixes-2021-06-04-1' of git://anongit.freedesktop.org/drm/drm:
amd/display: convert DRM_DEBUG_ATOMIC to drm_dbg_atomic
drm/amdgpu: make sure we unpin the UVD BO
drm/amd/amdgpu:save psp ring wptr to avoid attack
drm/amd/display: Fix potential memory leak in DMUB hw_init
drm/amdgpu: Don't query CE and UE errors
drm/amd/display: Fix overlay validation by considering cursors
drm/amdgpu: refine amdgpu_fru_get_product_info
drm/amdgpu: add judgement for dc support
drm/amd/display: Fix GPU scaling regression by FS video support
drm/amd/display: Allow bandwidth validation for 0 streams.
Revert "i915: use io_mapping_map_user"
drm/i915/selftests: Fix return value check in live_breadcrumbs_smoketest()
Revert "fb_defio: Remove custom address_space_operations"
drm/tegra: Correct DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT
drm/tegra: sor: Fix AUX device reference leak
drm/tegra: Get ref for DP AUX channel, not its ddc adapter
drm/tegra: Fix shift overflow in tegra_shared_plane_atomic_update
drm/tegra: sor: Fully initialize SOR before registration
gpu: host1x: Split up client initalization and registration
drm/tegra: sor: Do not leak runtime PM reference
22 files changed, 240 insertions, 99 deletions
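
A note on the live_breadcrumbs_smoketest() fix listed above: live_context() reports failure through the kernel's ERR_PTR convention rather than by returning NULL, so the selftest's old NULL check could never observe an error. The stand-alone userspace sketch below is not part of the patch; it re-creates ERR_PTR/IS_ERR/PTR_ERR in simplified form (based on include/linux/err.h) and uses a hypothetical fake_live_context() stand-in to show why the IS_ERR()/PTR_ERR() form is the correct check.

/*
 * Minimal userspace sketch of the kernel's ERR_PTR convention.
 * ERR_PTR(), IS_ERR() and PTR_ERR() are simplified re-creations;
 * fake_live_context() is a made-up stand-in for live_context().
 */
#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* An errno is encoded as a pointer in the top 4095 bytes of the address space. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical stand-in: fails with ERR_PTR(-ENOMEM), never with NULL. */
static void *fake_live_context(int fail)
{
	return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000;
}

int main(void)
{
	void *ctx = fake_live_context(1);

	if (!ctx)                 /* old, broken pattern: never true for ERR_PTR values */
		printf("NULL check caught the error\n");

	if (IS_ERR(ctx))          /* fixed pattern: detect and propagate the errno */
		printf("IS_ERR check caught error %ld\n", PTR_ERR(ctx));

	return 0;
}

The same convention is why the fix propagates PTR_ERR() from the failed call instead of hard-coding -ENOMEM.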
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 0350205c4897..6819fe5612d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -337,7 +337,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
 {
 	struct amdgpu_ctx *ctx;
 	struct amdgpu_ctx_mgr *mgr;
-	unsigned long ras_counter;
 
 	if (!fpriv)
 		return -EINVAL;
@@ -362,21 +361,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
 	if (atomic_read(&ctx->guilty))
 		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
 
-	/*query ue count*/
-	ras_counter = amdgpu_ras_query_error_count(adev, false);
-	/*ras counter is monotonic increasing*/
-	if (ras_counter != ctx->ras_counter_ue) {
-		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
-		ctx->ras_counter_ue = ras_counter;
-	}
-
-	/*query ce count*/
-	ras_counter = amdgpu_ras_query_error_count(adev, true);
-	if (ras_counter != ctx->ras_counter_ce) {
-		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
-		ctx->ras_counter_ce = ras_counter;
-	}
-
 	mutex_unlock(&mgr->lock);
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 66ddfe4f58c2..57ec108b5972 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3118,7 +3118,9 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
  */
 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
 {
-	if (amdgpu_sriov_vf(adev) || adev->enable_virtual_display)
+	if (amdgpu_sriov_vf(adev) ||
+	    adev->enable_virtual_display ||
+	    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
 		return false;
 
 	return amdgpu_device_asic_has_dc_support(adev->asic_type);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
index 8f4a8f8d8146..39b6c6bfab45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -101,7 +101,8 @@ static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
 int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
 {
 	unsigned char buff[34];
-	int addrptr = 0, size = 0;
+	int addrptr, size;
+	int len;
 
 	if (!is_fru_eeprom_supported(adev))
 		return 0;
@@ -109,7 +110,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
 	/* If algo exists, it means that the i2c_adapter's initialized */
 	if (!adev->pm.smu_i2c.algo) {
 		DRM_WARN("Cannot access FRU, EEPROM accessor not initialized");
-		return 0;
+		return -ENODEV;
 	}
 
 	/* There's a lot of repetition here. This is due to the FRU having
@@ -128,7 +129,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
 	size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
 	if (size < 1) {
 		DRM_ERROR("Failed to read FRU Manufacturer, ret:%d", size);
-		return size;
+		return -EINVAL;
 	}
 
 	/* Increment the addrptr by the size of the field, and 1 due to the
@@ -138,43 +139,45 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
 	size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
 	if (size < 1) {
 		DRM_ERROR("Failed to read FRU product name, ret:%d", size);
-		return size;
+		return -EINVAL;
 	}
 
+	len = size;
 	/* Product name should only be 32 characters. Any more,
 	 * and something could be wrong. Cap it at 32 to be safe
 	 */
-	if (size > 32) {
+	if (len >= sizeof(adev->product_name)) {
 		DRM_WARN("FRU Product Number is larger than 32 characters. This is likely a mistake");
-		size = 32;
+		len = sizeof(adev->product_name) - 1;
 	}
 	/* Start at 2 due to buff using fields 0 and 1 for the address */
-	memcpy(adev->product_name, &buff[2], size);
-	adev->product_name[size] = '\0';
+	memcpy(adev->product_name, &buff[2], len);
+	adev->product_name[len] = '\0';
 	addrptr += size + 1;
 	size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
 	if (size < 1) {
 		DRM_ERROR("Failed to read FRU product number, ret:%d", size);
-		return size;
+		return -EINVAL;
 	}
 
+	len = size;
 	/* Product number should only be 16 characters. Any more,
 	 * and something could be wrong. Cap it at 16 to be safe
 	 */
-	if (size > 16) {
+	if (len >= sizeof(adev->product_number)) {
 		DRM_WARN("FRU Product Number is larger than 16 characters. This is likely a mistake");
-		size = 16;
+		len = sizeof(adev->product_number) - 1;
 	}
-	memcpy(adev->product_number, &buff[2], size);
-	adev->product_number[size] = '\0';
+	memcpy(adev->product_number, &buff[2], len);
+	adev->product_number[len] = '\0';
 
 	addrptr += size + 1;
 	size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
 
 	if (size < 1) {
 		DRM_ERROR("Failed to read FRU product version, ret:%d", size);
-		return size;
+		return -EINVAL;
 	}
 
 	addrptr += size + 1;
@@ -182,18 +185,19 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
 
 	if (size < 1) {
 		DRM_ERROR("Failed to read FRU serial number, ret:%d", size);
-		return size;
+		return -EINVAL;
 	}
 
+	len = size;
 	/* Serial number should only be 16 characters. Any more,
 	 * and something could be wrong. Cap it at 16 to be safe
 	 */
-	if (size > 16) {
+	if (len >= sizeof(adev->serial)) {
 		DRM_WARN("FRU Serial Number is larger than 16 characters. This is likely a mistake");
-		size = 16;
+		len = sizeof(adev->serial) - 1;
 	}
-	memcpy(adev->serial, &buff[2], size);
-	adev->serial[size] = '\0';
+	memcpy(adev->serial, &buff[2], len);
+	adev->serial[len] = '\0';
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 46a5328e00e0..60aa99a39a74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -76,6 +76,7 @@ struct psp_ring
 	uint64_t		ring_mem_mc_addr;
 	void			*ring_mem_handle;
 	uint32_t		ring_size;
+	uint32_t		ring_wptr;
 };
 
 /* More registers may will be supported */
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 589410c32d09..02bba1f3c42e 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -720,7 +720,7 @@ static uint32_t psp_v11_0_ring_get_wptr(struct psp_context *psp)
 	struct amdgpu_device *adev = psp->adev;
 
 	if (amdgpu_sriov_vf(adev))
-		data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+		data = psp->km_ring.ring_wptr;
 	else
 		data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
 
@@ -734,6 +734,7 @@ static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
 	if (amdgpu_sriov_vf(adev)) {
 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
+		psp->km_ring.ring_wptr = value;
 	} else
 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index f2e725f72d2f..908664a5774b 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -379,7 +379,7 @@ static uint32_t psp_v3_1_ring_get_wptr(struct psp_context *psp)
 	struct amdgpu_device *adev = psp->adev;
 
 	if (amdgpu_sriov_vf(adev))
-		data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+		data = psp->km_ring.ring_wptr;
 	else
 		data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
 	return data;
@@ -394,6 +394,7 @@ static void psp_v3_1_ring_set_wptr(struct psp_context *psp, uint32_t value)
 		/* send interrupt to PSP for SRIOV ring write pointer update */
 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
 			GFX_CTRL_CMD_ID_CONSUME_CMD);
+		psp->km_ring.ring_wptr = value;
 	} else
 		WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 2bab9c77952f..cf3803f8f075 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -357,6 +357,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 error:
 	dma_fence_put(fence);
+	amdgpu_bo_unpin(bo);
 	amdgpu_bo_unreserve(bo);
 	amdgpu_bo_unref(&bo);
 	return r;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 389eff96fcf6..652cc1a0e450 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -925,7 +925,8 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
 	}
 
-	adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
+	if (!adev->dm.dc->ctx->dmub_srv)
+		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
 	if (!adev->dm.dc->ctx->dmub_srv) {
 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
 		return -ENOMEM;
@@ -1954,7 +1955,6 @@ static int dm_suspend(void *handle)
 
 	amdgpu_dm_irq_suspend(adev);
 
-
 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
 
 	return 0;
@@ -5500,7 +5500,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 	struct drm_display_mode saved_mode;
 	struct drm_display_mode *freesync_mode = NULL;
 	bool native_mode_found = false;
-	bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
+	bool recalculate_timing = false;
+	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
 	int mode_refresh;
 	int preferred_refresh = 0;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
@@ -5563,7 +5564,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 		 */
 		DRM_DEBUG_DRIVER("No preferred mode found\n");
 	} else {
-		recalculate_timing |= amdgpu_freesync_vid_mode &&
+		recalculate_timing = amdgpu_freesync_vid_mode &&
				 is_freesync_video_mode(&mode, aconnector);
 		if (recalculate_timing) {
 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
@@ -5571,11 +5572,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 			mode = *freesync_mode;
 		} else {
 			decide_crtc_timing_for_drm_display_mode(
-				&mode, preferred_mode,
-				dm_state ? (dm_state->scaling != RMX_OFF) : false);
-		}
+				&mode, preferred_mode, scale);
 
-		preferred_refresh = drm_mode_vrefresh(preferred_mode);
+			preferred_refresh = drm_mode_vrefresh(preferred_mode);
+		}
 	}
 
 	if (recalculate_timing)
@@ -5587,7 +5587,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 	 * If scaling is enabled and refresh rate didn't change
 	 * we copy the vic and polarities of the old timings
 	 */
-	if (!recalculate_timing || mode_refresh != preferred_refresh)
+	if (!scale || mode_refresh != preferred_refresh)
 		fill_stream_properties_from_drm_display_mode(
 			stream, &mode, &aconnector->base, con_state, NULL,
 			requested_bpc);
@@ -9854,7 +9854,7 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
 
 	if (cursor_scale_w != primary_scale_w ||
 	    cursor_scale_h != primary_scale_h) {
-		DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
+		drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
 		return -EINVAL;
 	}
 
@@ -9891,7 +9891,7 @@ static int validate_overlay(struct drm_atomic_state *state)
 	int i;
 	struct drm_plane *plane;
 	struct drm_plane_state *old_plane_state, *new_plane_state;
-	struct drm_plane_state *primary_state, *overlay_state = NULL;
+	struct drm_plane_state *primary_state, *cursor_state, *overlay_state = NULL;
 
 	/* Check if primary plane is contained inside overlay */
 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
@@ -9921,6 +9921,14 @@ static int validate_overlay(struct drm_atomic_state *state)
 	if (!primary_state->crtc)
 		return 0;
 
+	/* check if cursor plane is enabled */
+	cursor_state = drm_atomic_get_plane_state(state, overlay_state->crtc->cursor);
+	if (IS_ERR(cursor_state))
+		return PTR_ERR(cursor_state);
+
+	if (drm_atomic_plane_disabling(plane->state, cursor_state))
+		return 0;
+
 	/* Perform the bounds check to ensure the overlay plane covers the primary */
 	if (primary_state->crtc_x < overlay_state->crtc_x ||
 	    primary_state->crtc_y < overlay_state->crtc_y ||
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 527e56c353cb..8357aa3c41d5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -3236,7 +3236,7 @@ static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc,
 	voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
 	dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
 
-	if (voltage_supported && dummy_pstate_supported) {
+	if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) {
 		context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
 		goto restore_dml_state;
 	}
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 93f4d059fc89..1e1cb245fca7 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -20,7 +20,6 @@ config DRM_I915
	select INPUT if ACPI
	select ACPI_VIDEO if ACPI
	select ACPI_BUTTON if ACPI
-	select IO_MAPPING
	select SYNC_FILE
	select IOSF_MBI
	select CRC32
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index f6fe5cb01438..8598a1c78a4c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -367,10 +367,11 @@ retry:
 		goto err_unpin;
 
 	/* Finally, remap it using the new GTT offset */
-	ret = io_mapping_map_user(&ggtt->iomap, area, area->vm_start +
-			(vma->ggtt_view.partial.offset << PAGE_SHIFT),
-			(ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
-			min_t(u64, vma->size, area->vm_end - area->vm_start));
+	ret = remap_io_mapping(area,
+			       area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
+			       (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
+			       min_t(u64, vma->size, area->vm_end - area->vm_start),
+			       &ggtt->iomap);
 	if (ret)
 		goto err_fence;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9ec9277539ec..69e43bf91a15 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1905,6 +1905,9 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file);
 
 /* i915_mm.c */
+int remap_io_mapping(struct vm_area_struct *vma,
+		     unsigned long addr, unsigned long pfn, unsigned long size,
+		     struct io_mapping *iomap);
 int remap_io_sg(struct vm_area_struct *vma,
 		unsigned long addr, unsigned long size,
 		struct scatterlist *sgl, resource_size_t iobase);
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index 9a777b0ff59b..666808cb3a32 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -37,6 +37,17 @@ struct remap_pfn {
 	resource_size_t iobase;
 };
 
+static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
+{
+	struct remap_pfn *r = data;
+
+	/* Special PTE are not associated with any struct page */
+	set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
+	r->pfn++;
+
+	return 0;
+}
+
 #define use_dma(io) ((io) != -1)
 
 static inline unsigned long sgt_pfn(const struct remap_pfn *r)
@@ -66,7 +77,40 @@ static int remap_sg(pte_t *pte, unsigned long addr, void *data)
 	return 0;
 }
 
+/**
+ * remap_io_mapping - remap an IO mapping to userspace
+ * @vma: user vma to map to
+ * @addr: target user address to start at
+ * @pfn: physical address of kernel memory
+ * @size: size of map area
+ * @iomap: the source io_mapping
+ *
+ * Note: this is only safe if the mm semaphore is held when called.
+ */
+int remap_io_mapping(struct vm_area_struct *vma,
+		     unsigned long addr, unsigned long pfn, unsigned long size,
+		     struct io_mapping *iomap)
+{
+	struct remap_pfn r;
+	int err;
+
+#define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
+	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
+
+	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
+	r.mm = vma->vm_mm;
+	r.pfn = pfn;
+	r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
+			  (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));
+
+	err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
+	if (unlikely(err)) {
+		zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
+		return err;
+	}
+
+	return 0;
+}
+
 /**
  * remap_io_sg - remap an IO mapping to userspace
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index ee8e753d98ce..eae0abd614cb 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -1592,8 +1592,8 @@ static int live_breadcrumbs_smoketest(void *arg)
 
 	for (n = 0; n < smoke[0].ncontexts; n++) {
 		smoke[0].contexts[n] = live_context(i915, file);
-		if (!smoke[0].contexts[n]) {
-			ret = -ENOMEM;
+		if (IS_ERR(smoke[0].contexts[n])) {
+			ret = PTR_ERR(smoke[0].contexts[n]);
 			goto out_contexts;
 		}
 	}
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 87df251c1fcf..0cb868065348 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -25,7 +25,7 @@
 #include "trace.h"
 
 /* XXX move to include/uapi/drm/drm_fourcc.h? */
-#define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT BIT(22)
+#define DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT BIT_ULL(22)
 
 struct reset_control;
 
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 79bff8b48271..bfae8a02f55b 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -510,7 +510,7 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
 	 * dGPU sector layout.
 	 */
 	if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU)
-		base |= BIT(39);
+		base |= BIT_ULL(39);
 #endif
 
 	tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH);
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 7b88261f57bb..0ea320c1092b 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -3125,21 +3125,21 @@ static int tegra_sor_init(struct host1x_client *client)
 		if (err < 0) {
 			dev_err(sor->dev, "failed to acquire SOR reset: %d\n",
 				err);
-			return err;
+			goto rpm_put;
 		}
 
 		err = reset_control_assert(sor->rst);
 		if (err < 0) {
 			dev_err(sor->dev, "failed to assert SOR reset: %d\n",
 				err);
-			return err;
+			goto rpm_put;
 		}
 	}
 
 	err = clk_prepare_enable(sor->clk);
 	if (err < 0) {
 		dev_err(sor->dev, "failed to enable clock: %d\n", err);
-		return err;
+		goto rpm_put;
 	}
 
 	usleep_range(1000, 3000);
@@ -3150,7 +3150,7 @@ static int tegra_sor_init(struct host1x_client *client)
 			dev_err(sor->dev, "failed to deassert SOR reset: %d\n",
 				err);
 			clk_disable_unprepare(sor->clk);
-			return err;
+			goto rpm_put;
 		}
 
 		reset_control_release(sor->rst);
@@ -3171,6 +3171,12 @@ static int tegra_sor_init(struct host1x_client *client)
 	}
 
 	return 0;
+
+rpm_put:
+	if (sor->rst)
+		pm_runtime_put(sor->dev);
+
+	return err;
 }
 
 static int tegra_sor_exit(struct host1x_client *client)
@@ -3739,12 +3745,8 @@ static int tegra_sor_probe(struct platform_device *pdev)
 		if (!sor->aux)
 			return -EPROBE_DEFER;
 
-		if (get_device(&sor->aux->ddc.dev)) {
-			if (try_module_get(sor->aux->ddc.owner))
-				sor->output.ddc = &sor->aux->ddc;
-			else
-				put_device(&sor->aux->ddc.dev);
-		}
+		if (get_device(sor->aux->dev))
+			sor->output.ddc = &sor->aux->ddc;
 	}
 
 	if (!sor->aux) {
@@ -3772,12 +3774,13 @@ static int tegra_sor_probe(struct platform_device *pdev)
 
 	err = tegra_sor_parse_dt(sor);
 	if (err < 0)
-		return err;
+		goto put_aux;
 
 	err = tegra_output_probe(&sor->output);
-	if (err < 0)
-		return dev_err_probe(&pdev->dev, err,
-				     "failed to probe output\n");
+	if (err < 0) {
+		dev_err_probe(&pdev->dev, err, "failed to probe output\n");
+		goto put_aux;
+	}
 
 	if (sor->ops && sor->ops->probe) {
 		err = sor->ops->probe(sor);
@@ -3916,17 +3919,10 @@ static int tegra_sor_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, sor);
 	pm_runtime_enable(&pdev->dev);
 
-	INIT_LIST_HEAD(&sor->client.list);
+	host1x_client_init(&sor->client);
 	sor->client.ops = &sor_client_ops;
 	sor->client.dev = &pdev->dev;
 
-	err = host1x_client_register(&sor->client);
-	if (err < 0) {
-		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
-			err);
-		goto rpm_disable;
-	}
-
 	/*
 	 * On Tegra210 and earlier, provide our own implementation for the
 	 * pad output clock.
@@ -3938,13 +3934,13 @@ static int tegra_sor_probe(struct platform_device *pdev)
 				      sor->index);
 		if (!name) {
 			err = -ENOMEM;
-			goto unregister;
+			goto uninit;
 		}
 
 		err = host1x_client_resume(&sor->client);
 		if (err < 0) {
 			dev_err(sor->dev, "failed to resume: %d\n", err);
-			goto unregister;
+			goto uninit;
 		}
 
 		sor->clk_pad = tegra_clk_sor_pad_register(sor, name);
@@ -3955,17 +3951,30 @@ static int tegra_sor_probe(struct platform_device *pdev)
 		err = PTR_ERR(sor->clk_pad);
 		dev_err(sor->dev, "failed to register SOR pad clock: %d\n",
 			err);
-		goto unregister;
+		goto uninit;
+	}
+
+	err = __host1x_client_register(&sor->client);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
+			err);
+		goto uninit;
 	}
 
 	return 0;
 
-unregister:
-	host1x_client_unregister(&sor->client);
-rpm_disable:
+uninit:
+	host1x_client_exit(&sor->client);
 	pm_runtime_disable(&pdev->dev);
 remove:
+	if (sor->aux)
+		sor->output.ddc = NULL;
+
 	tegra_output_remove(&sor->output);
+put_aux:
+	if (sor->aux)
+		put_device(sor->aux->dev);
+
 	return err;
 }
 
@@ -3983,6 +3992,11 @@ static int tegra_sor_remove(struct platform_device *pdev)
 
 	pm_runtime_disable(&pdev->dev);
 
+	if (sor->aux) {
+		put_device(sor->aux->dev);
+		sor->output.ddc = NULL;
+	}
+
 	tegra_output_remove(&sor->output);
 
 	return 0;
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 46f69c532b6b..218e3718fd68 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -736,6 +736,29 @@ void host1x_driver_unregister(struct host1x_driver *driver)
 EXPORT_SYMBOL(host1x_driver_unregister);
 
 /**
+ * __host1x_client_init() - initialize a host1x client
+ * @client: host1x client
+ * @key: lock class key for the client-specific mutex
+ */
+void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key)
+{
+	INIT_LIST_HEAD(&client->list);
+	__mutex_init(&client->lock, "host1x client lock", key);
+	client->usecount = 0;
+}
+EXPORT_SYMBOL(__host1x_client_init);
+
+/**
+ * host1x_client_exit() - uninitialize a host1x client
+ * @client: host1x client
+ */
+void host1x_client_exit(struct host1x_client *client)
+{
+	mutex_destroy(&client->lock);
+}
+EXPORT_SYMBOL(host1x_client_exit);
+
+/**
  * __host1x_client_register() - register a host1x client
  * @client: host1x client
  * @key: lock class key for the client-specific mutex
@@ -747,16 +770,11 @@ EXPORT_SYMBOL(host1x_driver_unregister);
  * device and call host1x_device_init(), which will in turn call each client's
  * &host1x_client_ops.init implementation.
  */
-int __host1x_client_register(struct host1x_client *client,
-			     struct lock_class_key *key)
+int __host1x_client_register(struct host1x_client *client)
 {
 	struct host1x *host1x;
 	int err;
 
-	INIT_LIST_HEAD(&client->list);
-	__mutex_init(&client->lock, "host1x client lock", key);
-	client->usecount = 0;
-
 	mutex_lock(&devices_lock);
 
 	list_for_each_entry(host1x, &devices, list) {
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index b292887a2481..a591d291b231 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -52,6 +52,13 @@ static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
 		return VM_FAULT_SIGBUS;
 
 	get_page(page);
+
+	if (vmf->vma->vm_file)
+		page->mapping = vmf->vma->vm_file->f_mapping;
+	else
+		printk(KERN_ERR "no mapping available\n");
+
+	BUG_ON(!page->mapping);
 	page->index = vmf->pgoff;
 
 	vmf->page = page;
@@ -144,6 +151,17 @@ static const struct vm_operations_struct fb_deferred_io_vm_ops = {
 	.page_mkwrite	= fb_deferred_io_mkwrite,
 };
 
+static int fb_deferred_io_set_page_dirty(struct page *page)
+{
+	if (!PageDirty(page))
+		SetPageDirty(page);
+	return 0;
+}
+
+static const struct address_space_operations fb_deferred_io_aops = {
+	.set_page_dirty = fb_deferred_io_set_page_dirty,
+};
+
 int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
 {
 	vma->vm_ops = &fb_deferred_io_vm_ops;
@@ -194,12 +212,29 @@ void fb_deferred_io_init(struct fb_info *info)
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_init);
 
+void fb_deferred_io_open(struct fb_info *info,
+			 struct inode *inode,
+			 struct file *file)
+{
+	file->f_mapping->a_ops = &fb_deferred_io_aops;
+}
+EXPORT_SYMBOL_GPL(fb_deferred_io_open);
+
 void fb_deferred_io_cleanup(struct fb_info *info)
 {
 	struct fb_deferred_io *fbdefio = info->fbdefio;
+	struct page *page;
+	int i;
 
 	BUG_ON(!fbdefio);
 	cancel_delayed_work_sync(&info->deferred_work);
+
+	/* clear out the mapping that we setup */
+	for (i = 0 ; i < info->fix.smem_len; i += PAGE_SIZE) {
+		page = fb_deferred_io_page(info, i);
+		page->mapping = NULL;
+	}
+
 	mutex_destroy(&fbdefio->lock);
 }
 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 072780b0e570..98f193078c05 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1415,6 +1415,10 @@ __releases(&info->lock)
 		if (res)
 			module_put(info->fbops->owner);
 	}
+#ifdef CONFIG_FB_DEFERRED_IO
+	if (info->fbdefio)
+		fb_deferred_io_open(info, inode, file);
+#endif
 out:
 	unlock_fb_info(info);
 	if (res)
diff --git a/include/linux/fb.h b/include/linux/fb.h
index a8dccd23c249..ecfbcc0553a5 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -659,6 +659,9 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
 /* drivers/video/fb_defio.c */
 int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma);
 extern void fb_deferred_io_init(struct fb_info *info);
+extern void fb_deferred_io_open(struct fb_info *info,
+				struct inode *inode,
+				struct file *file);
 extern void fb_deferred_io_cleanup(struct fb_info *info);
 extern int fb_deferred_io_fsync(struct file *file, loff_t start,
 				loff_t end, int datasync);
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index 232e1bd507a7..9b0487c88571 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -332,12 +332,30 @@ static inline struct host1x_device *to_host1x_device(struct device *dev)
 int host1x_device_init(struct host1x_device *device);
 int host1x_device_exit(struct host1x_device *device);
 
-int __host1x_client_register(struct host1x_client *client,
-			     struct lock_class_key *key);
-#define host1x_client_register(class) \
-	({ \
-		static struct lock_class_key __key; \
-		__host1x_client_register(class, &__key); \
+void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key);
+void host1x_client_exit(struct host1x_client *client);
+
+#define host1x_client_init(client)			\
+	({						\
+		static struct lock_class_key __key;	\
+		__host1x_client_init(client, &__key);	\
+	})
+
+int __host1x_client_register(struct host1x_client *client);
+
+/*
+ * Note that this wrapper calls __host1x_client_init() for compatibility
+ * with existing callers. Callers that want to separately initialize and
+ * register a host1x client must first initialize using either of the
+ * __host1x_client_init() or host1x_client_init() functions and then use
+ * the low-level __host1x_client_register() function to avoid the client
+ * getting reinitialized.
+ */
+#define host1x_client_register(client)			\
+	({						\
+		static struct lock_class_key __key;	\
+		__host1x_client_init(client, &__key);	\
+		__host1x_client_register(client);	\
 	})
 
 int host1x_client_unregister(struct host1x_client *client);