Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 164
1 file changed, 135 insertions(+), 29 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index ae9c106979d7..4b584cb75bf4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -26,9 +26,12 @@
 #include <drm/drmP.h>
 
 #include "amdgpu.h"
+#include "amdgpu_display.h"
 #include <drm/amdgpu_drm.h>
 #include <linux/dma-buf.h>
 
+static const struct dma_buf_ops amdgpu_dmabuf_ops;
+
 struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
         struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
@@ -102,59 +105,95 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
         int ret;
 
         ww_mutex_lock(&resv->lock, NULL);
-        ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
-                               AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, 0, &bo);
-        ww_mutex_unlock(&resv->lock);
+        ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE,
+                               AMDGPU_GEM_DOMAIN_CPU, 0, ttm_bo_type_sg,
+                               resv, &bo);
         if (ret)
-                return ERR_PTR(ret);
+                goto error;
 
-        bo->prime_shared_count = 1;
+        bo->tbo.sg = sg;
+        bo->tbo.ttm->sg = sg;
+        bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
+        bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
+        if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
+                bo->prime_shared_count = 1;
+
+        ww_mutex_unlock(&resv->lock);
         return &bo->gem_base;
+
+error:
+        ww_mutex_unlock(&resv->lock);
+        return ERR_PTR(ret);
 }
 
-int amdgpu_gem_prime_pin(struct drm_gem_object *obj)
+static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
+                                 struct device *target_dev,
+                                 struct dma_buf_attachment *attach)
 {
+        struct drm_gem_object *obj = dma_buf->priv;
         struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
-        long ret = 0;
-
-        ret = amdgpu_bo_reserve(bo, false);
-        if (unlikely(ret != 0))
-                return ret;
-
-        /*
-         * Wait for all shared fences to complete before we switch to future
-         * use of exclusive fence on this prime shared bo.
-         */
-        ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false,
-                                                  MAX_SCHEDULE_TIMEOUT);
-        if (unlikely(ret < 0)) {
-                DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret);
-                amdgpu_bo_unreserve(bo);
-                return ret;
+        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+        long r;
+
+        r = drm_gem_map_attach(dma_buf, target_dev, attach);
+        if (r)
+                return r;
+
+        r = amdgpu_bo_reserve(bo, false);
+        if (unlikely(r != 0))
+                goto error_detach;
+
+
+        if (attach->dev->driver != adev->dev->driver) {
+                /*
+                 * Wait for all shared fences to complete before we switch to future
+                 * use of exclusive fence on this prime shared bo.
+                 */
+                r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
+                                                        true, false,
+                                                        MAX_SCHEDULE_TIMEOUT);
+                if (unlikely(r < 0)) {
+                        DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
+                        goto error_unreserve;
+                }
         }
 
         /* pin buffer into GTT */
-        ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
-        if (likely(ret == 0))
+        r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL);
+        if (r)
+                goto error_unreserve;
+
+        if (attach->dev->driver != adev->dev->driver)
                 bo->prime_shared_count++;
 
+error_unreserve:
         amdgpu_bo_unreserve(bo);
-        return ret;
+
+error_detach:
+        if (r)
+                drm_gem_map_detach(dma_buf, attach);
+        return r;
 }
 
-void amdgpu_gem_prime_unpin(struct drm_gem_object *obj)
+static void amdgpu_gem_map_detach(struct dma_buf *dma_buf,
+                                  struct dma_buf_attachment *attach)
 {
+        struct drm_gem_object *obj = dma_buf->priv;
         struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
         int ret = 0;
 
         ret = amdgpu_bo_reserve(bo, true);
         if (unlikely(ret != 0))
-                return;
+                goto error;
 
         amdgpu_bo_unpin(bo);
-        if (bo->prime_shared_count)
+        if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
                 bo->prime_shared_count--;
         amdgpu_bo_unreserve(bo);
+
+error:
+        drm_gem_map_detach(dma_buf, attach);
 }
 
 struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
@@ -164,6 +203,50 @@ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
         return bo->tbo.resv;
 }
 
+static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
+                                       enum dma_data_direction direction)
+{
+        struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
+        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+        struct ttm_operation_ctx ctx = { true, false };
+        u32 domain = amdgpu_display_framebuffer_domains(adev);
+        int ret;
+        bool reads = (direction == DMA_BIDIRECTIONAL ||
+                      direction == DMA_FROM_DEVICE);
+
+        if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
+                return 0;
+
+        /* move to gtt */
+        ret = amdgpu_bo_reserve(bo, false);
+        if (unlikely(ret != 0))
+                return ret;
+
+        if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
+                amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+                ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+        }
+
+        amdgpu_bo_unreserve(bo);
+        return ret;
+}
+
+static const struct dma_buf_ops amdgpu_dmabuf_ops = {
+        .attach = amdgpu_gem_map_attach,
+        .detach = amdgpu_gem_map_detach,
+        .map_dma_buf = drm_gem_map_dma_buf,
+        .unmap_dma_buf = drm_gem_unmap_dma_buf,
+        .release = drm_gem_dmabuf_release,
+        .begin_cpu_access = amdgpu_gem_begin_cpu_access,
+        .map = drm_gem_dmabuf_kmap,
+        .map_atomic = drm_gem_dmabuf_kmap_atomic,
+        .unmap = drm_gem_dmabuf_kunmap,
+        .unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
+        .mmap = drm_gem_dmabuf_mmap,
+        .vmap = drm_gem_dmabuf_vmap,
+        .vunmap = drm_gem_dmabuf_vunmap,
+};
+
 struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
                                         struct drm_gem_object *gobj,
                                         int flags)
@@ -176,7 +259,30 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
                 return ERR_PTR(-EPERM);
 
         buf = drm_gem_prime_export(dev, gobj, flags);
-        if (!IS_ERR(buf))
+        if (!IS_ERR(buf)) {
                 buf->file->f_mapping = dev->anon_inode->i_mapping;
+                buf->ops = &amdgpu_dmabuf_ops;
+        }
+
         return buf;
 }
+
+struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
+                                               struct dma_buf *dma_buf)
+{
+        struct drm_gem_object *obj;
+
+        if (dma_buf->ops == &amdgpu_dmabuf_ops) {
+                obj = dma_buf->priv;
+                if (obj->dev == dev) {
+                        /*
+                         * Importing dmabuf exported from our own gem increases
+                         * refcount on gem itself instead of f_count of dmabuf.
+                         */
+                        drm_gem_object_get(obj);
+                        return obj;
+                }
+        }
+
+        return drm_gem_prime_import(dev, dma_buf);
+}
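
The begin_cpu_access callback added above is reached through the generic dma-buf CPU access UAPI, not anything amdgpu-specific: userspace mmap()s the exported PRIME fd (served by drm_gem_dmabuf_mmap in the ops table) and brackets its CPU reads with DMA_BUF_IOCTL_SYNC; the DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ case arrives at the exporter as begin_cpu_access(..., DMA_FROM_DEVICE), which is where amdgpu gets its chance to migrate the BO to GTT first. A minimal userspace sketch of that sequence follows; dmabuf_fd and size are assumed to be a PRIME fd exported by amdgpu and its length, and are not part of this patch.

/* Hypothetical userspace sketch (not from this patch): bracketing CPU
 * reads of an amdgpu PRIME fd so that amdgpu_gem_begin_cpu_access runs.
 */
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/dma-buf.h>

static int read_prime_buffer(int dmabuf_fd, size_t size)
{
        struct dma_buf_sync sync = { 0 };
        void *map;
        int ret = -1;

        /* Served by drm_gem_dmabuf_mmap from amdgpu_dmabuf_ops. */
        map = mmap(NULL, size, PROT_READ, MAP_SHARED, dmabuf_fd, 0);
        if (map == MAP_FAILED)
                return -1;

        /* SYNC_START | SYNC_READ maps to begin_cpu_access(DMA_FROM_DEVICE),
         * i.e. amdgpu_gem_begin_cpu_access; the BO may move to GTT here. */
        sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ;
        if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync))
                goto out;

        /* ... CPU reads through "map" go here ... */

        sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_READ;
        ret = ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
out:
        munmap(map, size);
        return ret;
}

This also shows why the callback returns early for !reads: a write-only bracket (DMA_BUF_SYNC_WRITE, i.e. DMA_TO_DEVICE) skips the migration, since the move to GTT only pays off when the CPU is about to read the buffer, and even then only when amdgpu_display_framebuffer_domains() reports GTT as a valid placement.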