Diffstat (limited to 'drivers/gpu/drm/tegra/gem.c')
-rw-r--r--  drivers/gpu/drm/tegra/gem.c | 366
1 file changed, 287 insertions(+), 79 deletions(-)
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index ce023fa3e8ae..da32086cbeaf 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -14,6 +14,7 @@
  */
 
 #include <linux/dma-buf.h>
+#include <linux/iommu.h>
 #include <drm/tegra_drm.h>
 
 #include "drm.h"
@@ -91,13 +92,90 @@ static const struct host1x_bo_ops tegra_bo_ops = {
 	.kunmap = tegra_bo_kunmap,
 };
 
-static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
+/*
+ * A generic iommu_map_sg() function is being reviewed and will hopefully be
+ * merged soon. At that point this function can be dropped in favour of the
+ * one provided by the IOMMU API.
+ */
+static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+			      struct scatterlist *sg, unsigned int nents,
+			      int prot)
 {
-	dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
+	struct scatterlist *s;
+	size_t offset = 0;
+	unsigned int i;
+	int err;
+
+	for_each_sg(sg, s, nents, i) {
+		phys_addr_t phys = page_to_phys(sg_page(s));
+		size_t length = s->offset + s->length;
+
+		err = iommu_map(domain, iova + offset, phys, length, prot);
+		if (err < 0) {
+			iommu_unmap(domain, iova, offset);
+			return err;
+		}
+
+		offset += length;
+	}
+
+	return offset;
 }
 
-struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
-				 unsigned long flags)
+static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
+{
+	int prot = IOMMU_READ | IOMMU_WRITE;
+	ssize_t err;
+
+	if (bo->mm)
+		return -EBUSY;
+
+	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
+	if (!bo->mm)
+		return -ENOMEM;
+
+	err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size,
+					 PAGE_SIZE, 0, 0, 0);
+	if (err < 0) {
+		dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
+			err);
+		goto free;
+	}
+
+	bo->paddr = bo->mm->start;
+
+	err = __iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
+			     bo->sgt->nents, prot);
+	if (err < 0) {
+		dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err);
+		goto remove;
+	}
+
+	bo->size = err;
+
+	return 0;
+
+remove:
+	drm_mm_remove_node(bo->mm);
+free:
+	kfree(bo->mm);
+	return err;
+}
+
+static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
+{
+	if (!bo->mm)
+		return 0;
+
+	iommu_unmap(tegra->domain, bo->paddr, bo->size);
+	drm_mm_remove_node(bo->mm);
+	kfree(bo->mm);
+
+	return 0;
+}
+
+static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
+					      size_t size)
 {
 	struct tegra_bo *bo;
 	int err;
@@ -109,22 +187,96 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
 	host1x_bo_init(&bo->base, &tegra_bo_ops);
 	size = round_up(size, PAGE_SIZE);
 
-	bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
-					   GFP_KERNEL | __GFP_NOWARN);
-	if (!bo->vaddr) {
-		dev_err(drm->dev, "failed to allocate buffer with size %u\n",
-			size);
-		err = -ENOMEM;
-		goto err_dma;
-	}
-
 	err = drm_gem_object_init(drm, &bo->gem, size);
-	if (err)
-		goto err_init;
+	if (err < 0)
+		goto free;
 
 	err = drm_gem_create_mmap_offset(&bo->gem);
-	if (err)
-		goto err_mmap;
+	if (err < 0)
+		goto release;
+
+	return bo;
+
+release:
+	drm_gem_object_release(&bo->gem);
+free:
+	kfree(bo);
+	return ERR_PTR(err);
+}
+
+static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
+{
+	if (bo->pages) {
+		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
+		sg_free_table(bo->sgt);
+		kfree(bo->sgt);
+	} else if (bo->vaddr) {
+		dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr,
+				      bo->paddr);
+	}
+}
+
+static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo,
+			      size_t size)
+{
+	bo->pages = drm_gem_get_pages(&bo->gem);
+	if (IS_ERR(bo->pages))
+		return PTR_ERR(bo->pages);
+
+	bo->num_pages = size >> PAGE_SHIFT;
+
+	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
+	if (IS_ERR(bo->sgt)) {
+		drm_gem_put_pages(&bo->gem, bo->pages, false, false);
+		return PTR_ERR(bo->sgt);
+	}
+
+	return 0;
+}
+
+static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo,
+			  size_t size)
+{
+	struct tegra_drm *tegra = drm->dev_private;
+	int err;
+
+	if (tegra->domain) {
+		err = tegra_bo_get_pages(drm, bo, size);
+		if (err < 0)
+			return err;
+
+		err = tegra_bo_iommu_map(tegra, bo);
+		if (err < 0) {
+			tegra_bo_free(drm, bo);
+			return err;
+		}
+	} else {
+		bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
+						   GFP_KERNEL | __GFP_NOWARN);
+		if (!bo->vaddr) {
+			dev_err(drm->dev,
+				"failed to allocate buffer of size %zu\n",
+				size);
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
+				 unsigned long flags)
+{
+	struct tegra_bo *bo;
+	int err;
+
+	bo = tegra_bo_alloc_object(drm, size);
+	if (IS_ERR(bo))
+		return bo;
+
+	err = tegra_bo_alloc(drm, bo, size);
+	if (err < 0)
+		goto release;
 
 	if (flags & DRM_TEGRA_GEM_CREATE_TILED)
 		bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;
@@ -134,69 +286,52 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
 
 	return bo;
 
-err_mmap:
+release:
 	drm_gem_object_release(&bo->gem);
-err_init:
-	tegra_bo_destroy(drm, bo);
-err_dma:
 	kfree(bo);
-
 	return ERR_PTR(err);
 }
 
 struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
 					     struct drm_device *drm,
-					     unsigned int size,
+					     size_t size,
 					     unsigned long flags,
-					     unsigned int *handle)
+					     u32 *handle)
 {
 	struct tegra_bo *bo;
-	int ret;
+	int err;
 
 	bo = tegra_bo_create(drm, size, flags);
 	if (IS_ERR(bo))
 		return bo;
 
-	ret = drm_gem_handle_create(file, &bo->gem, handle);
-	if (ret)
-		goto err;
+	err = drm_gem_handle_create(file, &bo->gem, handle);
+	if (err) {
+		tegra_bo_free_object(&bo->gem);
+		return ERR_PTR(err);
+	}
 
 	drm_gem_object_unreference_unlocked(&bo->gem);
 
 	return bo;
-
-err:
-	tegra_bo_free_object(&bo->gem);
-	return ERR_PTR(ret);
 }
 
 static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
 					struct dma_buf *buf)
 {
+	struct tegra_drm *tegra = drm->dev_private;
 	struct dma_buf_attachment *attach;
 	struct tegra_bo *bo;
-	ssize_t size;
 	int err;
 
-	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
-	if (!bo)
-		return ERR_PTR(-ENOMEM);
-
-	host1x_bo_init(&bo->base, &tegra_bo_ops);
-
-	size = round_up(buf->size, PAGE_SIZE);
-
-	err = drm_gem_object_init(drm, &bo->gem, size);
-	if (err < 0)
-		goto free;
-
-	err = drm_gem_create_mmap_offset(&bo->gem);
-	if (err < 0)
-		goto release;
+	bo = tegra_bo_alloc_object(drm, buf->size);
+	if (IS_ERR(bo))
+		return bo;
 
 	attach = dma_buf_attach(buf, drm->dev);
 	if (IS_ERR(attach)) {
 		err = PTR_ERR(attach);
-		goto free_mmap;
+		goto free;
 	}
 
 	get_dma_buf(buf);
@@ -212,12 +347,19 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
 		goto detach;
 	}
 
-	if (bo->sgt->nents > 1) {
-		err = -EINVAL;
-		goto detach;
+	if (tegra->domain) {
+		err = tegra_bo_iommu_map(tegra, bo);
+		if (err < 0)
+			goto detach;
+	} else {
+		if (bo->sgt->nents > 1) {
+			err = -EINVAL;
+			goto detach;
+		}
+
+		bo->paddr = sg_dma_address(bo->sgt->sgl);
 	}
 
-	bo->paddr = sg_dma_address(bo->sgt->sgl);
 	bo->gem.import_attach = attach;
 
 	return bo;
@@ -228,47 +370,41 @@ detach:
 	dma_buf_detach(buf, attach);
 	dma_buf_put(buf);
 
-free_mmap:
-	drm_gem_free_mmap_offset(&bo->gem);
-release:
-	drm_gem_object_release(&bo->gem);
 free:
+	drm_gem_object_release(&bo->gem);
 	kfree(bo);
-
 	return ERR_PTR(err);
 }
 
 void tegra_bo_free_object(struct drm_gem_object *gem)
 {
+	struct tegra_drm *tegra = gem->dev->dev_private;
 	struct tegra_bo *bo = to_tegra_bo(gem);
 
+	if (tegra->domain)
+		tegra_bo_iommu_unmap(tegra, bo);
+
 	if (gem->import_attach) {
 		dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
 					 DMA_TO_DEVICE);
 		drm_prime_gem_destroy(gem, NULL);
 	} else {
-		tegra_bo_destroy(gem->dev, bo);
+		tegra_bo_free(gem->dev, bo);
 	}
 
-	drm_gem_free_mmap_offset(gem);
 	drm_gem_object_release(gem);
-
 	kfree(bo);
 }
 
 int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
 			 struct drm_mode_create_dumb *args)
 {
-	int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
 	struct tegra_drm *tegra = drm->dev_private;
 	struct tegra_bo *bo;
 
-	min_pitch = round_up(min_pitch, tegra->pitch_align);
-	if (args->pitch < min_pitch)
-		args->pitch = min_pitch;
-
-	if (args->size < args->pitch * args->height)
-		args->size = args->pitch * args->height;
+	args->pitch = round_up(min_pitch, tegra->pitch_align);
+	args->size = args->pitch * args->height;
 
 	bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
 					 &args->handle);
@@ -279,7 +415,7 @@ int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
 }
 
 int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
-			     uint32_t handle, uint64_t *offset)
+			     u32 handle, u64 *offset)
 {
 	struct drm_gem_object *gem;
 	struct tegra_bo *bo;
@@ -304,7 +440,38 @@ int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
 	return 0;
 }
 
+static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct drm_gem_object *gem = vma->vm_private_data;
+	struct tegra_bo *bo = to_tegra_bo(gem);
+	struct page *page;
+	pgoff_t offset;
+	int err;
+
+	if (!bo->pages)
+		return VM_FAULT_SIGBUS;
+
+	offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT;
+	page = bo->pages[offset];
+
+	err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
+	switch (err) {
+	case -EAGAIN:
+	case 0:
+	case -ERESTARTSYS:
+	case -EINTR:
+	case -EBUSY:
+		return VM_FAULT_NOPAGE;
+
+	case -ENOMEM:
+		return VM_FAULT_OOM;
+	}
+
+	return VM_FAULT_SIGBUS;
+}
+
 const struct vm_operations_struct tegra_bo_vm_ops = {
+	.fault = tegra_bo_fault,
 	.open = drm_gem_vm_open,
 	.close = drm_gem_vm_close,
 };
@@ -322,12 +489,30 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
 	gem = vma->vm_private_data;
 	bo = to_tegra_bo(gem);
 
-	ret = remap_pfn_range(vma, vma->vm_start, bo->paddr >> PAGE_SHIFT,
-			      vma->vm_end - vma->vm_start, vma->vm_page_prot);
-	if (ret)
-		drm_gem_vm_close(vma);
+	if (!bo->pages) {
+		unsigned long vm_pgoff = vma->vm_pgoff;
+
+		vma->vm_flags &= ~VM_PFNMAP;
+		vma->vm_pgoff = 0;
+
+		ret = dma_mmap_writecombine(gem->dev->dev, vma, bo->vaddr,
+					    bo->paddr, gem->size);
+		if (ret) {
+			drm_gem_vm_close(vma);
+			return ret;
+		}
+
+		vma->vm_pgoff = vm_pgoff;
+	} else {
+		pgprot_t prot = vm_get_page_prot(vma->vm_flags);
+
+		vma->vm_flags |= VM_MIXEDMAP;
+		vma->vm_flags &= ~VM_PFNMAP;
 
-	return ret;
+		vma->vm_page_prot = pgprot_writecombine(prot);
+	}
+
+	return 0;
 }
 
 static struct sg_table *
@@ -342,21 +527,44 @@ tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
 	if (!sgt)
 		return NULL;
 
-	if (sg_alloc_table(sgt, 1, GFP_KERNEL)) {
-		kfree(sgt);
-		return NULL;
-	}
+	if (bo->pages) {
+		struct scatterlist *sg;
+		unsigned int i;
 
-	sg_dma_address(sgt->sgl) = bo->paddr;
-	sg_dma_len(sgt->sgl) = gem->size;
+		if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
+			goto free;
+
+		for_each_sg(sgt->sgl, sg, bo->num_pages, i)
+			sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);
+
+		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
+			goto free;
+	} else {
+		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
+			goto free;
+
+		sg_dma_address(sgt->sgl) = bo->paddr;
+		sg_dma_len(sgt->sgl) = gem->size;
+	}
 
 	return sgt;
+
+free:
+	sg_free_table(sgt);
+	kfree(sgt);
+	return NULL;
 }
 
 static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
 					  struct sg_table *sgt,
 					  enum dma_data_direction dir)
 {
+	struct drm_gem_object *gem = attach->dmabuf->priv;
+	struct tegra_bo *bo = to_tegra_bo(gem);
+
+	if (bo->pages)
+		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+
 	sg_free_table(sgt);
 	kfree(sgt);
 }
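A note on the interim __iommu_map_sg() helper above: it walks the scatterlist and maps each entry back to back at a contiguous I/O virtual address, so the caller must first reserve an IOVA range large enough for the whole list (which tegra_bo_iommu_map() does via drm_mm_insert_node_generic()). Because each entry's length is computed as s->offset + s->length, every mapping starts at the entry's page boundary and covers through the end of its data. The sketch below is a hypothetical caller, not part of the patch; example_map_buffer() and its parameters are made-up names, and it assumes the helper were callable from this context:

	#include <linux/iommu.h>
	#include <linux/scatterlist.h>

	static int example_map_buffer(struct iommu_domain *domain,
				      unsigned long iova, struct sg_table *sgt)
	{
		ssize_t mapped;

		/* returns the number of bytes mapped, or a negative error */
		mapped = __iommu_map_sg(domain, iova, sgt->sgl, sgt->nents,
					IOMMU_READ | IOMMU_WRITE);
		if (mapped < 0)
			return mapped;

		/* ... program the device with addresses based on 'iova' ... */

		/* tear down exactly the range that was mapped */
		iommu_unmap(domain, iova, mapped);
		return 0;
	}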
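The import path now branches on tegra->domain: with an IOMMU present, a multi-entry sg_table from a foreign exporter can still be presented to the hardware as a single contiguous IOVA range, while without one the old single-entry (physically contiguous) restriction remains. A minimal kernel-side sketch of the attach/map sequence that tegra_bo_import() performs, with a hypothetical example_import() name and abbreviated error handling:

	#include <linux/dma-buf.h>

	static int example_import(struct device *dev, struct dma_buf *buf)
	{
		struct dma_buf_attachment *attach;
		struct sg_table *sgt;

		attach = dma_buf_attach(buf, dev);
		if (IS_ERR(attach))
			return PTR_ERR(attach);

		sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
		if (IS_ERR(sgt)) {
			dma_buf_detach(buf, attach);
			return PTR_ERR(sgt);
		}

		/*
		 * With an IOMMU: map every sg entry into one IOVA range, as
		 * tegra_bo_iommu_map() does above. Without one: require
		 * sgt->nents == 1 and use sg_dma_address() directly.
		 */

		dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
		dma_buf_detach(buf, attach);
		return 0;
	}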
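For page-backed objects the mmap path no longer pre-populates the mapping: tegra_drm_mmap() only marks the VMA VM_MIXEDMAP and write-combined, and tegra_bo_fault() installs individual pages on first access. Below is the pattern reduced to its core, as a hypothetical example_fault() helper against the kernel interfaces this patch targets (vmf->virtual_address was removed in later kernels); the real handler above also treats -EAGAIN, -ERESTARTSYS and -EINTR as retryable:

	#include <linux/mm.h>

	static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
				 struct page **pages)
	{
		unsigned long addr = (unsigned long)vmf->virtual_address;
		/* translate the faulting address into a page index */
		pgoff_t offset = (addr - vma->vm_start) >> PAGE_SHIFT;

		switch (vm_insert_page(vma, addr, pages[offset])) {
		case 0:
		case -EBUSY:	/* lost a race; the PTE is already present */
			return VM_FAULT_NOPAGE;
		case -ENOMEM:
			return VM_FAULT_OOM;
		default:
			return VM_FAULT_SIGBUS;
		}
	}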
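None of this is visible to userspace: a dumb buffer is created, its fake mmap offset fetched, and mmap() called on the DRM fd exactly as before, whether the pages then arrive through the fault handler or through dma_mmap_writecombine(). A userspace sketch using the generic dumb-buffer ioctls (assumes a libdrm build environment and a Tegra DRM node; map_dumb_buffer() is a made-up name and error paths are trimmed):

	#include <stdint.h>
	#include <sys/mman.h>
	#include <xf86drm.h>

	static void *map_dumb_buffer(int fd, uint32_t width, uint32_t height,
				     uint64_t *size)
	{
		struct drm_mode_create_dumb create = {
			.width = width, .height = height, .bpp = 32,
		};
		struct drm_mode_map_dumb map = { 0 };

		/* kernel side: tegra_bo_dumb_create() */
		if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
			return MAP_FAILED;

		/* kernel side: tegra_bo_dumb_map_offset() */
		map.handle = create.handle;
		if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
			return MAP_FAILED;

		/* kernel side: tegra_drm_mmap(), fault- or DMA-backed */
		*size = create.size;
		return mmap(NULL, create.size, PROT_READ | PROT_WRITE,
			    MAP_SHARED, fd, map.offset);
	}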