From b40a6ab2cf9213923bf8e821ce7fa7f6a0a26990 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Wed, 7 Apr 2021 18:19:58 -0400 Subject: drm/amdkfd: Use drm_priv to pass VM from KFD to amdgpu amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu needs the drm_priv to allow mmap to access the BO through the corresponding file descriptor. The VM can also be extracted from drm_priv, so drm_priv can replace the vm parameter in the kfd2kgd interface. Signed-off-by: Felix Kuehling Reviewed-by: Philip Yang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 69 ++++++++++++++---------- 1 file changed, 41 insertions(+), 28 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 7d4118c8128a..dc86faa03b88 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -948,6 +948,13 @@ static int process_update_pds(struct amdkfd_process_info *process_info, return 0; } +static struct amdgpu_vm *drm_priv_to_vm(struct drm_file *drm_priv) +{ + struct amdgpu_fpriv *fpriv = drm_priv->driver_priv; + + return &fpriv->vm; +} + static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, struct dma_fence **ef) { @@ -1036,15 +1043,19 @@ create_evict_fence_fail: int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd, struct file *filp, u32 pasid, - void **vm, void **process_info, + void **process_info, struct dma_fence **ef) { struct amdgpu_device *adev = get_amdgpu_device(kgd); - struct drm_file *drm_priv = filp->private_data; - struct amdgpu_fpriv *drv_priv = drm_priv->driver_priv; - struct amdgpu_vm *avm = &drv_priv->vm; + struct amdgpu_fpriv *drv_priv; + struct amdgpu_vm *avm; int ret; + ret = amdgpu_file_to_fpriv(filp, &drv_priv); + if (ret) + return ret; + avm = &drv_priv->vm; + /* Already a compute VM? 
*/ if (avm->process_info) return -EINVAL; @@ -1059,7 +1070,7 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd, if (ret) return ret; - *vm = (void *)avm; + amdgpu_vm_set_task_info(avm); return 0; } @@ -1100,15 +1111,17 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, } } -void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm) +void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *drm_priv) { struct amdgpu_device *adev = get_amdgpu_device(kgd); - struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; + struct amdgpu_vm *avm; - if (WARN_ON(!kgd || !vm)) + if (WARN_ON(!kgd || !drm_priv)) return; - pr_debug("Releasing process vm %p\n", vm); + avm = drm_priv_to_vm(drm_priv); + + pr_debug("Releasing process vm %p\n", avm); /* The original pasid of amdgpu vm has already been * released during making a amdgpu vm to a compute vm @@ -1119,9 +1132,9 @@ void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm) amdgpu_vm_release_compute(adev, avm); } -uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm) +uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv) { - struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; + struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); struct amdgpu_bo *pd = avm->root.base.bo; struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); @@ -1132,11 +1145,11 @@ uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm) int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( struct kgd_dev *kgd, uint64_t va, uint64_t size, - void *vm, struct kgd_mem **mem, + void *drm_priv, struct kgd_mem **mem, uint64_t *offset, uint32_t flags) { struct amdgpu_device *adev = get_amdgpu_device(kgd); - struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; + struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); enum ttm_bo_type bo_type = ttm_bo_type_device; struct sg_table *sg = NULL; uint64_t user_addr = 0; @@ -1347,10 +1360,10 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( } int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, void *vm) + struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv) { struct amdgpu_device *adev = get_amdgpu_device(kgd); - struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; + struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); int ret; struct amdgpu_bo *bo; uint32_t domain; @@ -1391,9 +1404,9 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n", mem->va, mem->va + bo_size * (1 + mem->aql_queue), - vm, domain_string(domain)); + avm, domain_string(domain)); - ret = reserve_bo_and_vm(mem, vm, &ctx); + ret = reserve_bo_and_vm(mem, avm, &ctx); if (unlikely(ret)) goto out; @@ -1437,7 +1450,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( } list_for_each_entry(entry, &mem->bo_va_list, bo_list) { - if (entry->bo_va->base.vm == vm && !entry->is_mapped) { + if (entry->bo_va->base.vm == avm && !entry->is_mapped) { pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n", entry->va, entry->va + bo_size, entry); @@ -1449,7 +1462,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( goto map_bo_to_gpuvm_failed; } - ret = vm_update_pds(vm, ctx.sync); + ret = vm_update_pds(avm, ctx.sync); if (ret) { pr_err("Failed to update page directories\n"); goto map_bo_to_gpuvm_failed; @@ -1485,11 +1498,11 @@ out: } int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, void *vm) + struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv) { struct amdgpu_device *adev = get_amdgpu_device(kgd); - struct amdkfd_process_info 
*process_info = - ((struct amdgpu_vm *)vm)->process_info; + struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); + struct amdkfd_process_info *process_info = avm->process_info; unsigned long bo_size = mem->bo->tbo.base.size; struct kfd_bo_va_list *entry; struct bo_vm_reservation_context ctx; @@ -1497,7 +1510,7 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( mutex_lock(&mem->lock); - ret = reserve_bo_and_cond_vms(mem, vm, BO_VM_MAPPED, &ctx); + ret = reserve_bo_and_cond_vms(mem, avm, BO_VM_MAPPED, &ctx); if (unlikely(ret)) goto out; /* If no VMs were reserved, it means the BO wasn't actually mapped */ @@ -1506,17 +1519,17 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( goto unreserve_out; } - ret = vm_validate_pt_pd_bos((struct amdgpu_vm *)vm); + ret = vm_validate_pt_pd_bos(avm); if (unlikely(ret)) goto unreserve_out; pr_debug("Unmap VA 0x%llx - 0x%llx from vm %p\n", mem->va, mem->va + bo_size * (1 + mem->aql_queue), - vm); + avm); list_for_each_entry(entry, &mem->bo_va_list, bo_list) { - if (entry->bo_va->base.vm == vm && entry->is_mapped) { + if (entry->bo_va->base.vm == avm && entry->is_mapped) { pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n", entry->va, entry->va + bo_size, @@ -1642,14 +1655,14 @@ int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd, int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, struct dma_buf *dma_buf, - uint64_t va, void *vm, + uint64_t va, void *drm_priv, struct kgd_mem **mem, uint64_t *size, uint64_t *mmap_offset) { struct amdgpu_device *adev = (struct amdgpu_device *)kgd; + struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); struct drm_gem_object *obj; struct amdgpu_bo *bo; - struct amdgpu_vm *avm = (struct amdgpu_vm *)vm; if (dma_buf->ops != &amdgpu_dmabuf_ops) /* Can't handle non-graphics buffers */ -- cgit v1.2.3 From d4ec4bdc0bd5ad352854473ba4dcbdb39fd5bfdd Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Wed, 7 Apr 2021 18:46:26 -0400 Subject: drm/amdkfd: Allow access for mmapping KFD BOs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit DRM render node file handles are used for CPU mapping of BOs using mmap by the Thunk. It uses the DRM render node of the GPU where the BO was allocated. DRM allows mmap access automatically when it creates a GEM handle for a BO. KFD BOs don't have GEM handles, so KFD needs to manage access manually. Use drm_vma_node_allow to allow user mode to mmap BOs allocated with kfd_ioctl_alloc_memory_of_gpu through the DRM render node that was used in the kfd_ioctl_acquire_vm call for the same GPU. 
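In outline, the access grant follows the BO lifetime (condensed from the hunks below; error handling trimmed):

	/* at allocation, right after the GEM object is created */
	ret = drm_vma_node_allow(&gobj->vma_node, drm_priv);
	if (ret)
		goto err_node_allow;

	/* at free, revoke access before dropping the GEM reference */
	drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv);
	drm_gem_object_put(&mem->bo->tbo.base);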
Signed-off-by: Felix Kuehling Acked-by: Christian König Reviewed-by: Philip Yang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 18 +++++++++++++++++- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 8 +++++--- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 9 ++++++--- 4 files changed, 30 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 0d59bebd92af..7c8c5e469707 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -245,7 +245,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( void *drm_priv, struct kgd_mem **mem, uint64_t *offset, uint32_t flags); int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size); + struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, + uint64_t *size); int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv); int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index dc86faa03b88..f96c331c9b6d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1229,6 +1229,11 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( domain_string(alloc_domain), ret); goto err_bo_create; } + ret = drm_vma_node_allow(&gobj->vma_node, drm_priv); + if (ret) { + pr_debug("Failed to allow vma node access. ret %d\n", ret); + goto err_node_allow; + } bo = gem_to_amdgpu_bo(gobj); if (bo_type == ttm_bo_type_sg) { bo->tbo.sg = sg; @@ -1258,6 +1263,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( allocate_init_user_pages_failed: remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info); + drm_vma_node_revoke(&gobj->vma_node, drm_priv); +err_node_allow: amdgpu_bo_unref(&bo); /* Don't unreserve system mem limit twice */ goto err_reserve_limit; @@ -1275,7 +1282,8 @@ err: } int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size) + struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, + uint64_t *size) { struct amdkfd_process_info *process_info = mem->process_info; unsigned long bo_size = mem->bo->tbo.base.size; @@ -1352,6 +1360,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( } /* Free the BO*/ + drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv); drm_gem_object_put(&mem->bo->tbo.base); mutex_destroy(&mem->lock); kfree(mem); @@ -1663,6 +1672,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); struct drm_gem_object *obj; struct amdgpu_bo *bo; + int ret; if (dma_buf->ops != &amdgpu_dmabuf_ops) /* Can't handle non-graphics buffers */ @@ -1683,6 +1693,12 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, if (!*mem) return -ENOMEM; + ret = drm_vma_node_allow(&obj->vma_node, drm_priv); + if (ret) { + kfree(mem); + return ret; + } + if (size) *size = amdgpu_bo_size(bo); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 97da1632f504..242e8b28feac 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1328,7 +1328,8 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep, return 0; err_free: - 
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, NULL); + amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, + pdd->drm_priv, NULL); err_unlock: mutex_unlock(&p->mutex); return err; @@ -1365,7 +1366,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep, } ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, - (struct kgd_mem *)mem, &size); + (struct kgd_mem *)mem, pdd->drm_priv, &size); /* If freeing the buffer failed, leave the handle in place for * clean-up during process tear-down. @@ -1721,7 +1722,8 @@ static int kfd_ioctl_import_dmabuf(struct file *filep, return 0; err_free: - amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, NULL); + amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, + pdd->drm_priv, NULL); err_unlock: mutex_unlock(&p->mutex); dma_buf_put(dmabuf); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 209e9edd6ddd..0a5379439642 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -648,7 +648,8 @@ static void kfd_process_free_gpuvm(struct kgd_mem *mem, struct kfd_dev *dev = pdd->dev; amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->drm_priv); - amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem, NULL); + amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem, pdd->drm_priv, + NULL); } /* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process @@ -712,7 +713,8 @@ sync_memory_failed: return err; err_map_mem: - amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem, NULL); + amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem, pdd->drm_priv, + NULL); err_alloc_mem: *kptr = NULL; return err; @@ -907,7 +909,8 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd) peer_pdd->dev->kgd, mem, peer_pdd->drm_priv); } - amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem, NULL); + amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem, + pdd->drm_priv, NULL); kfd_process_device_remove_obj_handle(pdd, id); } } -- cgit v1.2.3 From f80fe9d3c1149bb2ad0d7807aaaf5eff7c4e80a6 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Wed, 24 Feb 2021 18:29:06 -0500 Subject: drm/amdkfd: map svm range to GPUs Use amdgpu_vm_bo_update_mapping to update the GPU page tables, mapping or unmapping the system memory page addresses of an svm range on the GPUs.
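The two call shapes at the core of this patch (taken from the hunks below): unmapping writes zero PTEs over the range, mapping writes PTEs from the per-GPU array of system-memory DMA addresses:

	/* unmap: clear PTEs for [start, last] */
	r = amdgpu_vm_bo_update_mapping(adev, adev, vm, false, true, NULL,
					start, last, 0 /* init_pte_value */,
					0, NULL, NULL, fence);

	/* map: write PTEs from the dma_addr array with the range's flags */
	r = amdgpu_vm_bo_update_mapping(adev, adev, vm, false, false, NULL,
					prange->mapping.start,
					prange->mapping.last, pte_flags,
					prange->mapping.offset, NULL,
					dma_addr, &vm->last_update);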
Signed-off-by: Philip Yang Signed-off-by: Alex Sierra Reviewed-by: Felix Kuehling Signed-off-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 4 + drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 7 - drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 473 ++++++++++++++++++++++- drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 4 + 4 files changed, 478 insertions(+), 10 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 7c8c5e469707..670e9f8d58b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -234,6 +234,10 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s }) /* GPUVM API */ +#define drm_priv_to_vm(drm_priv) \ + (&((struct amdgpu_fpriv *) \ + ((struct drm_file *)(drm_priv))->driver_priv)->vm) + int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd, struct file *filp, u32 pasid, void **process_info, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index f96c331c9b6d..618614fe3eb4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -948,13 +948,6 @@ static int process_update_pds(struct amdkfd_process_info *process_info, return 0; } -static struct amdgpu_vm *drm_priv_to_vm(struct drm_file *drm_priv) -{ - struct amdgpu_fpriv *fpriv = drm_priv->driver_priv; - - return &fpriv->vm; -} - static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, struct dma_fence **ef) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 72d54cf4acc8..3ccb75d45f13 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -99,11 +99,123 @@ static void svm_range_remove_notifier(struct svm_range *prange) mmu_interval_notifier_remove(&prange->notifier); } +static int +svm_range_dma_map_dev(struct device *dev, dma_addr_t **dma_addr, + unsigned long *hmm_pfns, uint64_t npages) +{ + enum dma_data_direction dir = DMA_BIDIRECTIONAL; + dma_addr_t *addr = *dma_addr; + struct page *page; + int i, r; + + if (!addr) { + addr = kvmalloc_array(npages, sizeof(*addr), + GFP_KERNEL | __GFP_ZERO); + if (!addr) + return -ENOMEM; + *dma_addr = addr; + } + + for (i = 0; i < npages; i++) { + if (WARN_ONCE(addr[i] && !dma_mapping_error(dev, addr[i]), + "leaking dma mapping\n")) + dma_unmap_page(dev, addr[i], PAGE_SIZE, dir); + + page = hmm_pfn_to_page(hmm_pfns[i]); + addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir); + r = dma_mapping_error(dev, addr[i]); + if (r) { + pr_debug("failed %d dma_map_page\n", r); + return r; + } + pr_debug("dma mapping 0x%llx for page addr 0x%lx\n", + addr[i] >> PAGE_SHIFT, page_to_pfn(page)); + } + return 0; +} + +static int +svm_range_dma_map(struct svm_range *prange, unsigned long *bitmap, + unsigned long *hmm_pfns) +{ + struct kfd_process *p; + uint32_t gpuidx; + int r; + + p = container_of(prange->svms, struct kfd_process, svms); + + for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) { + struct kfd_process_device *pdd; + struct amdgpu_device *adev; + + pr_debug("mapping to gpu idx 0x%x\n", gpuidx); + pdd = kfd_process_device_from_gpuidx(p, gpuidx); + if (!pdd) { + pr_debug("failed to find device idx %d\n", gpuidx); + return -EINVAL; + } + adev = (struct amdgpu_device *)pdd->dev->kgd; + + r = svm_range_dma_map_dev(adev->dev, 
&prange->dma_addr[gpuidx], + hmm_pfns, prange->npages); + if (r) + break; + } + + return r; +} + +static void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr, + unsigned long offset, unsigned long npages) +{ + enum dma_data_direction dir = DMA_BIDIRECTIONAL; + int i; + + if (!dma_addr) + return; + + for (i = offset; i < offset + npages; i++) { + if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i])) + continue; + pr_debug("dma unmapping 0x%llx\n", dma_addr[i] >> PAGE_SHIFT); + dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir); + dma_addr[i] = 0; + } +} + +static void svm_range_free_dma_mappings(struct svm_range *prange) +{ + struct kfd_process_device *pdd; + dma_addr_t *dma_addr; + struct device *dev; + struct kfd_process *p; + uint32_t gpuidx; + + p = container_of(prange->svms, struct kfd_process, svms); + + for (gpuidx = 0; gpuidx < MAX_GPU_INSTANCE; gpuidx++) { + dma_addr = prange->dma_addr[gpuidx]; + if (!dma_addr) + continue; + + pdd = kfd_process_device_from_gpuidx(p, gpuidx); + if (!pdd) { + pr_debug("failed to find device idx %d\n", gpuidx); + continue; + } + dev = &pdd->dev->pdev->dev; + svm_range_dma_unmap(dev, dma_addr, 0, prange->npages); + kvfree(dma_addr); + prange->dma_addr[gpuidx] = NULL; + } +} + static void svm_range_free(struct svm_range *prange) { pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms, prange, prange->start, prange->last); + svm_range_free_dma_mappings(prange); mutex_destroy(&prange->lock); kfree(prange); } @@ -149,6 +261,15 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start, return prange; } +static int svm_range_bo_validate(void *param, struct amdgpu_bo *bo) +{ + struct ttm_operation_ctx ctx = { false, false }; + + amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM); + + return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); +} + static int svm_range_check_attr(struct kfd_process *p, uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs) @@ -291,6 +412,61 @@ svm_range_is_same_attrs(struct svm_range *old, struct svm_range *new) old->granularity == new->granularity); } +static int +svm_range_split_array(void *ppnew, void *ppold, size_t size, + uint64_t old_start, uint64_t old_n, + uint64_t new_start, uint64_t new_n) +{ + unsigned char *new, *old, *pold; + uint64_t d; + + if (!ppold) + return 0; + pold = *(unsigned char **)ppold; + if (!pold) + return 0; + + new = kvmalloc_array(new_n, size, GFP_KERNEL); + if (!new) + return -ENOMEM; + + d = (new_start - old_start) * size; + memcpy(new, pold + d, new_n * size); + + old = kvmalloc_array(old_n, size, GFP_KERNEL); + if (!old) { + kvfree(new); + return -ENOMEM; + } + + d = (new_start == old_start) ? 
new_n * size : 0; + memcpy(old, pold + d, old_n * size); + + kvfree(pold); + *(void **)ppold = old; + *(void **)ppnew = new; + + return 0; +} + +static int +svm_range_split_pages(struct svm_range *new, struct svm_range *old, + uint64_t start, uint64_t last) +{ + uint64_t npages = last - start + 1; + int i, r; + + for (i = 0; i < MAX_GPU_INSTANCE; i++) { + r = svm_range_split_array(&new->dma_addr[i], &old->dma_addr[i], + sizeof(*old->dma_addr[i]), old->start, + npages, new->start, new->npages); + if (r) + return r; + } + + return 0; +} + /** * svm_range_split_adjust - split range and adjust * @@ -299,7 +475,7 @@ svm_range_is_same_attrs(struct svm_range *old, struct svm_range *new) * @start: the old range adjust to start address in pages * @last: the old range adjust to last address in pages * - * Copy attributes in old range to new + * Copy system memory dma_addr in old range to new * range from new_start up to size new->npages, the remaining old range is from * start to last * @@ -310,6 +486,8 @@ static int svm_range_split_adjust(struct svm_range *new, struct svm_range *old, uint64_t start, uint64_t last) { + int r; + pr_debug("svms 0x%p new 0x%lx old [0x%lx 0x%lx] => [0x%llx 0x%llx]\n", new->svms, new->start, old->start, old->last, start, last); @@ -319,6 +497,10 @@ svm_range_split_adjust(struct svm_range *new, struct svm_range *old, return -EINVAL; } + r = svm_range_split_pages(new, old, start, last); + if (r) + return r; + old->npages = last - start + 1; old->start = start; old->last = last; @@ -425,6 +607,249 @@ svm_range_add_child(struct svm_range *prange, struct mm_struct *mm, list_add_tail(&pchild->child_list, &prange->child_list); } +static uint64_t +svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange) +{ + uint32_t flags = prange->flags; + uint32_t mapping_flags; + uint64_t pte_flags; + + pte_flags = AMDGPU_PTE_VALID; + pte_flags |= AMDGPU_PTE_SYSTEM | AMDGPU_PTE_SNOOPED; + + mapping_flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE; + + if (flags & KFD_IOCTL_SVM_FLAG_GPU_RO) + mapping_flags &= ~AMDGPU_VM_PAGE_WRITEABLE; + if (flags & KFD_IOCTL_SVM_FLAG_GPU_EXEC) + mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE; + if (flags & KFD_IOCTL_SVM_FLAG_COHERENT) + mapping_flags |= AMDGPU_VM_MTYPE_UC; + else + mapping_flags |= AMDGPU_VM_MTYPE_NC; + + /* TODO: add CHIP_ARCTURUS new flags for vram mapping */ + + pte_flags |= amdgpu_gem_va_map_flags(adev, mapping_flags); + + /* Apply ASIC specific mapping flags */ + amdgpu_gmc_get_vm_pte(adev, &prange->mapping, &pte_flags); + + pr_debug("PTE flags 0x%llx\n", pte_flags); + + return pte_flags; +} + +static int +svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm, + uint64_t start, uint64_t last, + struct dma_fence **fence) +{ + uint64_t init_pte_value = 0; + + pr_debug("[0x%llx 0x%llx]\n", start, last); + + return amdgpu_vm_bo_update_mapping(adev, adev, vm, false, true, NULL, + start, last, init_pte_value, 0, + NULL, NULL, fence); +} + +static int +svm_range_unmap_from_gpus(struct svm_range *prange, unsigned long start, + unsigned long last) +{ + DECLARE_BITMAP(bitmap, MAX_GPU_INSTANCE); + struct kfd_process_device *pdd; + struct dma_fence *fence = NULL; + struct amdgpu_device *adev; + struct kfd_process *p; + uint32_t gpuidx; + int r = 0; + + bitmap_or(bitmap, prange->bitmap_access, prange->bitmap_aip, + MAX_GPU_INSTANCE); + p = container_of(prange->svms, struct kfd_process, svms); + + for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) { + pr_debug("unmap from gpu idx 0x%x\n", gpuidx); + pdd 
= kfd_process_device_from_gpuidx(p, gpuidx); + if (!pdd) { + pr_debug("failed to find device idx %d\n", gpuidx); + return -EINVAL; + } + adev = (struct amdgpu_device *)pdd->dev->kgd; + + r = svm_range_unmap_from_gpu(adev, drm_priv_to_vm(pdd->drm_priv), + start, last, &fence); + if (r) + break; + + if (fence) { + r = dma_fence_wait(fence, false); + dma_fence_put(fence); + fence = NULL; + if (r) + break; + } + amdgpu_amdkfd_flush_gpu_tlb_pasid((struct kgd_dev *)adev, + p->pasid); + } + + return r; +} + +static int +svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct svm_range *prange, dma_addr_t *dma_addr, + struct dma_fence **fence) +{ + uint64_t pte_flags; + int r = 0; + + pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start, + prange->last); + + prange->mapping.start = prange->start; + prange->mapping.last = prange->last; + prange->mapping.offset = 0; + pte_flags = svm_range_get_pte_flags(adev, prange); + + r = amdgpu_vm_bo_update_mapping(adev, adev, vm, false, false, NULL, + prange->mapping.start, + prange->mapping.last, pte_flags, + prange->mapping.offset, NULL, + dma_addr, &vm->last_update); + if (r) { + pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start); + goto out; + } + + r = amdgpu_vm_update_pdes(adev, vm, false); + if (r) { + pr_debug("failed %d to update directories 0x%lx\n", r, + prange->start); + goto out; + } + + if (fence) + *fence = dma_fence_get(vm->last_update); + +out: + return r; +} + +static int svm_range_map_to_gpus(struct svm_range *prange, + unsigned long *bitmap, bool wait) +{ + struct kfd_process_device *pdd; + struct amdgpu_device *adev; + struct kfd_process *p; + struct dma_fence *fence = NULL; + uint32_t gpuidx; + int r = 0; + + p = container_of(prange->svms, struct kfd_process, svms); + for_each_set_bit(gpuidx, bitmap, MAX_GPU_INSTANCE) { + pdd = kfd_process_device_from_gpuidx(p, gpuidx); + if (!pdd) { + pr_debug("failed to find device idx %d\n", gpuidx); + return -EINVAL; + } + adev = (struct amdgpu_device *)pdd->dev->kgd; + + pdd = kfd_bind_process_to_device(pdd->dev, p); + if (IS_ERR(pdd)) + return -EINVAL; + + r = svm_range_map_to_gpu(adev, drm_priv_to_vm(pdd->drm_priv), + prange, prange->dma_addr[gpuidx], + wait ? 
&fence : NULL); + if (r) + break; + + if (fence) { + r = dma_fence_wait(fence, false); + dma_fence_put(fence); + fence = NULL; + if (r) { + pr_debug("failed %d to dma fence wait\n", r); + break; + } + } + } + + return r; +} + +struct svm_validate_context { + struct kfd_process *process; + struct svm_range *prange; + bool intr; + unsigned long bitmap[MAX_GPU_INSTANCE]; + struct ttm_validate_buffer tv[MAX_GPU_INSTANCE+1]; + struct list_head validate_list; + struct ww_acquire_ctx ticket; +}; + +static int svm_range_reserve_bos(struct svm_validate_context *ctx) +{ + struct kfd_process_device *pdd; + struct amdgpu_device *adev; + struct amdgpu_vm *vm; + uint32_t gpuidx; + int r; + + INIT_LIST_HEAD(&ctx->validate_list); + for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) { + pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx); + if (!pdd) { + pr_debug("failed to find device idx %d\n", gpuidx); + return -EINVAL; + } + adev = (struct amdgpu_device *)pdd->dev->kgd; + vm = drm_priv_to_vm(pdd->drm_priv); + + ctx->tv[gpuidx].bo = &vm->root.base.bo->tbo; + ctx->tv[gpuidx].num_shared = 4; + list_add(&ctx->tv[gpuidx].head, &ctx->validate_list); + } + + r = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->validate_list, + ctx->intr, NULL); + if (r) { + pr_debug("failed %d to reserve bo\n", r); + return r; + } + + for_each_set_bit(gpuidx, ctx->bitmap, MAX_GPU_INSTANCE) { + pdd = kfd_process_device_from_gpuidx(ctx->process, gpuidx); + if (!pdd) { + pr_debug("failed to find device idx %d\n", gpuidx); + r = -EINVAL; + goto unreserve_out; + } + adev = (struct amdgpu_device *)pdd->dev->kgd; + + r = amdgpu_vm_validate_pt_bos(adev, drm_priv_to_vm(pdd->drm_priv), + svm_range_bo_validate, NULL); + if (r) { + pr_debug("failed %d validate pt bos\n", r); + goto unreserve_out; + } + } + + return 0; + +unreserve_out: + ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list); + return r; +} + +static void svm_range_unreserve_bos(struct svm_validate_context *ctx) +{ + ttm_eu_backoff_reservation(&ctx->ticket, &ctx->validate_list); +} + /* * Validation+GPU mapping with concurrent invalidation (MMU notifiers) * @@ -453,9 +878,27 @@ static int svm_range_validate_and_map(struct mm_struct *mm, struct svm_range *prange, uint32_t gpuidx, bool intr, bool wait) { + struct svm_validate_context ctx; struct hmm_range *hmm_range; int r = 0; + ctx.process = container_of(prange->svms, struct kfd_process, svms); + ctx.prange = prange; + ctx.intr = intr; + + if (gpuidx < MAX_GPU_INSTANCE) { + bitmap_zero(ctx.bitmap, MAX_GPU_INSTANCE); + bitmap_set(ctx.bitmap, gpuidx, 1); + } else { + bitmap_or(ctx.bitmap, prange->bitmap_access, + prange->bitmap_aip, MAX_GPU_INSTANCE); + } + + if (bitmap_empty(ctx.bitmap, MAX_GPU_INSTANCE)) + return 0; + + svm_range_reserve_bos(&ctx); + if (!prange->actual_loc) { r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL, prange->start << PAGE_SHIFT, @@ -465,6 +908,13 @@ static int svm_range_validate_and_map(struct mm_struct *mm, pr_debug("failed %d to get svm range pages\n", r); goto unreserve_out; } + + r = svm_range_dma_map(prange, ctx.bitmap, + hmm_range->hmm_pfns); + if (r) { + pr_debug("failed %d to dma map range\n", r); + goto unreserve_out; + } } svm_range_lock(prange); @@ -474,12 +924,17 @@ static int svm_range_validate_and_map(struct mm_struct *mm, goto unlock_out; } } + if (!list_empty(&prange->child_list)) { + r = -EAGAIN; + goto unlock_out; + } - /* TODO: map to GPU */ + r = svm_range_map_to_gpus(prange, ctx.bitmap, wait); unlock_out: svm_range_unlock(prange); unreserve_out: + 
svm_range_unreserve_bos(&ctx); return r; } @@ -834,6 +1289,7 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange, struct svm_range_list *svms; struct svm_range *pchild; struct kfd_process *p; + unsigned long s, l; bool unmap_parent; p = kfd_lookup_process_by_mm(mm); @@ -846,8 +1302,19 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange, unmap_parent = start <= prange->start && last >= prange->last; - list_for_each_entry(pchild, &prange->child_list, child_list) + list_for_each_entry(pchild, &prange->child_list, child_list) { + mutex_lock_nested(&pchild->lock, 1); + s = max(start, pchild->start); + l = min(last, pchild->last); + if (l >= s) + svm_range_unmap_from_gpus(pchild, s, l); svm_range_unmap_split(mm, prange, pchild, start, last); + mutex_unlock(&pchild->lock); + } + s = max(start, prange->start); + l = min(last, prange->last); + if (l >= s) + svm_range_unmap_from_gpus(prange, s, l); svm_range_unmap_split(mm, prange, prange, start, last); if (unmap_parent) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index 173e93e138a9..5949890bf48c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -57,7 +57,9 @@ struct svm_work_list_item { * @update_list:link list node used to add to update_list * @remove_list:link list node used to add to remove list * @insert_list:link list node used to add to insert list + * @mapping: bo_va mapping structure to create and update GPU page table * @npages: number of pages + * @dma_addr: dma mapping address on each GPU for system memory physical page * @lock: protect prange start, last, child_list, svm_bo_list * @saved_flags:save/restore current PF_MEMALLOC flags * @flags: flags defined as KFD_IOCTL_SVM_FLAG_* @@ -85,7 +87,9 @@ struct svm_range { struct list_head update_list; struct list_head remove_list; struct list_head insert_list; + struct amdgpu_bo_va_mapping mapping; uint64_t npages; + dma_addr_t *dma_addr[MAX_GPU_INSTANCE]; struct mutex lock; unsigned int saved_flags; uint32_t flags; -- cgit v1.2.3 From c46ebb6a6d9d28fce66595b56db070b0cc4fab71 Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Thu, 15 Apr 2021 17:43:32 -0400 Subject: drm/amdkfd: set memory limit to avoid OOM with HMM enabled HMM migration allocates sizeof(struct page) of system memory for each VRAM page, which amounts to 1GB of system memory reserved for 64GB of VRAM. To avoid application OOM, increase the accounted system memory usage based on the VRAM size of all GPUs, so that application allocations fail once system memory usage reaches the limit.
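For scale: with 4KB pages and a struct page of typically 64 bytes, the reservation is the VRAM size divided by 64, which gives the 1GB-per-64GB figure above. The patch captures this in a macro:

	/* Each VRAM page uses sizeof(struct page) on system memory */
	#define SVM_HMM_PAGE_STRUCT_SIZE(size) ((size)/PAGE_SIZE * sizeof(struct page))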
Signed-off-by: Philip Yang Reviewed-by: Oak Zeng Reviewed-by: Felix Kuehling Signed-off-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 5 +++++ drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 8 ++++++++ 3 files changed, 14 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 670e9f8d58b4..b3e5ecec316c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -275,6 +275,7 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void); void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, struct amdgpu_vm *vm); void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo); +void amdgpu_amdkfd_reserve_system_mem(uint64_t size); #else static inline void amdgpu_amdkfd_gpuvm_init_mem_limits(void) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 618614fe3eb4..d03088d1eb60 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -108,6 +108,11 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void) (kfd_mem_limit.max_ttm_mem_limit >> 20)); } +void amdgpu_amdkfd_reserve_system_mem(uint64_t size) +{ + kfd_mem_limit.system_mem_used += size; +} + /* Estimate page table size needed to represent a given memory size * * With 4KB pages, we need one 8 byte PTE for each 4KB of memory diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index b32d8fdfe8e9..d8cec5ebe1d4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -57,6 +57,9 @@ static const struct dev_pagemap_ops svm_migrate_pgmap_ops = { .migrate_to_ram = svm_migrate_to_ram, }; +/* Each VRAM page uses sizeof(struct page) on system memory */ +#define SVM_HMM_PAGE_STRUCT_SIZE(size) ((size)/PAGE_SIZE * sizeof(struct page)) + int svm_migrate_init(struct amdgpu_device *adev) { struct kfd_dev *kfddev = adev->kfd.dev; @@ -93,6 +96,11 @@ int svm_migrate_init(struct amdgpu_device *adev) return PTR_ERR(r); } + pr_debug("reserve %ldMB system memory for VRAM pages struct\n", + SVM_HMM_PAGE_STRUCT_SIZE(size) >> 20); + + amdgpu_amdkfd_reserve_system_mem(SVM_HMM_PAGE_STRUCT_SIZE(size)); + pr_info("HMM registered %ldMB device memory\n", size >> 20); return 0; -- cgit v1.2.3 From eb2cec5537bbab46a319b4b0bcd71c320c382d2b Mon Sep 17 00:00:00 2001 From: Alex Sierra Date: Tue, 28 Jul 2020 13:35:32 -0500 Subject: drm/amdkfd: add svm_bo reference for eviction fence [why] As part of the SVM functionality, the eviction mechanism used for SVM_BOs is different. This mechanism uses one eviction fence per prange, instead of one fence per kfd_process. [how] Add an svm_bo reference to amdgpu_amdkfd_fence to allow differentiating between SVM_BO and regular BO evictions. This also includes modifications to set the reference at the fence creation call.
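With the extra parameter, SVM callers pass their svm_bo while the existing per-process eviction fences simply pass NULL, as in the hunks below:

	info->eviction_fence =
		amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
					   current->mm,
					   NULL /* svm_bo */);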
Signed-off-by: Alex Sierra Reviewed-by: Felix Kuehling Signed-off-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 4 +++- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c | 5 +++-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 6 ++++-- 3 files changed, 10 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index b3e5ecec316c..313ee49b9f17 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -75,6 +75,7 @@ struct amdgpu_amdkfd_fence { struct mm_struct *mm; spinlock_t lock; char timeline_name[TASK_COMM_LEN]; + struct svm_range_bo *svm_bo; }; struct amdgpu_kfd_dev { @@ -148,7 +149,8 @@ int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev, int queue_bit); struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context, - struct mm_struct *mm); + struct mm_struct *mm, + struct svm_range_bo *svm_bo); #if IS_ENABLED(CONFIG_HSA_AMD) bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm); struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c index 5af464933976..53559643c712 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c @@ -60,7 +60,8 @@ static atomic_t fence_seq = ATOMIC_INIT(0); */ struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context, - struct mm_struct *mm) + struct mm_struct *mm, + struct svm_range_bo *svm_bo) { struct amdgpu_amdkfd_fence *fence; @@ -73,7 +74,7 @@ struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context, fence->mm = mm; get_task_comm(fence->timeline_name, current); spin_lock_init(&fence->lock); - + fence->svm_bo = svm_bo; dma_fence_init(&fence->base, &amdkfd_fence_ops, &fence->lock, context, atomic_inc_return(&fence_seq)); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index d03088d1eb60..6c4df1f1796e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -972,7 +972,8 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, info->eviction_fence = amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1), - current->mm); + current->mm, + NULL); if (!info->eviction_fence) { pr_err("Failed to create eviction fence\n"); ret = -ENOMEM; @@ -2162,7 +2163,8 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) */ new_fence = amdgpu_amdkfd_fence_create( process_info->eviction_fence->base.context, - process_info->eviction_fence->mm); + process_info->eviction_fence->mm, + NULL); if (!new_fence) { pr_err("Failed to create eviction fence\n"); ret = -ENOMEM; -- cgit v1.2.3 From f04c79cfba7e01db060d17c8d46f23cf8e02845a Mon Sep 17 00:00:00 2001 From: Alex Sierra Date: Tue, 28 Jul 2020 13:38:29 -0500 Subject: drm/amdgpu: add param bit flag to create SVM BOs Add CREATE_SVM_BO define bit for SVM BOs. Another define flag was moved to concentrate these KFD type flags in one include file. 
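After the move, the two KFD BO-type flags sit together in amdgpu_object.h, claiming the top bits of the 64-bit flags word:

	/* BO flag to indicate a KFD userptr BO */
	#define AMDGPU_AMDKFD_CREATE_USERPTR_BO (1ULL << 63)
	#define AMDGPU_AMDKFD_CREATE_SVM_BO (1ULL << 62)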
Signed-off-by: Alex Sierra Reviewed-by: Felix Kuehling Signed-off-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 7 ++----- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 4 ++++ 2 files changed, 6 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 6c4df1f1796e..dfa025d694f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -33,9 +33,6 @@ #include #include "amdgpu_xgmi.h" -/* BO flag to indicate a KFD userptr BO */ -#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63) - /* Userptr restore delay, just long enough to allow consecutive VM * changes to accumulate */ @@ -222,7 +219,7 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo) u32 domain = bo->preferred_domains; bool sg = (bo->preferred_domains == AMDGPU_GEM_DOMAIN_CPU); - if (bo->flags & AMDGPU_AMDKFD_USERPTR_BO) { + if (bo->flags & AMDGPU_AMDKFD_CREATE_USERPTR_BO) { domain = AMDGPU_GEM_DOMAIN_CPU; sg = false; } @@ -1241,7 +1238,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( bo->kfd_bo = *mem; (*mem)->bo = bo; if (user_addr) - bo->flags |= AMDGPU_AMDKFD_USERPTR_BO; + bo->flags |= AMDGPU_AMDKFD_CREATE_USERPTR_BO; (*mem)->va = va; (*mem)->domain = domain; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 2d1fefbe1e99..973c88bdf37b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -37,6 +37,10 @@ #define AMDGPU_BO_INVALID_OFFSET LONG_MAX #define AMDGPU_BO_MAX_PLACEMENTS 3 +/* BO flag to indicate a KFD userptr BO */ +#define AMDGPU_AMDKFD_CREATE_USERPTR_BO (1ULL << 63) +#define AMDGPU_AMDKFD_CREATE_SVM_BO (1ULL << 62) + #define to_amdgpu_bo_user(abo) container_of((abo), struct amdgpu_bo_user, bo) struct amdgpu_bo_param { -- cgit v1.2.3 From 0c6f7777cf37b84839d556a41b9dbeedccfa64d4 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Mon, 10 May 2021 18:37:56 -0400 Subject: drm/amdgpu: Arcturus: MTYPE_NC for coarse-grain remote memory MTYPE UC was used for a specific use case that ended up not being implemented. Use NC for better performance for coarse-grained memory where cache coherence during shader execution is not required. Signed-off-by: Felix Kuehling Reviewed-by: Oak Zeng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 3 ++- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index dfa025d694f8..941a204d28f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -433,7 +433,8 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem) mapping_flags |= coherent ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; else - mapping_flags |= AMDGPU_VM_MTYPE_UC; + mapping_flags |= coherent ? + AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; } else { mapping_flags |= coherent ? 
AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index b665e9ff77e3..12c827854d1d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1022,7 +1022,8 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange) mapping_flags |= coherent ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; } else { - mapping_flags |= AMDGPU_VM_MTYPE_UC; + mapping_flags |= coherent ? + AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; if (amdgpu_xgmi_same_hive(adev, bo_adev)) snoop = true; } -- cgit v1.2.3 From 2b2339eeaff597778b042bf0010b87bac33715e2 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Mon, 10 May 2021 18:50:11 -0400 Subject: drm/amdgpu: Aldebaran: MTYPE_NC for coarse-grain remote memory MTYPE UC was used for a specific use case that ended up not being implemented. Use NC for better performance for coarse-grained memory where cache coherence during shader execution is not required. Signed-off-by: Felix Kuehling Reviewed-by: Oak Zeng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 3 ++- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 941a204d28f5..93ee569f6a63 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -453,7 +453,8 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem) if (adev->gmc.xgmi.connected_to_cpu) snoop = true; } else { - mapping_flags |= AMDGPU_VM_MTYPE_UC; + mapping_flags |= coherent ? + AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; if (amdgpu_xgmi_same_hive(adev, bo_adev)) snoop = true; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 12c827854d1d..3886c630286b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1040,7 +1040,8 @@ svm_range_get_pte_flags(struct amdgpu_device *adev, struct svm_range *prange) if (adev->gmc.xgmi.connected_to_cpu) snoop = true; } else { - mapping_flags |= AMDGPU_VM_MTYPE_UC; + mapping_flags |= coherent ? + AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; if (amdgpu_xgmi_same_hive(adev, bo_adev)) snoop = true; } -- cgit v1.2.3 From c780b2eedbd0ddc9f594379fd39281100693fd3d Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Thu, 8 Apr 2021 19:56:12 -0400 Subject: drm/amdgpu: Rename kfd_bo_va_list to kfd_mem_attachment This name is more fitting, especially for the changes coming next to support multi-GPU systems with proper DMA mappings. Cleaned up the code and renamed some related functions and variables to improve readability.
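The renamed structure as it reads after this patch:

	struct kfd_mem_attachment {
		struct list_head list;
		struct amdgpu_bo_va *bo_va;
		struct amdgpu_device *adev;	/* was: void *kgd_dev */
		bool is_mapped;
		uint64_t va;
		uint64_t pte_flags;
	};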
Signed-off-by: Felix Kuehling Acked-by: Oak Zeng Acked-by: Ramesh Errabolu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 8 +- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 209 +++++++++++------------ 2 files changed, 104 insertions(+), 113 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index f4de50de359b..1675cd08ec16 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -44,10 +44,10 @@ enum TLB_FLUSH_TYPE { struct amdgpu_device; -struct kfd_bo_va_list { - struct list_head bo_list; +struct kfd_mem_attachment { + struct list_head list; struct amdgpu_bo_va *bo_va; - void *kgd_dev; + struct amdgpu_device *adev; bool is_mapped; uint64_t va; uint64_t pte_flags; @@ -56,7 +56,7 @@ struct kfd_bo_va_list { struct kgd_mem { struct mutex lock; struct amdgpu_bo *bo; - struct list_head bo_va_list; + struct list_head attachments; /* protected by amdkfd_process_info.lock */ struct ttm_validate_buffer validate_list; struct ttm_validate_buffer resv_list; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 93ee569f6a63..7008edd3f622 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -72,16 +72,16 @@ static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) return (struct amdgpu_device *)kgd; } -static bool check_if_add_bo_to_vm(struct amdgpu_vm *avm, +static bool kfd_mem_is_attached(struct amdgpu_vm *avm, struct kgd_mem *mem) { - struct kfd_bo_va_list *entry; + struct kfd_mem_attachment *entry; - list_for_each_entry(entry, &mem->bo_va_list, bo_list) + list_for_each_entry(entry, &mem->attachments, list) if (entry->bo_va->base.vm == avm) - return false; + return true; - return true; + return false; } /* Set memory usage limits. Current, limits are @@ -475,7 +475,7 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem) return pte_flags; } -/* add_bo_to_vm - Add a BO to a VM +/* kfd_mem_attach - Add a BO to a VM * * Everything that needs to bo done only once when a BO is first added * to a VM. It can later be mapped and unmapped many times without @@ -487,15 +487,14 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem) * 4. Alloc page tables and directories if needed * 4a. 
Validate new page tables and directories */ -static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem, +static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, struct amdgpu_vm *vm, bool is_aql, - struct kfd_bo_va_list **p_bo_va_entry) + struct kfd_mem_attachment **p_attachment) { int ret; - struct kfd_bo_va_list *bo_va_entry; + struct kfd_mem_attachment *attachment; struct amdgpu_bo *bo = mem->bo; uint64_t va = mem->va; - struct list_head *list_bo_va = &mem->bo_va_list; unsigned long bo_size = bo->tbo.base.size; if (!va) { @@ -506,29 +505,29 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem, if (is_aql) va += bo_size; - bo_va_entry = kzalloc(sizeof(*bo_va_entry), GFP_KERNEL); - if (!bo_va_entry) + attachment = kzalloc(sizeof(*attachment), GFP_KERNEL); + if (!attachment) return -ENOMEM; pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va, va + bo_size, vm); /* Add BO to VM internal data structures*/ - bo_va_entry->bo_va = amdgpu_vm_bo_add(adev, vm, bo); - if (!bo_va_entry->bo_va) { + attachment->bo_va = amdgpu_vm_bo_add(adev, vm, bo); + if (!attachment->bo_va) { ret = -EINVAL; pr_err("Failed to add BO object to VM. ret == %d\n", ret); goto err_vmadd; } - bo_va_entry->va = va; - bo_va_entry->pte_flags = get_pte_flags(adev, mem); - bo_va_entry->kgd_dev = (void *)adev; - list_add(&bo_va_entry->bo_list, list_bo_va); + attachment->va = va; + attachment->pte_flags = get_pte_flags(adev, mem); + attachment->adev = adev; + list_add(&attachment->list, &mem->attachments); - if (p_bo_va_entry) - *p_bo_va_entry = bo_va_entry; + if (p_attachment) + *p_attachment = attachment; /* Allocate validate page tables if needed */ ret = vm_validate_pt_pd_bos(vm); @@ -540,22 +539,20 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem, return 0; err_alloc_pts: - amdgpu_vm_bo_rmv(adev, bo_va_entry->bo_va); - list_del(&bo_va_entry->bo_list); + amdgpu_vm_bo_rmv(adev, attachment->bo_va); + list_del(&attachment->list); err_vmadd: - kfree(bo_va_entry); + kfree(attachment); return ret; } -static void remove_bo_from_vm(struct amdgpu_device *adev, - struct kfd_bo_va_list *entry, unsigned long size) +static void kfd_mem_detach(struct kfd_mem_attachment *attachment) { - pr_debug("\t remove VA 0x%llx - 0x%llx in entry %p\n", - entry->va, - entry->va + size, entry); - amdgpu_vm_bo_rmv(adev, entry->bo_va); - list_del(&entry->bo_list); - kfree(entry); + pr_debug("\t remove VA 0x%llx in entry %p\n", + attachment->va, attachment); + amdgpu_vm_bo_rmv(attachment->adev, attachment->bo_va); + list_del(&attachment->list); + kfree(attachment); } static void add_kgd_mem_to_kfd_bo_list(struct kgd_mem *mem, @@ -730,7 +727,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem, struct bo_vm_reservation_context *ctx) { struct amdgpu_bo *bo = mem->bo; - struct kfd_bo_va_list *entry; + struct kfd_mem_attachment *entry; unsigned int i; int ret; @@ -742,7 +739,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem, INIT_LIST_HEAD(&ctx->list); INIT_LIST_HEAD(&ctx->duplicates); - list_for_each_entry(entry, &mem->bo_va_list, bo_list) { + list_for_each_entry(entry, &mem->attachments, list) { if ((vm && vm != entry->bo_va->base.vm) || (entry->is_mapped != map_type && map_type != BO_VM_ALL)) @@ -764,7 +761,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem, list_add(&ctx->kfd_bo.tv.head, &ctx->list); i = 0; - list_for_each_entry(entry, &mem->bo_va_list, bo_list) { + list_for_each_entry(entry, &mem->attachments, list) { if ((vm && vm != 
entry->bo_va->base.vm) || (entry->is_mapped != map_type && map_type != BO_VM_ALL)) @@ -819,7 +816,7 @@ static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx, } static int unmap_bo_from_gpuvm(struct amdgpu_device *adev, - struct kfd_bo_va_list *entry, + struct kfd_mem_attachment *entry, struct amdgpu_sync *sync) { struct amdgpu_bo_va *bo_va = entry->bo_va; @@ -835,7 +832,7 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev, } static int update_gpuvm_pte(struct amdgpu_device *adev, - struct kfd_bo_va_list *entry, + struct kfd_mem_attachment *entry, struct amdgpu_sync *sync) { int ret; @@ -852,7 +849,7 @@ static int update_gpuvm_pte(struct amdgpu_device *adev, } static int map_bo_to_gpuvm(struct amdgpu_device *adev, - struct kfd_bo_va_list *entry, struct amdgpu_sync *sync, + struct kfd_mem_attachment *entry, struct amdgpu_sync *sync, bool no_update_pte) { int ret; @@ -1196,7 +1193,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( ret = -ENOMEM; goto err; } - INIT_LIST_HEAD(&(*mem)->bo_va_list); + INIT_LIST_HEAD(&(*mem)->attachments); mutex_init(&(*mem)->lock); (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM); @@ -1285,7 +1282,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( { struct amdkfd_process_info *process_info = mem->process_info; unsigned long bo_size = mem->bo->tbo.base.size; - struct kfd_bo_va_list *entry, *tmp; + struct kfd_mem_attachment *entry, *tmp; struct bo_vm_reservation_context ctx; struct ttm_validate_buffer *bo_list_entry; unsigned int mapped_to_gpu_memory; @@ -1329,9 +1326,8 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( mem->va + bo_size * (1 + mem->aql_queue)); /* Remove from VM internal data structures */ - list_for_each_entry_safe(entry, tmp, &mem->bo_va_list, bo_list) - remove_bo_from_vm((struct amdgpu_device *)entry->kgd_dev, - entry, bo_size); + list_for_each_entry_safe(entry, tmp, &mem->attachments, list) + kfd_mem_detach(entry); ret = unreserve_bo_and_vms(&ctx, false, false); @@ -1374,10 +1370,10 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( int ret; struct amdgpu_bo *bo; uint32_t domain; - struct kfd_bo_va_list *entry; + struct kfd_mem_attachment *entry; struct bo_vm_reservation_context ctx; - struct kfd_bo_va_list *bo_va_entry = NULL; - struct kfd_bo_va_list *bo_va_entry_aql = NULL; + struct kfd_mem_attachment *attachment = NULL; + struct kfd_mem_attachment *attachment_aql = NULL; unsigned long bo_size; bool is_invalid_userptr = false; @@ -1426,21 +1422,20 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( bo->tbo.mem.mem_type == TTM_PL_SYSTEM) is_invalid_userptr = true; - if (check_if_add_bo_to_vm(avm, mem)) { - ret = add_bo_to_vm(adev, mem, avm, false, - &bo_va_entry); + if (!kfd_mem_is_attached(avm, mem)) { + ret = kfd_mem_attach(adev, mem, avm, false, &attachment); if (ret) - goto add_bo_to_vm_failed; + goto attach_failed; if (mem->aql_queue) { - ret = add_bo_to_vm(adev, mem, avm, - true, &bo_va_entry_aql); + ret = kfd_mem_attach(adev, mem, avm, true, + &attachment_aql); if (ret) - goto add_bo_to_vm_failed_aql; + goto attach_failed_aql; } } else { ret = vm_validate_pt_pd_bos(avm); if (unlikely(ret)) - goto add_bo_to_vm_failed; + goto attach_failed; } if (mem->mapped_to_gpu_memory == 0 && @@ -1456,30 +1451,30 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( } } - list_for_each_entry(entry, &mem->bo_va_list, bo_list) { - if (entry->bo_va->base.vm == avm && !entry->is_mapped) { - pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n", - entry->va, entry->va + bo_size, - entry); + list_for_each_entry(entry, &mem->attachments, 
list) { + if (entry->bo_va->base.vm != avm || entry->is_mapped) + continue; - ret = map_bo_to_gpuvm(adev, entry, ctx.sync, - is_invalid_userptr); - if (ret) { - pr_err("Failed to map bo to gpuvm\n"); - goto map_bo_to_gpuvm_failed; - } + pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n", + entry->va, entry->va + bo_size, entry); - ret = vm_update_pds(avm, ctx.sync); - if (ret) { - pr_err("Failed to update page directories\n"); - goto map_bo_to_gpuvm_failed; - } + ret = map_bo_to_gpuvm(adev, entry, ctx.sync, + is_invalid_userptr); + if (ret) { + pr_err("Failed to map bo to gpuvm\n"); + goto map_bo_to_gpuvm_failed; + } - entry->is_mapped = true; - mem->mapped_to_gpu_memory++; - pr_debug("\t INC mapping count %d\n", - mem->mapped_to_gpu_memory); + ret = vm_update_pds(avm, ctx.sync); + if (ret) { + pr_err("Failed to update page directories\n"); + goto map_bo_to_gpuvm_failed; } + + entry->is_mapped = true; + mem->mapped_to_gpu_memory++; + pr_debug("\t INC mapping count %d\n", + mem->mapped_to_gpu_memory); } if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count) @@ -1491,12 +1486,12 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( goto out; map_bo_to_gpuvm_failed: - if (bo_va_entry_aql) - remove_bo_from_vm(adev, bo_va_entry_aql, bo_size); -add_bo_to_vm_failed_aql: - if (bo_va_entry) - remove_bo_from_vm(adev, bo_va_entry, bo_size); -add_bo_to_vm_failed: + if (attachment_aql) + kfd_mem_detach(attachment_aql); +attach_failed_aql: + if (attachment) + kfd_mem_detach(attachment); +attach_failed: unreserve_bo_and_vms(&ctx, false, false); out: mutex_unlock(&mem->process_info->lock); @@ -1511,7 +1506,7 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); struct amdkfd_process_info *process_info = avm->process_info; unsigned long bo_size = mem->bo->tbo.base.size; - struct kfd_bo_va_list *entry; + struct kfd_mem_attachment *entry; struct bo_vm_reservation_context ctx; int ret; @@ -1535,26 +1530,24 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( mem->va + bo_size * (1 + mem->aql_queue), avm); - list_for_each_entry(entry, &mem->bo_va_list, bo_list) { - if (entry->bo_va->base.vm == avm && entry->is_mapped) { - pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n", - entry->va, - entry->va + bo_size, - entry); + list_for_each_entry(entry, &mem->attachments, list) { + if (entry->bo_va->base.vm != avm || !entry->is_mapped) + continue; - ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync); - if (ret == 0) { - entry->is_mapped = false; - } else { - pr_err("failed to unmap VA 0x%llx\n", - mem->va); - goto unreserve_out; - } + pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n", + entry->va, entry->va + bo_size, entry); - mem->mapped_to_gpu_memory--; - pr_debug("\t DEC mapping count %d\n", - mem->mapped_to_gpu_memory); + ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync); + if (ret == 0) { + entry->is_mapped = false; + } else { + pr_err("failed to unmap VA 0x%llx\n", mem->va); + goto unreserve_out; } + + mem->mapped_to_gpu_memory--; + pr_debug("\t DEC mapping count %d\n", + mem->mapped_to_gpu_memory); } /* If BO is unmapped from all VMs, unfence it. 
It can be evicted if @@ -1703,7 +1696,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, if (mmap_offset) *mmap_offset = amdgpu_bo_mmap_offset(bo); - INIT_LIST_HEAD(&(*mem)->bo_va_list); + INIT_LIST_HEAD(&(*mem)->attachments); mutex_init(&(*mem)->lock); (*mem)->alloc_flags = @@ -1900,7 +1893,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) list_for_each_entry_safe(mem, tmp_mem, &process_info->userptr_inval_list, validate_list.head) { - struct kfd_bo_va_list *bo_va_entry; + struct kfd_mem_attachment *attachment; bo = mem->bo; @@ -1923,13 +1916,13 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) * VM faults if the GPU tries to access the invalid * memory. */ - list_for_each_entry(bo_va_entry, &mem->bo_va_list, bo_list) { - if (!bo_va_entry->is_mapped) + list_for_each_entry(attachment, &mem->attachments, list) { + if (!attachment->is_mapped) continue; ret = update_gpuvm_pte((struct amdgpu_device *) - bo_va_entry->kgd_dev, - bo_va_entry, &sync); + attachment->adev, + attachment, &sync); if (ret) { pr_err("%s: update PTE failed\n", __func__); /* make sure this gets validated again */ @@ -2110,7 +2103,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) struct amdgpu_bo *bo = mem->bo; uint32_t domain = mem->domain; - struct kfd_bo_va_list *bo_va_entry; + struct kfd_mem_attachment *attachment; total_size += amdgpu_bo_size(bo); @@ -2130,11 +2123,9 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) pr_debug("Memory eviction: Sync BO fence failed. Try again\n"); goto validate_map_fail; } - list_for_each_entry(bo_va_entry, &mem->bo_va_list, - bo_list) { + list_for_each_entry(attachment, &mem->attachments, list) { ret = update_gpuvm_pte((struct amdgpu_device *) - bo_va_entry->kgd_dev, - bo_va_entry, + attachment->adev, attachment, &sync_obj); if (ret) { pr_debug("Memory eviction: update PTE failed. Try again\n"); @@ -2210,7 +2201,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem return -ENOMEM; mutex_init(&(*mem)->lock); - INIT_LIST_HEAD(&(*mem)->bo_va_list); + INIT_LIST_HEAD(&(*mem)->attachments); (*mem)->bo = amdgpu_bo_ref(gws_bo); (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS; (*mem)->process_info = process_info; -- cgit v1.2.3 From 4e94272f8a99e0235353a024f37be1201acf4c8b Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Thu, 8 Apr 2021 22:27:34 -0400 Subject: drm/amdgpu: Keep a bo-reference per-attachment For now they all reference the same BO. For correct DMA mappings they will refer to different BOs per-GPU. 
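The pairing is ordinary GEM reference counting: each attachment takes its own reference on the BO it maps and drops that reference when the attachment is destroyed, so the shared BO cannot go away while any GPU VM still has it attached. A minimal sketch of the pattern (attach_bo/detach_bo are illustrative names, not functions from this series):

static int attach_bo(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		     struct kfd_mem_attachment *att, struct amdgpu_bo *bo)
{
	/* one GEM reference per attachment keeps the BO alive while attached */
	drm_gem_object_get(&bo->tbo.base);

	att->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
	if (!att->bo_va) {
		drm_gem_object_put(&bo->tbo.base);	/* balance on failure */
		return -ENOMEM;
	}
	return 0;
}

static void detach_bo(struct kfd_mem_attachment *att)
{
	struct amdgpu_bo *bo = att->bo_va->base.bo;

	amdgpu_vm_bo_rmv(att->adev, att->bo_va);
	drm_gem_object_put(&bo->tbo.base);	/* balance the attach-time get */
}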
Signed-off-by: Felix Kuehling Acked-by: Oak Zeng Acked-by: Ramesh Errabolu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 7008edd3f622..ee42d561bd64 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -491,11 +491,11 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, struct amdgpu_vm *vm, bool is_aql, struct kfd_mem_attachment **p_attachment) { - int ret; - struct kfd_mem_attachment *attachment; - struct amdgpu_bo *bo = mem->bo; + unsigned long bo_size = mem->bo->tbo.base.size; uint64_t va = mem->va; - unsigned long bo_size = bo->tbo.base.size; + struct kfd_mem_attachment *attachment; + struct amdgpu_bo *bo; + int ret; if (!va) { pr_err("Invalid VA when adding BO to VM\n"); @@ -512,6 +512,14 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va, va + bo_size, vm); + /* FIXME: For now all attachments use the same BO. This is incorrect + * because one BO can only have one DMA mapping for one GPU. We need + * one BO per GPU, e.g. a DMABuf import with dynamic attachment. This + * will be addressed one BO-type at a time in subsequent patches. + */ + bo = mem->bo; + drm_gem_object_get(&bo->tbo.base); + /* Add BO to VM internal data structures*/ attachment->bo_va = amdgpu_vm_bo_add(adev, vm, bo); if (!attachment->bo_va) { @@ -531,7 +539,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, /* Allocate validate page tables if needed */ ret = vm_validate_pt_pd_bos(vm); - if (ret) { + if (unlikely(ret)) { pr_err("validate_pt_pd_bos() failed\n"); goto err_alloc_pts; } @@ -542,15 +550,19 @@ err_alloc_pts: amdgpu_vm_bo_rmv(adev, attachment->bo_va); list_del(&attachment->list); err_vmadd: + drm_gem_object_put(&bo->tbo.base); kfree(attachment); return ret; } static void kfd_mem_detach(struct kfd_mem_attachment *attachment) { + struct amdgpu_bo *bo = attachment->bo_va->base.bo; + pr_debug("\t remove VA 0x%llx in entry %p\n", attachment->va, attachment); amdgpu_vm_bo_rmv(attachment->adev, attachment->bo_va); + drm_gem_object_put(&bo->tbo.base); list_del(&attachment->list); kfree(attachment); } -- cgit v1.2.3 From 7141394edc05f439751e4eb2e5aedb4889b48e33 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Thu, 8 Apr 2021 23:26:27 -0400 Subject: drm/amdgpu: Simplify AQL queue mapping Do AQL queue double-mapping with a single attach call. That will make it easier to create per-GPU BOs later, to be shared between the two BO VA mappings on the same GPU. Freeing the attachments is not necessary if map_to_gpu fails. These will be cleaned up when the kgd_mem object is destroyed in amdgpu_amdkfd_gpuvm_free_memory_of_gpu.
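An AQL queue BO is mapped twice in the same GPUVM, at va and again at va + bo_size, so collapsing the two attach calls into one is just a loop whose trip count depends on is_aql. A condensed sketch of the control flow this patch introduces (unwind path and BO selection elided; see the full diff below):

	for (i = 0; i <= is_aql; i++) {	/* one pass, or two for AQL queues */
		attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL);
		if (unlikely(!attachment[i])) {
			ret = -ENOMEM;
			goto unwind;	/* frees everything allocated so far */
		}
		/* ... pick or create the per-GPU BO, amdgpu_vm_bo_add(), ... */
		va += bo_size;	/* second AQL mapping lands right after the first */
	}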
Signed-off-by: Felix Kuehling Acked-by: Oak Zeng Acked-by: Ramesh Errabolu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 103 +++++++++++------------ 1 file changed, 48 insertions(+), 55 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index ee42d561bd64..a23628f3f780 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -488,70 +488,76 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem) * 4a. Validate new page tables and directories */ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, - struct amdgpu_vm *vm, bool is_aql, - struct kfd_mem_attachment **p_attachment) + struct amdgpu_vm *vm, bool is_aql) { unsigned long bo_size = mem->bo->tbo.base.size; uint64_t va = mem->va; - struct kfd_mem_attachment *attachment; - struct amdgpu_bo *bo; - int ret; + struct kfd_mem_attachment *attachment[2] = {NULL, NULL}; + struct amdgpu_bo *bo[2] = {NULL, NULL}; + int i, ret; if (!va) { pr_err("Invalid VA when adding BO to VM\n"); return -EINVAL; } - if (is_aql) - va += bo_size; - - attachment = kzalloc(sizeof(*attachment), GFP_KERNEL); - if (!attachment) - return -ENOMEM; + for (i = 0; i <= is_aql; i++) { + attachment[i] = kzalloc(sizeof(*attachment[i]), GFP_KERNEL); + if (unlikely(!attachment[i])) { + ret = -ENOMEM; + goto unwind; + } - pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va, - va + bo_size, vm); + pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va, + va + bo_size, vm); - /* FIXME: For now all attachments use the same BO. This is incorrect - * because one BO can only have one DMA mapping for one GPU. We need - * one BO per GPU, e.g. a DMABuf import with dynamic attachment. This - * will be addressed one BO-type at a time in subsequent patches. - */ - bo = mem->bo; - drm_gem_object_get(&bo->tbo.base); + /* FIXME: For now all attachments use the same BO. This is + * incorrect because one BO can only have one DMA mapping + * for one GPU. We need one BO per GPU, e.g. a DMABuf + * import with dynamic attachment. This will be addressed + * one BO-type at a time in subsequent patches. + */ + bo[i] = mem->bo; + drm_gem_object_get(&bo[i]->tbo.base); - /* Add BO to VM internal data structures*/ - attachment->bo_va = amdgpu_vm_bo_add(adev, vm, bo); - if (!attachment->bo_va) { - ret = -EINVAL; - pr_err("Failed to add BO object to VM. ret == %d\n", - ret); - goto err_vmadd; - } + /* Add BO to VM internal data structures */ + attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]); + if (unlikely(!attachment[i]->bo_va)) { + ret = -ENOMEM; + pr_err("Failed to add BO object to VM. 
ret == %d\n", + ret); + goto unwind; + } - attachment->va = va; - attachment->pte_flags = get_pte_flags(adev, mem); - attachment->adev = adev; - list_add(&attachment->list, &mem->attachments); + attachment[i]->va = va; + attachment[i]->pte_flags = get_pte_flags(adev, mem); + attachment[i]->adev = adev; + list_add(&attachment[i]->list, &mem->attachments); - if (p_attachment) - *p_attachment = attachment; + va += bo_size; + } /* Allocate validate page tables if needed */ ret = vm_validate_pt_pd_bos(vm); if (unlikely(ret)) { pr_err("validate_pt_pd_bos() failed\n"); - goto err_alloc_pts; + goto unwind; } return 0; -err_alloc_pts: - amdgpu_vm_bo_rmv(adev, attachment->bo_va); - list_del(&attachment->list); -err_vmadd: - drm_gem_object_put(&bo->tbo.base); - kfree(attachment); +unwind: + for (; i >= 0; i--) { + if (!attachment[i]) + continue; + if (attachment[i]->bo_va) { + amdgpu_vm_bo_rmv(adev, attachment[i]->bo_va); + list_del(&attachment[i]->list); + } + if (bo[i]) + drm_gem_object_put(&bo[i]->tbo.base); + kfree(attachment[i]); + } return ret; } @@ -1384,8 +1390,6 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( uint32_t domain; struct kfd_mem_attachment *entry; struct bo_vm_reservation_context ctx; - struct kfd_mem_attachment *attachment = NULL; - struct kfd_mem_attachment *attachment_aql = NULL; unsigned long bo_size; bool is_invalid_userptr = false; @@ -1435,15 +1439,9 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( is_invalid_userptr = true; if (!kfd_mem_is_attached(avm, mem)) { - ret = kfd_mem_attach(adev, mem, avm, false, &attachment); + ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue); if (ret) goto attach_failed; - if (mem->aql_queue) { - ret = kfd_mem_attach(adev, mem, avm, true, - &attachment_aql); - if (ret) - goto attach_failed_aql; - } } else { ret = vm_validate_pt_pd_bos(avm); if (unlikely(ret)) @@ -1498,11 +1496,6 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( goto out; map_bo_to_gpuvm_failed: - if (attachment_aql) - kfd_mem_detach(attachment_aql); -attach_failed_aql: - if (attachment) - kfd_mem_detach(attachment); attach_failed: unreserve_bo_and_vms(&ctx, false, false); out: -- cgit v1.2.3 From 264fb4d332f5e76743818480e482464437837c52 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Sat, 10 Apr 2021 03:43:58 -0400 Subject: drm/amdgpu: Add multi-GPU DMA mapping helpers Add BO-type specific helper functions to DMA-map and unmap kfd_mem_attachments. Implement this functionality for userptrs by creating one SG BO per GPU and filling it with a DMA mapping of the pages from the original mem->bo.
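For userptrs the new helper wraps the pages that are already resident for the original BO in a scatter-gather table, DMA-maps that table for the attaching GPU, and hands the per-page DMA addresses to TTM. Reduced to its three core primitives, the sequence looks like this (a sketch only; attachment_bo is an illustrative name for the per-GPU SG BO, and the full helper below also validates it into GTT and unwinds on error):

	struct ttm_tt *src_ttm = mem->bo->tbo.ttm;	/* pages of the original userptr BO */
	struct ttm_tt *ttm = attachment_bo->tbo.ttm;

	/* 1. Build an sg_table over the already-pinned user pages */
	ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages, ttm->num_pages,
					0, (u64)ttm->num_pages << PAGE_SHIFT,
					GFP_KERNEL);

	/* 2. Create the DMA mapping for this specific device */
	ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);

	/* 3. Export the DMA addresses so TTM can fill the GPU page tables */
	drm_prime_sg_to_dma_addr_array(ttm->sg, ttm->dma_address, ttm->num_pages);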
Signed-off-by: Felix Kuehling Acked-by: Oak Zeng Acked-by: Ramesh Errabolu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 8 +- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 149 +++++++++++++++++++++-- 2 files changed, 148 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 1675cd08ec16..a83ac39afdd1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -44,11 +44,17 @@ enum TLB_FLUSH_TYPE { struct amdgpu_device; +enum kfd_mem_attachment_type { + KFD_MEM_ATT_SHARED, /* Share kgd_mem->bo or another attachment's */ + KFD_MEM_ATT_USERPTR, /* SG bo to DMA map pages from a userptr bo */ +}; + struct kfd_mem_attachment { struct list_head list; + enum kfd_mem_attachment_type type; + bool is_mapped; struct amdgpu_bo_va *bo_va; struct amdgpu_device *adev; - bool is_mapped; uint64_t va; uint64_t pte_flags; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index a23628f3f780..9c31c29ec784 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -475,12 +475,120 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem) return pte_flags; } +static int +kfd_mem_dmamap_userptr(struct kgd_mem *mem, + struct kfd_mem_attachment *attachment) +{ + enum dma_data_direction direction = + mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? + DMA_BIDIRECTIONAL : DMA_TO_DEVICE; + struct ttm_operation_ctx ctx = {.interruptible = true}; + struct amdgpu_bo *bo = attachment->bo_va->base.bo; + struct amdgpu_device *adev = attachment->adev; + struct ttm_tt *src_ttm = mem->bo->tbo.ttm; + struct ttm_tt *ttm = bo->tbo.ttm; + int ret; + + ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL); + if (unlikely(!ttm->sg)) + return -ENOMEM; + + if (WARN_ON(ttm->num_pages != src_ttm->num_pages)) + return -EINVAL; + + /* Same sequence as in amdgpu_ttm_tt_pin_userptr */ + ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages, + ttm->num_pages, 0, + (u64)ttm->num_pages << PAGE_SHIFT, + GFP_KERNEL); + if (unlikely(ret)) + goto free_sg; + + ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0); + if (unlikely(ret)) + goto release_sg; + + drm_prime_sg_to_dma_addr_array(ttm->sg, ttm->dma_address, + ttm->num_pages); + + amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT); + ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + if (ret) + goto unmap_sg; + + return 0; + +unmap_sg: + dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0); +release_sg: + pr_err("DMA map userptr failed: %d\n", ret); + sg_free_table(ttm->sg); +free_sg: + kfree(ttm->sg); + ttm->sg = NULL; + return ret; +} + +static int +kfd_mem_dmamap_attachment(struct kgd_mem *mem, + struct kfd_mem_attachment *attachment) +{ + switch (attachment->type) { + case KFD_MEM_ATT_SHARED: + return 0; + case KFD_MEM_ATT_USERPTR: + return kfd_mem_dmamap_userptr(mem, attachment); + default: + WARN_ON_ONCE(1); + } + return -EINVAL; +} + +static void +kfd_mem_dmaunmap_userptr(struct kgd_mem *mem, + struct kfd_mem_attachment *attachment) +{ + enum dma_data_direction direction = + mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? 
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE; + struct ttm_operation_ctx ctx = {.interruptible = false}; + struct amdgpu_bo *bo = attachment->bo_va->base.bo; + struct amdgpu_device *adev = attachment->adev; + struct ttm_tt *ttm = bo->tbo.ttm; + + if (unlikely(!ttm->sg)) + return; + + amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); + ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + + dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0); + sg_free_table(ttm->sg); + ttm->sg = NULL; +} + +static void +kfd_mem_dmaunmap_attachment(struct kgd_mem *mem, + struct kfd_mem_attachment *attachment) +{ + switch (attachment->type) { + case KFD_MEM_ATT_SHARED: + break; + case KFD_MEM_ATT_USERPTR: + kfd_mem_dmaunmap_userptr(mem, attachment); + break; + default: + WARN_ON_ONCE(1); + } +} + /* kfd_mem_attach - Add a BO to a VM * * Everything that needs to bo done only once when a BO is first added * to a VM. It can later be mapped and unmapped many times without * repeating these steps. * + * 0. Create BO for DMA mapping, if needed * 1. Allocate and initialize BO VA entry data structure * 2. Add BO to the VM * 3. Determine ASIC-specific PTE flags @@ -490,10 +598,12 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem) static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, struct amdgpu_vm *vm, bool is_aql) { + struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev); unsigned long bo_size = mem->bo->tbo.base.size; uint64_t va = mem->va; struct kfd_mem_attachment *attachment[2] = {NULL, NULL}; struct amdgpu_bo *bo[2] = {NULL, NULL}; + struct drm_gem_object *gobj; int i, ret; if (!va) { @@ -511,14 +621,37 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va, va + bo_size, vm); - /* FIXME: For now all attachments use the same BO. This is - * incorrect because one BO can only have one DMA mapping - * for one GPU. We need one BO per GPU, e.g. a DMABuf - * import with dynamic attachment. This will be addressed - * one BO-type at a time in subsequent patches. 
- */ - bo[i] = mem->bo; - drm_gem_object_get(&bo[i]->tbo.base); + if (adev == bo_adev || (mem->domain == AMDGPU_GEM_DOMAIN_VRAM && + amdgpu_xgmi_same_hive(adev, bo_adev))) { + /* Mappings on the local GPU and VRAM mappings in the + * local hive share the original BO + */ + attachment[i]->type = KFD_MEM_ATT_SHARED; + bo[i] = mem->bo; + drm_gem_object_get(&bo[i]->tbo.base); + } else if (i > 0) { + /* Multiple mappings on the same GPU share the BO */ + attachment[i]->type = KFD_MEM_ATT_SHARED; + bo[i] = bo[0]; + drm_gem_object_get(&bo[i]->tbo.base); + } else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) { + /* Create an SG BO to DMA-map userptrs on other GPUs */ + attachment[i]->type = KFD_MEM_ATT_USERPTR; + ret = amdgpu_gem_object_create(adev, bo_size, 1, + AMDGPU_GEM_DOMAIN_CPU, + 0, ttm_bo_type_sg, + mem->bo->tbo.base.resv, + &gobj); + if (ret) + goto unwind; + bo[i] = gem_to_amdgpu_bo(gobj); + bo[i]->parent = amdgpu_bo_ref(mem->bo); + } else { + /* FIXME: Need to DMA-map other BO types */ + attachment[i]->type = KFD_MEM_ATT_SHARED; + bo[i] = mem->bo; + drm_gem_object_get(&bo[i]->tbo.base); + } /* Add BO to VM internal data structures */ attachment[i]->bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]); -- cgit v1.2.3 From b72ed8a2de8e9dfbd61217d60a7da868ac2cfbff Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Sat, 10 Apr 2021 03:48:56 -0400 Subject: drm/amdgpu: DMA map/unmap when updating GPU mappings DMA map kfd_mem_attachments in update_gpuvm_pte. This function is called with the BO and page tables reserved, so we can safely update the DMA mapping. DMA unmap when a BO is unmapped from a GPU and before updating mappings in restore workers. Signed-off-by: Felix Kuehling Acked-by: Oak Zeng Acked-by: Ramesh Errabolu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 56 ++++++++++++------------ 1 file changed, 29 insertions(+), 27 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 9c31c29ec784..2c02b1504e77 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -966,11 +966,12 @@ static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx, return ret; } -static int unmap_bo_from_gpuvm(struct amdgpu_device *adev, +static void unmap_bo_from_gpuvm(struct kgd_mem *mem, struct kfd_mem_attachment *entry, struct amdgpu_sync *sync) { struct amdgpu_bo_va *bo_va = entry->bo_va; + struct amdgpu_device *adev = entry->adev; struct amdgpu_vm *vm = bo_va->base.vm; amdgpu_vm_bo_unmap(adev, bo_va, entry->va); @@ -979,15 +980,20 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev, amdgpu_sync_fence(sync, bo_va->last_pt_update); - return 0; + kfd_mem_dmaunmap_attachment(mem, entry); } -static int update_gpuvm_pte(struct amdgpu_device *adev, - struct kfd_mem_attachment *entry, - struct amdgpu_sync *sync) +static int update_gpuvm_pte(struct kgd_mem *mem, + struct kfd_mem_attachment *entry, + struct amdgpu_sync *sync) { - int ret; struct amdgpu_bo_va *bo_va = entry->bo_va; + struct amdgpu_device *adev = entry->adev; + int ret; + + ret = kfd_mem_dmamap_attachment(mem, entry); + if (ret) + return ret; /* Update the page tables */ ret = amdgpu_vm_bo_update(adev, bo_va, false); @@ -999,14 +1005,15 @@ static int update_gpuvm_pte(struct amdgpu_device *adev, return amdgpu_sync_fence(sync, bo_va->last_pt_update); } -static int map_bo_to_gpuvm(struct amdgpu_device 
*adev, - struct kfd_mem_attachment *entry, struct amdgpu_sync *sync, - bool no_update_pte) +static int map_bo_to_gpuvm(struct kgd_mem *mem, + struct kfd_mem_attachment *entry, + struct amdgpu_sync *sync, + bool no_update_pte) { int ret; /* Set virtual address for the allocation */ - ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0, + ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0, amdgpu_bo_size(entry->bo_va->base.bo), entry->pte_flags); if (ret) { @@ -1018,7 +1025,7 @@ static int map_bo_to_gpuvm(struct amdgpu_device *adev, if (no_update_pte) return 0; - ret = update_gpuvm_pte(adev, entry, sync); + ret = update_gpuvm_pte(mem, entry, sync); if (ret) { pr_err("update_gpuvm_pte() failed\n"); goto update_gpuvm_pte_failed; @@ -1027,7 +1034,7 @@ static int map_bo_to_gpuvm(struct amdgpu_device *adev, return 0; update_gpuvm_pte_failed: - unmap_bo_from_gpuvm(adev, entry, sync); + unmap_bo_from_gpuvm(mem, entry, sync); return ret; } @@ -1601,7 +1608,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n", entry->va, entry->va + bo_size, entry); - ret = map_bo_to_gpuvm(adev, entry, ctx.sync, + ret = map_bo_to_gpuvm(mem, entry, ctx.sync, is_invalid_userptr); if (ret) { pr_err("Failed to map bo to gpuvm\n"); @@ -1640,7 +1647,6 @@ out: int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv) { - struct amdgpu_device *adev = get_amdgpu_device(kgd); struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); struct amdkfd_process_info *process_info = avm->process_info; unsigned long bo_size = mem->bo->tbo.base.size; @@ -1675,13 +1681,8 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n", entry->va, entry->va + bo_size, entry); - ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync); - if (ret == 0) { - entry->is_mapped = false; - } else { - pr_err("failed to unmap VA 0x%llx\n", mem->va); - goto unreserve_out; - } + unmap_bo_from_gpuvm(mem, entry, ctx.sync); + entry->is_mapped = false; mem->mapped_to_gpu_memory--; pr_debug("\t DEC mapping count %d\n", @@ -2058,9 +2059,8 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) if (!attachment->is_mapped) continue; - ret = update_gpuvm_pte((struct amdgpu_device *) - attachment->adev, - attachment, &sync); + kfd_mem_dmaunmap_attachment(mem, attachment); + ret = update_gpuvm_pte(mem, attachment, &sync); if (ret) { pr_err("%s: update PTE failed\n", __func__); /* make sure this gets validated again */ @@ -2262,9 +2262,11 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) goto validate_map_fail; } list_for_each_entry(attachment, &mem->attachments, list) { - ret = update_gpuvm_pte((struct amdgpu_device *) - attachment->adev, attachment, - &sync_obj); + if (!attachment->is_mapped) + continue; + + kfd_mem_dmaunmap_attachment(mem, attachment); + ret = update_gpuvm_pte(mem, attachment, &sync_obj); if (ret) { pr_debug("Memory eviction: update PTE failed. Try again\n"); goto validate_map_fail; -- cgit v1.2.3 From 9e5d275319e224e01adb62bfe03943b32f540b7d Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Sun, 11 Apr 2021 18:50:23 -0400 Subject: drm/amdgpu: Move kfd_mem_attach outside reservation This is needed to avoid deadlocks with DMA buf import in the next patch. Also move PT/PD validation out of kfd_mem_attach, that way the caller can do this unconditionally.
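The ordering matters because a DMA-buf import ends up taking the BO's reservation lock itself: attaching inside reserve_bo_and_vm()'s critical section would recurse on the same locks. After this patch the map path runs attach, reserve, then PT/PD validation, roughly as follows (a simplified outline of the flow in the diff below):

	/* 1. Attach first: may import a DMA-buf, which reserves the BO itself */
	if (!kfd_mem_is_attached(avm, mem)) {
		ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue);
		if (ret)
			goto out;
	}

	/* 2. Only now take the reservations on the BO and page tables */
	ret = reserve_bo_and_vm(mem, avm, &ctx);
	if (unlikely(ret))
		goto out;

	/* 3. Validate PT/PD BOs unconditionally, no longer hidden in attach */
	ret = vm_validate_pt_pd_bos(avm);
	if (unlikely(ret))
		goto out_unreserve;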
Signed-off-by: Felix Kuehling Acked-by: Oak Zeng Acked-by: Ramesh Errabolu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 75 ++++++++++++++---------- 1 file changed, 44 insertions(+), 31 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 2c02b1504e77..4fb180d1c758 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -582,6 +582,34 @@ kfd_mem_dmaunmap_attachment(struct kgd_mem *mem, } } +static int +kfd_mem_attach_userptr(struct amdgpu_device *adev, struct kgd_mem *mem, + struct amdgpu_bo **bo) +{ + unsigned long bo_size = mem->bo->tbo.base.size; + struct drm_gem_object *gobj; + int ret; + + ret = amdgpu_bo_reserve(mem->bo, false); + if (ret) + return ret; + + ret = amdgpu_gem_object_create(adev, bo_size, 1, + AMDGPU_GEM_DOMAIN_CPU, + 0, ttm_bo_type_sg, + mem->bo->tbo.base.resv, + &gobj); + if (ret) + return ret; + + amdgpu_bo_unreserve(mem->bo); + + *bo = gem_to_amdgpu_bo(gobj); + (*bo)->parent = amdgpu_bo_ref(mem->bo); + + return 0; +} + /* kfd_mem_attach - Add a BO to a VM * * Everything that needs to bo done only once when a BO is first added @@ -603,7 +631,6 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, uint64_t va = mem->va; struct kfd_mem_attachment *attachment[2] = {NULL, NULL}; struct amdgpu_bo *bo[2] = {NULL, NULL}; - struct drm_gem_object *gobj; int i, ret; if (!va) { @@ -637,15 +664,9 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, } else if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) { /* Create an SG BO to DMA-map userptrs on other GPUs */ attachment[i]->type = KFD_MEM_ATT_USERPTR; - ret = amdgpu_gem_object_create(adev, bo_size, 1, - AMDGPU_GEM_DOMAIN_CPU, - 0, ttm_bo_type_sg, - mem->bo->tbo.base.resv, - &gobj); + ret = kfd_mem_attach_userptr(adev, mem, &bo[i]); if (ret) goto unwind; - bo[i] = gem_to_amdgpu_bo(gobj); - bo[i]->parent = amdgpu_bo_ref(mem->bo); } else { /* FIXME: Need to DMA-map other BO types */ attachment[i]->type = KFD_MEM_ATT_SHARED; @@ -670,13 +691,6 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, va += bo_size; } - /* Allocate validate page tables if needed */ - ret = vm_validate_pt_pd_bos(vm); - if (unlikely(ret)) { - pr_err("validate_pt_pd_bos() failed\n"); - goto unwind; - } - return 0; unwind: @@ -1483,12 +1497,12 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va, mem->va + bo_size * (1 + mem->aql_queue)); + ret = unreserve_bo_and_vms(&ctx, false, false); + /* Remove from VM internal data structures */ list_for_each_entry_safe(entry, tmp, &mem->attachments, list) kfd_mem_detach(entry); - ret = unreserve_bo_and_vms(&ctx, false, false); - /* Free the sync object */ amdgpu_sync_free(&mem->sync); @@ -1565,6 +1579,12 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( mem->va + bo_size * (1 + mem->aql_queue), avm, domain_string(domain)); + if (!kfd_mem_is_attached(avm, mem)) { + ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue); + if (ret) + goto out; + } + ret = reserve_bo_and_vm(mem, avm, &ctx); if (unlikely(ret)) goto out; @@ -1578,15 +1598,9 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( bo->tbo.mem.mem_type == TTM_PL_SYSTEM) is_invalid_userptr = true; - if (!kfd_mem_is_attached(avm, mem)) { - ret = kfd_mem_attach(adev, mem, avm, mem->aql_queue); - if (ret) - goto attach_failed; 
- } else { - ret = vm_validate_pt_pd_bos(avm); - if (unlikely(ret)) - goto attach_failed; - } + ret = vm_validate_pt_pd_bos(avm); + if (unlikely(ret)) + goto out_unreserve; if (mem->mapped_to_gpu_memory == 0 && !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { @@ -1597,7 +1611,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( ret = amdgpu_amdkfd_bo_validate(bo, domain, true); if (ret) { pr_debug("Validate failed\n"); - goto map_bo_to_gpuvm_failed; + goto out_unreserve; } } @@ -1612,13 +1626,13 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( is_invalid_userptr); if (ret) { pr_err("Failed to map bo to gpuvm\n"); - goto map_bo_to_gpuvm_failed; + goto out_unreserve; } ret = vm_update_pds(avm, ctx.sync); if (ret) { pr_err("Failed to update page directories\n"); - goto map_bo_to_gpuvm_failed; + goto out_unreserve; } entry->is_mapped = true; @@ -1635,8 +1649,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( goto out; -map_bo_to_gpuvm_failed: -attach_failed: +out_unreserve: unreserve_bo_and_vms(&ctx, false, false); out: mutex_unlock(&mem->process_info->lock); -- cgit v1.2.3 From 5ac3c3e45fb93d14102fc7cdc69ad909f6980388 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Sun, 11 Apr 2021 18:52:19 -0400 Subject: drm/amdgpu: Add DMA mapping of GTT BOs Use DMABufs with dynamic attachment to DMA-map GTT BOs on other GPUs. Signed-off-by: Felix Kuehling Acked-by: Oak Zeng Acked-by: Ramesh Errabolu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 2 + drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 76 +++++++++++++++++++++++- 2 files changed, 77 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index a83ac39afdd1..81264517d532 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -47,6 +47,7 @@ struct amdgpu_device; enum kfd_mem_attachment_type { KFD_MEM_ATT_SHARED, /* Share kgd_mem->bo or another attachment's */ KFD_MEM_ATT_USERPTR, /* SG bo to DMA map pages from a userptr bo */ + KFD_MEM_ATT_DMABUF, /* DMAbuf to DMA map TTM BOs */ }; struct kfd_mem_attachment { @@ -62,6 +63,7 @@ struct kfd_mem_attachment { struct kgd_mem { struct mutex lock; struct amdgpu_bo *bo; + struct dma_buf *dmabuf; struct list_head attachments; /* protected by amdkfd_process_info.lock */ struct ttm_validate_buffer validate_list; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 4fb180d1c758..bc838c319cb5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -529,6 +529,16 @@ free_sg: return ret; } +static int +kfd_mem_dmamap_dmabuf(struct kfd_mem_attachment *attachment) +{ + struct ttm_operation_ctx ctx = {.interruptible = true}; + struct amdgpu_bo *bo = attachment->bo_va->base.bo; + + amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT); + return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); +} + static int kfd_mem_dmamap_attachment(struct kgd_mem *mem, struct kfd_mem_attachment *attachment) @@ -538,6 +548,8 @@ kfd_mem_dmamap_attachment(struct kgd_mem *mem, return 0; case KFD_MEM_ATT_USERPTR: return kfd_mem_dmamap_userptr(mem, attachment); + case KFD_MEM_ATT_DMABUF: + return kfd_mem_dmamap_dmabuf(attachment); default: WARN_ON_ONCE(1); } @@ -567,6 +579,19 @@ kfd_mem_dmaunmap_userptr(struct kgd_mem *mem, ttm->sg = NULL; } +static void +kfd_mem_dmaunmap_dmabuf(struct 
kfd_mem_attachment *attachment) +{ + struct ttm_operation_ctx ctx = {.interruptible = true}; + struct amdgpu_bo *bo = attachment->bo_va->base.bo; + + amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); + ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + /* FIXME: This does not guarantee that amdgpu_ttm_tt_unpopulate is + * called + */ +} + static void kfd_mem_dmaunmap_attachment(struct kgd_mem *mem, struct kfd_mem_attachment *attachment) @@ -577,6 +602,9 @@ kfd_mem_dmaunmap_attachment(struct kgd_mem *mem, case KFD_MEM_ATT_USERPTR: kfd_mem_dmaunmap_userptr(mem, attachment); break; + case KFD_MEM_ATT_DMABUF: + kfd_mem_dmaunmap_dmabuf(attachment); + break; default: WARN_ON_ONCE(1); } @@ -610,6 +638,38 @@ kfd_mem_attach_userptr(struct amdgpu_device *adev, struct kgd_mem *mem, return 0; } +static int +kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem, + struct amdgpu_bo **bo) +{ + struct drm_gem_object *gobj; + + if (!mem->dmabuf) { + mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base, + mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ? + DRM_RDWR : 0); + if (IS_ERR(mem->dmabuf)) { + mem->dmabuf = NULL; + return PTR_ERR(mem->dmabuf); + } + } + + gobj = amdgpu_gem_prime_import(&adev->ddev, mem->dmabuf); + if (IS_ERR(gobj)) + return PTR_ERR(gobj); + + /* Import takes an extra reference on the dmabuf. Drop it now to + * avoid leaking it. We only need the one reference in + * kgd_mem->dmabuf. + */ + dma_buf_put(mem->dmabuf); + + *bo = gem_to_amdgpu_bo(gobj); + (*bo)->parent = amdgpu_bo_ref(mem->bo); + + return 0; +} + /* kfd_mem_attach - Add a BO to a VM * * Everything that needs to bo done only once when a BO is first added @@ -667,8 +727,20 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem, ret = kfd_mem_attach_userptr(adev, mem, &bo[i]); if (ret) goto unwind; + } else if (mem->domain == AMDGPU_GEM_DOMAIN_GTT && + mem->bo->tbo.type != ttm_bo_type_sg) { + /* GTT BOs use DMA-mapping ability of dynamic-attach + * DMA bufs. TODO: The same should work for VRAM on + * large-BAR GPUs. + */ + attachment[i]->type = KFD_MEM_ATT_DMABUF; + ret = kfd_mem_attach_dmabuf(adev, mem, &bo[i]); + if (ret) + goto unwind; } else { - /* FIXME: Need to DMA-map other BO types */ + /* FIXME: Need to DMA-map other BO types: + * large-BAR VRAM, doorbells, MMIO remap + */ attachment[i]->type = KFD_MEM_ATT_SHARED; bo[i] = mem->bo; drm_gem_object_get(&bo[i]->tbo.base); @@ -1527,6 +1599,8 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( /* Free the BO*/ drm_vma_node_revoke(&mem->bo->tbo.base.vma_node, drm_priv); + if (mem->dmabuf) + dma_buf_put(mem->dmabuf); drm_gem_object_put(&mem->bo->tbo.base); mutex_destroy(&mem->lock); kfree(mem); -- cgit v1.2.3 From e552ee40b02bb6d30b0278d03fa03fae357ec043 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Wed, 21 Apr 2021 19:25:45 -0400 Subject: drm/amdgpu: Move dmabuf attach/detach to backend_(un)bind MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The dmabuf attachment should be updated by moving the SG BO to DOMAIN_CPU and back to DOMAIN_GTT. This does not necessarily invoke the populate/unpopulate callbacks. Do this in backend_bind/unbind instead. 
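The mechanism relied on here: kfd_mem_dmaunmap_dmabuf() forces a remap by validating the SG BO to DOMAIN_CPU, and kfd_mem_dmamap_dmabuf() later validates it back to DOMAIN_GTT. TTM turns such a move into backend_unbind/backend_bind calls, whereas unpopulate is only guaranteed when the pages are actually freed, so the dma_buf_map_attachment()/dma_buf_unmap_attachment() pair has to live in the bind hooks. Sketched from the KFD side, with both halves combined into one illustrative function (force_dmabuf_remap is not a name from this series):

static void force_dmabuf_remap(struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { .interruptible = true };

	/* leave GTT: triggers backend_unbind -> dma_buf_unmap_attachment() */
	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
	ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	/* back to GTT: triggers backend_bind -> dma_buf_map_attachment() */
	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
	ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}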
Signed-off-by: Felix Kuehling Reviewed-by: Christian König Acked-by: Oak Zeng Acked-by: Ramesh Errabolu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 3 -- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 51 ++++++++++++------------ 2 files changed, 25 insertions(+), 29 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index bc838c319cb5..928e8d57cd08 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -587,9 +587,6 @@ kfd_mem_dmaunmap_dmabuf(struct kfd_mem_attachment *attachment) amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); - /* FIXME: This does not guarantee that amdgpu_ttm_tt_unpopulate is - * called - */ } static void diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 01775a670c1a..d16b6dbca0ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -907,7 +907,23 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev, DRM_ERROR("failed to pin userptr\n"); return r; } + } else if (ttm->page_flags & TTM_PAGE_FLAG_SG) { + if (!ttm->sg) { + struct dma_buf_attachment *attach; + struct sg_table *sgt; + + attach = gtt->gobj->import_attach; + sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); + if (IS_ERR(sgt)) + return PTR_ERR(sgt); + + ttm->sg = sgt; + } + + drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address, + ttm->num_pages); } + if (!ttm->num_pages) { WARN(1, "nothing to bind %u pages for mreg %p back %p!\n", ttm->num_pages, bo_mem, ttm); @@ -1034,8 +1050,15 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev, int r; /* if the pages have userptr pinning then clear that first */ - if (gtt->userptr) + if (gtt->userptr) { amdgpu_ttm_tt_unpin_userptr(bdev, ttm); + } else if (ttm->sg && gtt->gobj->import_attach) { + struct dma_buf_attachment *attach; + + attach = gtt->gobj->import_attach; + dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL); + ttm->sg = NULL; + } if (!gtt->bound) return; @@ -1122,23 +1145,8 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev, return 0; } - if (ttm->page_flags & TTM_PAGE_FLAG_SG) { - if (!ttm->sg) { - struct dma_buf_attachment *attach; - struct sg_table *sgt; - - attach = gtt->gobj->import_attach; - sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); - if (IS_ERR(sgt)) - return PTR_ERR(sgt); - - ttm->sg = sgt; - } - - drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address, - ttm->num_pages); + if (ttm->page_flags & TTM_PAGE_FLAG_SG) return 0; - } return ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx); } @@ -1162,15 +1170,6 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev, return; } - if (ttm->sg && gtt->gobj->import_attach) { - struct dma_buf_attachment *attach; - - attach = gtt->gobj->import_attach; - dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL); - ttm->sg = NULL; - return; - } - if (ttm->page_flags & TTM_PAGE_FLAG_SG) return; -- cgit v1.2.3
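Taken together, the series leaves the KFD-facing flow looking like this from a caller's point of view: allocate once, then map on each GPU, with each map call creating or reusing a per-GPU attachment and DMA mapping internally. A hypothetical caller, sketched against the signatures visible in these patches (kgd[]/drm_priv[] acquisition and error unwinding elided):

	struct kgd_mem *mem;
	uint64_t offset;
	int i, ret;

	/* allocate once; drm_priv identifies the process VM on the owning GPU */
	ret = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kgd[0], va, size,
						      drm_priv[0], &mem,
						      &offset, flags);
	if (ret)
		return ret;

	/* map on every GPU; each call attaches a per-GPU BO and DMA mapping */
	for (i = 0; i < num_gpus; i++) {
		ret = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kgd[i], mem,
							    drm_priv[i]);
		if (ret)
			break;
	}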