Diffstat (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_chardev.c')
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 131
1 file changed, 82 insertions(+), 49 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 86afd37b098d..4bfc0c8ab764 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -321,7 +321,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
 	/* Return gpu_id as doorbell offset for mmap usage */
 	args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL;
 	args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id);
-	if (KFD_IS_SOC15(dev->device_info->asic_family))
+	if (KFD_IS_SOC15(dev))
 		/* On SOC15 ASICs, include the doorbell offset within the
 		 * process doorbell frame, which is 2 pages.
 		 */
@@ -405,7 +405,7 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
 
 	mutex_lock(&p->mutex);
 
-	retval = pqm_update_queue(&p->pqm, args->queue_id, &properties);
+	retval = pqm_update_queue_properties(&p->pqm, args->queue_id, &properties);
 
 	mutex_unlock(&p->mutex);
 
@@ -418,7 +418,7 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
 	int retval;
 	const int max_num_cus = 1024;
 	struct kfd_ioctl_set_cu_mask_args *args = data;
-	struct queue_properties properties;
+	struct mqd_update_info minfo = {0};
 	uint32_t __user *cu_mask_ptr = (uint32_t __user *)args->cu_mask_ptr;
 	size_t cu_mask_size = sizeof(uint32_t) * (args->num_cu_mask / 32);
 
@@ -428,8 +428,8 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
 		return -EINVAL;
 	}
 
-	properties.cu_mask_count = args->num_cu_mask;
-	if (properties.cu_mask_count == 0) {
+	minfo.cu_mask.count = args->num_cu_mask;
+	if (minfo.cu_mask.count == 0) {
 		pr_debug("CU mask cannot be 0");
 		return -EINVAL;
 	}
@@ -438,32 +438,33 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
 	 * limit of max_num_cus bits. We can then just drop any CU mask bits
 	 * past max_num_cus bits and just use the first max_num_cus bits.
 	 */
-	if (properties.cu_mask_count > max_num_cus) {
+	if (minfo.cu_mask.count > max_num_cus) {
 		pr_debug("CU mask cannot be greater than 1024 bits");
-		properties.cu_mask_count = max_num_cus;
+		minfo.cu_mask.count = max_num_cus;
 		cu_mask_size = sizeof(uint32_t) * (max_num_cus/32);
 	}
 
-	properties.cu_mask = kzalloc(cu_mask_size, GFP_KERNEL);
-	if (!properties.cu_mask)
+	minfo.cu_mask.ptr = kzalloc(cu_mask_size, GFP_KERNEL);
+	if (!minfo.cu_mask.ptr)
 		return -ENOMEM;
 
-	retval = copy_from_user(properties.cu_mask, cu_mask_ptr, cu_mask_size);
+	retval = copy_from_user(minfo.cu_mask.ptr, cu_mask_ptr, cu_mask_size);
 	if (retval) {
 		pr_debug("Could not copy CU mask from userspace");
-		kfree(properties.cu_mask);
-		return -EFAULT;
+		retval = -EFAULT;
+		goto out;
 	}
 
+	minfo.update_flag = UPDATE_FLAG_CU_MASK;
+
 	mutex_lock(&p->mutex);
 
-	retval = pqm_set_cu_mask(&p->pqm, args->queue_id, &properties);
+	retval = pqm_update_mqd(&p->pqm, args->queue_id, &minfo);
 
 	mutex_unlock(&p->mutex);
 
-	if (retval)
-		kfree(properties.cu_mask);
-
+out:
+	kfree(minfo.cu_mask.ptr);
 	return retval;
 }
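The hunk above replaces the queue_properties-based CU mask plumbing with a dedicated struct mqd_update_info (cu_mask.count, cu_mask.ptr, UPDATE_FLAG_CU_MASK) and routes every exit through the single kfree() behind the new "out" label. A minimal userspace sketch of the same truncate-then-copy pattern; the struct and function names here are illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_NUM_CUS 1024

struct cu_mask_info {
	uint32_t count;	/* number of valid bits in ptr */
	uint32_t *ptr;	/* bitmap, one bit per CU */
};

/* Copy a user-supplied CU bitmap, silently dropping bits past
 * MAX_NUM_CUS, mirroring the ioctl's behaviour.
 * Returns 0 on success, -1 on failure. */
static int copy_cu_mask(struct cu_mask_info *minfo,
			const uint32_t *user_mask, uint32_t num_bits)
{
	size_t size;

	if (num_bits == 0 || num_bits % 32)
		return -1;		/* the ioctl returns -EINVAL here */
	if (num_bits > MAX_NUM_CUS)
		num_bits = MAX_NUM_CUS;	/* drop the excess bits */

	size = sizeof(uint32_t) * (num_bits / 32);
	minfo->ptr = malloc(size);
	if (!minfo->ptr)
		return -1;		/* the ioctl returns -ENOMEM */

	memcpy(minfo->ptr, user_mask, size);	/* stands in for copy_from_user() */
	minfo->count = num_bits;
	return 0;
}

int main(void)
{
	uint32_t user_mask[32] = { 0xffffffff };	/* enable the first 32 CUs */
	struct cu_mask_info minfo = { 0 };

	if (copy_cu_mask(&minfo, user_mask, 1024) == 0)
		printf("copied %u mask bits\n", minfo.count);
	free(minfo.ptr);
	return 0;
}

In the kernel, copy_from_user() can fail partway through, which is why the ioctl converts any nonzero return into -EFAULT and then jumps to the shared cleanup label instead of freeing inline on each path.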
@@ -579,7 +580,7 @@ static int kfd_ioctl_dbg_register(struct file *filep,
 	if (!dev)
 		return -EINVAL;
 
-	if (dev->device_info->asic_family == CHIP_CARRIZO) {
+	if (dev->adev->asic_type == CHIP_CARRIZO) {
 		pr_debug("kfd_ioctl_dbg_register not supported on CZ\n");
 		return -EINVAL;
 	}
@@ -630,7 +631,7 @@ static int kfd_ioctl_dbg_unregister(struct file *filep,
 	if (!dev || !dev->dbgmgr)
 		return -EINVAL;
 
-	if (dev->device_info->asic_family == CHIP_CARRIZO) {
+	if (dev->adev->asic_type == CHIP_CARRIZO) {
 		pr_debug("kfd_ioctl_dbg_unregister not supported on CZ\n");
 		return -EINVAL;
 	}
@@ -675,7 +676,7 @@ static int kfd_ioctl_dbg_address_watch(struct file *filep,
 	if (!dev)
 		return -EINVAL;
 
-	if (dev->device_info->asic_family == CHIP_CARRIZO) {
+	if (dev->adev->asic_type == CHIP_CARRIZO) {
 		pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n");
 		return -EINVAL;
 	}
@@ -783,7 +784,7 @@ static int kfd_ioctl_dbg_wave_control(struct file *filep,
 	if (!dev)
 		return -EINVAL;
 
-	if (dev->device_info->asic_family == CHIP_CARRIZO) {
+	if (dev->adev->asic_type == CHIP_CARRIZO) {
 		pr_debug("kfd_ioctl_dbg_wave_control not supported on CZ\n");
 		return -EINVAL;
 	}
@@ -850,7 +851,7 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
 	dev = kfd_device_by_id(args->gpu_id);
 	if (dev)
 		/* Reading GPU clock counter from KGD */
-		args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(dev->kgd);
+		args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(dev->adev);
 	else
 		/* Node without GPU resource */
 		args->gpu_clock_counter = 0;
@@ -1011,11 +1012,6 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
 		void *mem, *kern_addr;
 		uint64_t size;
 
-		if (p->signal_page) {
-			pr_err("Event page is already set\n");
-			return -EINVAL;
-		}
-
 		kfd = kfd_device_by_id(GET_GPU_ID(args->event_page_offset));
 		if (!kfd) {
 			pr_err("Getting device by id failed in %s\n", __func__);
@@ -1023,6 +1019,13 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
 		}
 
 		mutex_lock(&p->mutex);
+
+		if (p->signal_page) {
+			pr_err("Event page is already set\n");
+			err = -EINVAL;
+			goto out_unlock;
+		}
+
 		pdd = kfd_bind_process_to_device(kfd, p);
 		if (IS_ERR(pdd)) {
 			err = PTR_ERR(pdd);
@@ -1037,20 +1040,24 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
 			err = -EINVAL;
 			goto out_unlock;
 		}
-		mutex_unlock(&p->mutex);
 
-		err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kfd->kgd,
+		err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kfd->adev,
 						mem, &kern_addr, &size);
 		if (err) {
 			pr_err("Failed to map event page to kernel\n");
-			return err;
+			goto out_unlock;
 		}
 
 		err = kfd_event_page_set(p, kern_addr, size);
 		if (err) {
 			pr_err("Failed to set event page\n");
-			return err;
+			amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(kfd->adev, mem);
+			goto out_unlock;
 		}
+
+		p->signal_handle = args->event_page_offset;
+
+		mutex_unlock(&p->mutex);
 	}
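The create_event hunks move the p->signal_page check and the entire signal-page setup under p->mutex, record the BO handle in p->signal_handle, and turn bare returns into goto out_unlock unwinding, so no path leaks the lock and a failed kfd_event_page_set() unmaps the page again. Reduced to a standalone sketch of the check-under-lock plus unwind idiom (a pthread mutex stands in for p->mutex; error codes are illustrative):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *signal_page;	/* stands in for p->signal_page */

/* Set up the signal page exactly once; later callers fail. */
static int setup_signal_page(void *page)
{
	int err = 0;

	pthread_mutex_lock(&lock);

	/* The check must sit under the lock: done before taking it,
	 * two racing callers can both see "no page yet" and proceed. */
	if (signal_page) {
		err = -1;	/* the ioctl returns -EINVAL */
		goto out_unlock;
	}

	signal_page = page;

out_unlock:
	pthread_mutex_unlock(&lock);
	return err;
}

int main(void)
{
	int first = setup_signal_page((void *)0x1000);	/* succeeds */

	/* Second attempt must be rejected. */
	return (first == 0 && setup_signal_page((void *)0x2000) == -1) ? 0 : 1;
}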
@@ -1130,7 +1137,7 @@ static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
 	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS &&
 	    pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va)
 		dev->kfd2kgd->set_scratch_backing_va(
-			dev->kgd, args->va_addr, pdd->qpd.vmid);
+			dev->adev, args->va_addr, pdd->qpd.vmid);
 
 	return 0;
 
@@ -1151,7 +1158,7 @@ static int kfd_ioctl_get_tile_config(struct file *filep,
 	if (!dev)
 		return -EINVAL;
 
-	amdgpu_amdkfd_get_tile_config(dev->kgd, &config);
+	amdgpu_amdkfd_get_tile_config(dev->adev, &config);
 
 	args->gb_addr_config = config.gb_addr_config;
 	args->num_banks = config.num_banks;
@@ -1237,7 +1244,7 @@ bool kfd_dev_is_large_bar(struct kfd_dev *dev)
 	if (dev->use_iommu_v2)
 		return false;
 
-	amdgpu_amdkfd_get_local_mem_info(dev->kgd, &mem_info);
+	amdgpu_amdkfd_get_local_mem_info(dev->adev, &mem_info);
 	if (mem_info.local_mem_size_private == 0 &&
 			mem_info.local_mem_size_public > 0)
 		return true;
@@ -1259,6 +1266,23 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
 	if (args->size == 0)
 		return -EINVAL;
 
+#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
+	/* Flush pending deferred work to avoid racing with deferred actions
+	 * from previous memory map changes (e.g. munmap).
+	 */
+	svm_range_list_lock_and_flush_work(&p->svms, current->mm);
+	mutex_lock(&p->svms.lock);
+	mmap_write_unlock(current->mm);
+	if (interval_tree_iter_first(&p->svms.objects,
+				     args->va_addr >> PAGE_SHIFT,
+				     (args->va_addr + args->size - 1) >> PAGE_SHIFT)) {
+		pr_err("Address: 0x%llx already allocated by SVM\n",
+			args->va_addr);
+		mutex_unlock(&p->svms.lock);
+		return -EADDRINUSE;
+	}
+	mutex_unlock(&p->svms.lock);
+#endif
 	dev = kfd_device_by_id(args->gpu_id);
 	if (!dev)
 		return -EINVAL;
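The new CONFIG_HSA_AMD_SVM block above refuses to carve a GPU VM allocation out of an address range that SVM already manages, returning -EADDRINUSE instead of letting two managers own the same pages. The check the kernel performs with interval_tree_iter_first() on p->svms.objects boils down to an overlap test on inclusive page bounds, sketched here as a self-contained program (the PAGE_SHIFT value and the struct are assumptions for illustration):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* One registered SVM range, in units of pages (inclusive bounds). */
struct svm_range {
	uint64_t start_pfn;
	uint64_t last_pfn;
};

/* Return 1 if [va_addr, va_addr + size) overlaps the given range,
 * using the same inclusive page arithmetic as the ioctl. */
static int overlaps_svm(const struct svm_range *r,
			uint64_t va_addr, uint64_t size)
{
	uint64_t first = va_addr >> PAGE_SHIFT;
	uint64_t last = (va_addr + size - 1) >> PAGE_SHIFT;

	return first <= r->last_pfn && last >= r->start_pfn;
}

int main(void)
{
	struct svm_range r = { .start_pfn = 0x100, .last_pfn = 0x1ff };

	/* 0x100000..0x100fff falls inside pages 0x100..0x1ff: rejected. */
	printf("%d\n", overlaps_svm(&r, 0x100000, 0x1000));	/* prints 1 */
	printf("%d\n", overlaps_svm(&r, 0x200000, 0x1000));	/* prints 0 */
	return 0;
}

Note the lock dance in the hunk: svm_range_list_lock_and_flush_work() drains deferred SVM work while holding the mmap write lock, then the ioctl drops the mmap lock but keeps p->svms.lock across the interval query so the answer cannot go stale.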
@@ -1289,7 +1313,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
 			err = -EINVAL;
 			goto err_unlock;
 		}
-		offset = amdgpu_amdkfd_get_mmio_remap_phys_addr(dev->kgd);
+		offset = dev->adev->rmmio_remap.bus_addr;
 		if (!offset) {
 			err = -ENOMEM;
 			goto err_unlock;
@@ -1297,7 +1321,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
 	}
 
 	err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
-		dev->kgd, args->va_addr, args->size,
+		dev->adev, args->va_addr, args->size,
 		pdd->drm_priv, (struct kgd_mem **) &mem,
 		&offset, flags);
 
@@ -1329,7 +1353,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
 	return 0;
 
 err_free:
-	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem,
+	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, (struct kgd_mem *)mem,
 					       pdd->drm_priv, NULL);
 err_unlock:
 	mutex_unlock(&p->mutex);
@@ -1351,6 +1375,15 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
 		return -EINVAL;
 
 	mutex_lock(&p->mutex);
+	/*
+	 * Safeguard to prevent user space from freeing signal BO.
+	 * It will be freed at process termination.
+	 */
+	if (p->signal_handle && (p->signal_handle == args->handle)) {
+		pr_err("Free signal BO is not allowed\n");
+		ret = -EPERM;
+		goto err_unlock;
+	}
 
 	pdd = kfd_get_process_device_data(dev, p);
 	if (!pdd) {
@@ -1366,7 +1399,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
 		goto err_unlock;
 	}
 
-	ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd,
+	ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev,
 				(struct kgd_mem *)mem, pdd->drm_priv, &size);
 
 	/* If freeing the buffer failed, leave the handle in place for
@@ -1451,7 +1484,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
 			goto get_mem_obj_from_handle_failed;
 		}
 		err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
-			peer->kgd, (struct kgd_mem *)mem,
+			peer->adev, (struct kgd_mem *)mem,
 			peer_pdd->drm_priv, &table_freed);
 		if (err) {
 			pr_err("Failed to map to gpu %d/%d\n",
@@ -1463,7 +1496,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
 
 	mutex_unlock(&p->mutex);
 
-	err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, (struct kgd_mem *) mem, true);
+	err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true);
 	if (err) {
 		pr_debug("Sync memory failed, wait interrupted by user signal\n");
 		goto sync_memory_failed;
@@ -1560,7 +1593,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
 			goto get_mem_obj_from_handle_failed;
 		}
 		err = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
-			peer->kgd, (struct kgd_mem *)mem, peer_pdd->drm_priv);
+			peer->adev, (struct kgd_mem *)mem, peer_pdd->drm_priv);
 		if (err) {
 			pr_err("Failed to unmap from gpu %d/%d\n",
 			       i, args->n_devices);
@@ -1570,8 +1603,8 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
 	}
 	mutex_unlock(&p->mutex);
 
-	if (dev->device_info->asic_family == CHIP_ALDEBARAN) {
-		err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd,
+	if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) {
+		err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev,
 				(struct kgd_mem *) mem, true);
 		if (err) {
 			pr_debug("Sync memory failed, wait interrupted by user signal\n");
@@ -1647,7 +1680,7 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep,
 {
 	struct kfd_ioctl_get_dmabuf_info_args *args = data;
 	struct kfd_dev *dev = NULL;
-	struct kgd_dev *dma_buf_kgd;
+	struct amdgpu_device *dmabuf_adev;
 	void *metadata_buffer = NULL;
 	uint32_t flags;
 	unsigned int i;
@@ -1667,15 +1700,15 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep,
 	}
 
 	/* Get dmabuf info from KGD */
-	r = amdgpu_amdkfd_get_dmabuf_info(dev->kgd, args->dmabuf_fd,
-					  &dma_buf_kgd, &args->size,
+	r = amdgpu_amdkfd_get_dmabuf_info(dev->adev, args->dmabuf_fd,
+					  &dmabuf_adev, &args->size,
 					  metadata_buffer, args->metadata_size,
 					  &args->metadata_size, &flags);
 	if (r)
 		goto exit;
 
 	/* Reverse-lookup gpu_id from kgd pointer */
-	dev = kfd_device_by_kgd(dma_buf_kgd);
+	dev = kfd_device_by_adev(dmabuf_adev);
 	if (!dev) {
 		r = -EINVAL;
 		goto exit;
@@ -1725,7 +1758,7 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
 		goto err_unlock;
 	}
 
-	r = amdgpu_amdkfd_gpuvm_import_dmabuf(dev->kgd, dmabuf,
+	r = amdgpu_amdkfd_gpuvm_import_dmabuf(dev->adev, dmabuf,
 					      args->va_addr, pdd->drm_priv,
 					      (struct kgd_mem **)&mem, &size,
 					      NULL);
@@ -1746,7 +1779,7 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
 	return 0;
 
 err_free:
-	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem,
+	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->adev, (struct kgd_mem *)mem,
 					       pdd->drm_priv, NULL);
 err_unlock:
 	mutex_unlock(&p->mutex);
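Note how the Aldebaran special case above is now keyed off the GC IP version, KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2), rather than the CHIP_ALDEBARAN asic_family enum; with discovered IP versions, one check covers every ASIC revision carrying that GC block. A sketch of the major/minor/revision packing this macro family uses upstream (the surrounding program is illustrative):

#include <stdio.h>

/* Same packing amdgpu uses for IP discovery versions:
 * major in bits 23:16, minor in 15:8, revision in 7:0. */
#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

int main(void)
{
	unsigned int gc = IP_VERSION(9, 4, 2);	/* Aldebaran's GC block */

	if (gc == IP_VERSION(9, 4, 2))
		printf("needs the extra gpuvm sync after unmap\n");
	return 0;
}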
@@ -2033,7 +2066,7 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
 	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
 		return -EINVAL;
 
-	address = amdgpu_amdkfd_get_mmio_remap_phys_addr(dev->kgd);
+	address = dev->adev->rmmio_remap.bus_addr;
 
 	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
 			VM_DONTDUMP | VM_PFNMAP;
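The bulk of the remaining hunks are the mechanical half of this diff: the opaque struct kgd_dev handle is replaced by struct amdgpu_device throughout (dev->kgd becomes dev->adev), which also lets one-line accessors such as amdgpu_amdkfd_get_mmio_remap_phys_addr() collapse into direct field reads like dev->adev->rmmio_remap.bus_addr above. The shape of the change as a sketch; the struct layout shown is an illustrative subset, not the real amdgpu_device:

#include <stdint.h>

/* Before: an opaque handle, readable only through accessor calls. */
struct kgd_dev;	/* never defined outside amdgpu */

/* After: the concrete device type, fields readable in place.
 * (Illustrative subset, not the real struct layout.) */
struct amdgpu_device {
	struct {
		uint64_t bus_addr;	/* CPU-visible MMIO remap address */
	} rmmio_remap;
};

static uint64_t mmio_remap_addr(struct amdgpu_device *adev)
{
	return adev->rmmio_remap.bus_addr;	/* was a kfd2kgd accessor call */
}

int main(void)
{
	struct amdgpu_device adev = { .rmmio_remap = { .bus_addr = 0xfed00000 } };

	return mmio_remap_addr(&adev) ? 0 : 1;
}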