author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-07-30 12:54:44 -0700 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-07-30 12:54:44 -0700 |
commit | 515f12b9eeed35250d793b7c874707c33f7f6e05 (patch) | |
tree | d01ac06c36327910a5ce0b4a0ee3a145736c072c /drivers | |
parent | 2a11c76e5301dddefcb618dac04f74e6314df6bc (diff) | |
parent | de4ee728465f7c0c29241550e083139b2ce9159c (diff) | |
download | linux-515f12b9eeed35250d793b7c874707c33f7f6e05.tar.bz2 | |
Merge tag 'for-linus-hmm' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull HMM fixes from Jason Gunthorpe:
"Fix the locking around nouveau's use of the hmm_range_* APIs. It works
correctly in the success case, but many of the edge cases have
missing unlocks or double unlocks.
The diffstat is a bit big as Christoph did a comprehensive job to move
the obsolete API from the core header and into the driver before
fixing its flow, but the risk of regression from this code motion is
low"
* tag 'for-linus-hmm' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
nouveau: unlock mmap_sem on all errors from nouveau_range_fault
nouveau: remove the block parameter to nouveau_range_fault
mm/hmm: move hmm_vma_range_done and hmm_vma_fault to nouveau
mm/hmm: always return EBUSY for invalid ranges in hmm_range_{fault,snapshot}
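For readers following the locking change, here is a minimal sketch of the fault-window flow these patches establish, condensed from the diff below. The wrapper name nouveau_fault_window_sketch and the elided GPU-mapping step are illustrative only, not part of the driver; the field names (svmm->mm, svmm->mirror, svmm->mutex) follow the diff, and surrounding kernel context is assumed.

```c
/*
 * Hedged sketch of the post-merge lock ownership (not the full
 * nouveau_svm_fault(); the GPU VMM mapping under svmm->mutex is
 * elided). The rule the first patch enforces: nouveau_range_fault()
 * releases mmap_sem itself on every error return, so the caller may
 * only up_read() on the success path.
 */
static void nouveau_fault_window_sketch(struct nouveau_svmm *svmm,
					struct hmm_range *range)
{
	int ret;

	down_read(&svmm->mm->mmap_sem);
again:
	ret = nouveau_range_fault(&svmm->mirror, range);
	if (ret == 0) {
		mutex_lock(&svmm->mutex);
		if (!nouveau_range_done(range)) {
			/* range was invalidated while faulting; retry */
			mutex_unlock(&svmm->mutex);
			goto again;
		}
		/* ... map range->pfns into the GPU VMM here ... */
		mutex_unlock(&svmm->mutex);
		up_read(&svmm->mm->mmap_sem);	/* success path only */
	}
	/* on error, nouveau_range_fault() already dropped mmap_sem */
}
```

The asymmetry is deliberate: nouveau_range_fault() owns the unlock on every failure, which is exactly what the "unlock mmap_sem on all errors" patch in the list above fixes, and why the diff moves the caller's up_read() inside the success branch.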
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_svm.c | 47 |
1 file changed, 44 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 8c92374afcf2..a835cebb6d90 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -475,6 +475,47 @@ nouveau_svm_fault_cache(struct nouveau_svm *svm,
 		fault->inst, fault->addr, fault->access);
 }
 
+static inline bool
+nouveau_range_done(struct hmm_range *range)
+{
+	bool ret = hmm_range_valid(range);
+
+	hmm_range_unregister(range);
+	return ret;
+}
+
+static int
+nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range)
+{
+	long ret;
+
+	range->default_flags = 0;
+	range->pfn_flags_mask = -1UL;
+
+	ret = hmm_range_register(range, mirror,
+				 range->start, range->end,
+				 PAGE_SHIFT);
+	if (ret) {
+		up_read(&range->vma->vm_mm->mmap_sem);
+		return (int)ret;
+	}
+
+	if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
+		up_read(&range->vma->vm_mm->mmap_sem);
+		return -EAGAIN;
+	}
+
+	ret = hmm_range_fault(range, true);
+	if (ret <= 0) {
+		if (ret == 0)
+			ret = -EBUSY;
+		up_read(&range->vma->vm_mm->mmap_sem);
+		hmm_range_unregister(range);
+		return ret;
+	}
+	return 0;
+}
+
 static int
 nouveau_svm_fault(struct nvif_notify *notify)
 {
@@ -649,10 +690,10 @@ nouveau_svm_fault(struct nvif_notify *notify)
 		range.values = nouveau_svm_pfn_values;
 		range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
 again:
-		ret = hmm_vma_fault(&svmm->mirror, &range, true);
+		ret = nouveau_range_fault(&svmm->mirror, &range);
 		if (ret == 0) {
 			mutex_lock(&svmm->mutex);
-			if (!hmm_vma_range_done(&range)) {
+			if (!nouveau_range_done(&range)) {
 				mutex_unlock(&svmm->mutex);
 				goto again;
 			}
@@ -666,8 +707,8 @@ again:
 					NULL);
 			svmm->vmm->vmm.object.client->super = false;
 			mutex_unlock(&svmm->mutex);
+			up_read(&svmm->mm->mmap_sem);
 		}
-		up_read(&svmm->mm->mmap_sem);
 
 		/* Cancel any faults in the window whose pages didn't manage
 		 * to keep their valid bit, or stay writeable when required.