author     Jason Gunthorpe <jgg@mellanox.com>    2019-05-22 16:52:52 -0300
committer  Jason Gunthorpe <jgg@mellanox.com>    2019-06-07 10:47:24 -0300
commit     6d7c3cde93c1d9ac0b37f78ec3f2ff052159a242 (patch)
tree       a3b28f3ceb1a253b2683b8cb04002f6cccf2958f /mm/hmm.c
parent     9b1ae605c8e295836050fa6eaf720131db2fac73 (diff)
download   linux-6d7c3cde93c1d9ac0b37f78ec3f2ff052159a242.tar.bz2
mm/hmm: fix use after free with struct hmm in the mmu notifiers
mmu_notifier_unregister_no_release() is not a fence and the mmu_notifier
system will continue to reference hmm->mn until the srcu grace period
expires.
Resulting in use after free races like this:
     CPU0                                        CPU1
                                                 __mmu_notifier_invalidate_range_start()
                                                   srcu_read_lock
                                                   hlist_for_each ()
                                                     // mn == hmm->mn
 hmm_mirror_unregister()
   hmm_put()
     hmm_free()
       mmu_notifier_unregister_no_release()
         hlist_del_init_rcu(hmm->mn->list)
                                                   mn->ops->invalidate_range_start(mn, range);
                                                     mm_get_hmm()
         mm->hmm = NULL;
         kfree(hmm)
                                                       mutex_lock(&hmm->lock);
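For illustration, here is a minimal user-space sketch of the same lifetime
rule, written against liburcu rather than the kernel's SRCU-based
mmu_notifier machinery (struct node, node values, and the list layout are
invented for the example). Unlinking an object from an RCU-protected list
is not a fence: a reader that entered its read-side critical section before
the unlink may still hold the pointer, so the free must be deferred past a
grace period, which is what this patch arranges for struct hmm.

/* Build with: cc demo.c -lurcu */
#include <urcu.h>		/* rcu_read_lock(), synchronize_rcu(), ... */
#include <urcu/rculist.h>	/* cds_list_*_rcu() helpers */
#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct cds_list_head list;
};

static CDS_LIST_HEAD(head);

int main(void)
{
	struct node *n = calloc(1, sizeof(*n)), *pos;

	rcu_register_thread();
	n->val = 42;
	cds_list_add_rcu(&n->list, &head);

	/* A reader (CPU1 in the diagram above) traverses the list inside
	 * a read-side critical section and may hold on to the node. */
	rcu_read_lock();
	cds_list_for_each_entry_rcu(pos, &head, list)
		printf("reader sees node %d\n", pos->val);
	rcu_read_unlock();

	/* The updater (CPU0) unlinks the node, but del-rcu is not a
	 * fence: calling free(n) right here, with a concurrent reader
	 * still inside its read-side section, is exactly the use after
	 * free this patch fixes. */
	cds_list_del_rcu(&n->list);

	/* Wait for all pre-existing readers, then free safely. */
	synchronize_rcu();
	free(n);

	rcu_unregister_thread();
	return 0;
}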
Use SRCU to kfree the hmm memory so that the notifiers can rely on hmm
existing. Get the now-safe hmm struct through container_of() and use
kref_get_unless_zero() to lock it against concurrent freeing.
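As a rough illustration of the kref_get_unless_zero() pattern the notifier
callbacks now rely on, the toy below (user-space C with stdatomic;
get_unless_zero() and struct obj are hypothetical names, not the kernel
API) refuses to take a reference once the count has dropped to zero, so a
callback racing with the final hmm_put() bails out instead of reviving a
dying object.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int refcount;
};

/* Analogue of kref_get_unless_zero(): increment the count only if it
 * has not already reached zero, i.e. never revive a dying object. */
static bool get_unless_zero(struct obj *o)
{
	int v = atomic_load(&o->refcount);

	while (v != 0) {
		if (atomic_compare_exchange_weak(&o->refcount, &v, v + 1))
			return true;	/* reference taken, object pinned */
	}
	return false;			/* final put already ran: bail out */
}

int main(void)
{
	struct obj o;

	atomic_init(&o.refcount, 1);
	printf("while alive: %d\n", get_unless_zero(&o));	/* 1 */

	atomic_store(&o.refcount, 0);	/* simulate the final put */
	printf("while dying: %d\n", get_unless_zero(&o));	/* 0 */
	return 0;
}

This is why hmm_release() and the invalidate callbacks in the diff below
can simply return early: a failed get means the object is already on its
way to being freed.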
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Tested-by: Philip Yang <Philip.Yang@amd.com>
Diffstat (limited to 'mm/hmm.c')
-rw-r--r--  mm/hmm.c | 23 +++++++++++++++++------
1 file changed, 17 insertions(+), 6 deletions(-)
diff --git a/mm/hmm.c b/mm/hmm.c
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -104,6 +104,11 @@ error:
 	return NULL;
 }
 
+static void hmm_free_rcu(struct rcu_head *rcu)
+{
+	kfree(container_of(rcu, struct hmm, rcu));
+}
+
 static void hmm_free(struct kref *kref)
 {
 	struct hmm *hmm = container_of(kref, struct hmm, kref);
@@ -116,7 +121,7 @@ static void hmm_free(struct kref *kref)
 	mm->hmm = NULL;
 	spin_unlock(&mm->page_table_lock);
 
-	kfree(hmm);
+	mmu_notifier_call_srcu(&hmm->rcu, hmm_free_rcu);
 }
 
 static inline void hmm_put(struct hmm *hmm)
@@ -144,10 +149,14 @@ void hmm_mm_destroy(struct mm_struct *mm)
 
 static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 {
-	struct hmm *hmm = mm_get_hmm(mm);
+	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
 	struct hmm_mirror *mirror;
 	struct hmm_range *range;
 
+	/* Bail out if hmm is in the process of being freed */
+	if (!kref_get_unless_zero(&hmm->kref))
+		return;
+
 	/* Report this HMM as dying. */
 	hmm->dead = true;
 
@@ -185,13 +194,14 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 static int hmm_invalidate_range_start(struct mmu_notifier *mn,
 			const struct mmu_notifier_range *nrange)
 {
-	struct hmm *hmm = mm_get_hmm(nrange->mm);
+	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
 	struct hmm_mirror *mirror;
 	struct hmm_update update;
 	struct hmm_range *range;
 	int ret = 0;
 
-	VM_BUG_ON(!hmm);
+	if (!kref_get_unless_zero(&hmm->kref))
+		return 0;
 
 	update.start = nrange->start;
 	update.end = nrange->end;
@@ -236,9 +246,10 @@ out:
 static void hmm_invalidate_range_end(struct mmu_notifier *mn,
 			const struct mmu_notifier_range *nrange)
 {
-	struct hmm *hmm = mm_get_hmm(nrange->mm);
+	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
 
-	VM_BUG_ON(!hmm);
+	if (!kref_get_unless_zero(&hmm->kref))
+		return;
 
 	mutex_lock(&hmm->lock);
 	hmm->notifiers--;