author:    Jason Gunthorpe <jgg@mellanox.com>    2019-08-06 20:15:39 -0300
committer: Jason Gunthorpe <jgg@mellanox.com>    2019-08-16 11:44:48 -0300
commit:    70df291bf81ffda47ff84e6e2da4fbe21f95a861
tree:      fe19e393dfc9d17b06eb0324999ffc1a90b99f99 /mm/mmu_notifier.c
parent:    56c57103db17db9ecdad0507a3f0e3eea747fabe
mm/mmu_notifiers: do not speculatively allocate a mmu_notifier_mm
A prior commit, e0f3c3f78da2 ("mm/mmu_notifier: init notifier if necessary"),
made an attempt at this, but it had to be reverted because calling the
GFP_KERNEL allocator under the i_mmap_mutex causes a deadlock; see
commit 35cfa2b0b491 ("mm/mmu_notifier: allocate mmu_notifier in advance").
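For background, the reverted approach failed roughly like this (an
illustrative sketch, not the reverted code itself): mm_take_all_locks()
acquires every file's i_mmap_mutex, and a sleeping GFP_KERNEL allocation
made under those locks can enter direct reclaim, which may itself need an
i_mmap_mutex to unmap file-backed pages:

	ret = mm_take_all_locks(mm);	/* takes each mapping's i_mmap_mutex */
	if (unlikely(ret))
		return ret;
	/*
	 * Illustrative only: GFP_KERNEL may recurse into direct reclaim,
	 * and reclaim can take i_mmap_mutex via the file rmap walk, so
	 * this can deadlock against the locks held above.
	 */
	mm->mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
	mm_drop_all_locks(mm);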
However, we can avoid that problem by doing the allocation only under the
mmap_sem, which this path already holds for write.
Since all writers to mm->mmu_notifier_mm hold the write side of the
mmap_sem, reading it under that semaphore is deterministic, and we can use
that read to decide whether the allocation path is required, without
speculation.
The actual update of mm->mmu_notifier_mm must still be done under
mm_take_all_locks() to ensure read-side coherency.
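Condensed, the resulting flow in __mmu_notifier_register() looks like this
(a sketch of the diff below, with error unwinding trimmed):

	lockdep_assert_held_write(&mm->mmap_sem);

	if (!mm->mmu_notifier_mm) {
		/* stable read: every writer holds the mmap_sem write side */
		mmu_notifier_mm = kmalloc(sizeof(*mmu_notifier_mm), GFP_KERNEL);
		if (!mmu_notifier_mm)
			return -ENOMEM;
		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
		spin_lock_init(&mmu_notifier_mm->lock);
	}

	ret = mm_take_all_locks(mm);	/* no allocation under these locks */
	...
	if (mmu_notifier_mm)
		mm->mmu_notifier_mm = mmu_notifier_mm;	/* publish for readers */
	...
	mm_drop_all_locks(mm);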
Link: https://lore.kernel.org/r/20190806231548.25242-3-jgg@ziepe.ca
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Tested-by: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Diffstat (limited to 'mm/mmu_notifier.c')
 mm/mmu_notifier.c | 34 ++++++++++++++++++++------------
 1 file changed, 22 insertions(+), 12 deletions(-)
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 218a6f108bc2..696810f632ad 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -242,27 +242,32 @@ EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range);
  */
 int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
 {
-	struct mmu_notifier_mm *mmu_notifier_mm;
+	struct mmu_notifier_mm *mmu_notifier_mm = NULL;
 	int ret;
 
 	lockdep_assert_held_write(&mm->mmap_sem);
 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
 
-	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
-	if (unlikely(!mmu_notifier_mm))
-		return -ENOMEM;
+	if (!mm->mmu_notifier_mm) {
+		/*
+		 * kmalloc cannot be called under mm_take_all_locks(), but we
+		 * know that mm->mmu_notifier_mm can't change while we hold
+		 * the write side of the mmap_sem.
+		 */
+		mmu_notifier_mm =
+			kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
+		if (!mmu_notifier_mm)
+			return -ENOMEM;
+
+		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
+		spin_lock_init(&mmu_notifier_mm->lock);
+	}
 
 	ret = mm_take_all_locks(mm);
 	if (unlikely(ret))
 		goto out_clean;
 
-	if (!mm_has_notifiers(mm)) {
-		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
-		spin_lock_init(&mmu_notifier_mm->lock);
-
-		mm->mmu_notifier_mm = mmu_notifier_mm;
-		mmu_notifier_mm = NULL;
-	}
+	/* Pairs with the mmdrop in mmu_notifier_unregister_* */
 	mmgrab(mm);
 
 	/*
@@ -273,14 +278,19 @@ int __mmu_notifier_register(struct mmu_notifier *mn, struct mm_struct *mm)
 	 * We can't race against any other mmu notifier method either
 	 * thanks to mm_take_all_locks().
	 */
+	if (mmu_notifier_mm)
+		mm->mmu_notifier_mm = mmu_notifier_mm;
+
 	spin_lock(&mm->mmu_notifier_mm->lock);
 	hlist_add_head_rcu(&mn->hlist, &mm->mmu_notifier_mm->list);
 	spin_unlock(&mm->mmu_notifier_mm->lock);
 
 	mm_drop_all_locks(mm);
+	BUG_ON(atomic_read(&mm->mm_users) <= 0);
+	return 0;
+
 out_clean:
 	kfree(mmu_notifier_mm);
-	BUG_ON(atomic_read(&mm->mm_users) <= 0);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(__mmu_notifier_register);
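For reference, callers reach __mmu_notifier_register() with the mmap_sem
already held for write; a minimal caller sketch (example_register is a
hypothetical name, modeled on the mmu_notifier_register() wrapper in this
file):

	static int example_register(struct mmu_notifier *mn, struct mm_struct *mm)
	{
		int ret;

		/*
		 * Hold the write side so __mmu_notifier_register() can read
		 * mm->mmu_notifier_mm without speculation.
		 */
		down_write(&mm->mmap_sem);
		ret = __mmu_notifier_register(mn, mm);
		up_write(&mm->mmap_sem);
		return ret;
	}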