path: root/mm/mremap.c
author	Wei Yang <richard.weiyang@linux.alibaba.com>	2020-08-06 23:23:40 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-08-07 11:33:27 -0700
commit	b8aa9d9d95b3b4b60d42ac95f65d33a92527aef3 (patch)
tree	bbe059eeca3a684c6b0f9e7d24a36726f30113c4	/mm/mremap.c
parent	45e55300f11495ed58c53427da7f0d958800a30f (diff)
download	linux-b8aa9d9d95b3b4b60d42ac95f65d33a92527aef3.tar.bz2
mm/mremap: it is sure to have enough space when extent meets requirement
Patch series "mm/mremap: cleanup move_page_tables() a little", v5.

move_page_tables() tries to move page tables either by PMD or by PTE. The root issue is that moving by PMD requires both the old and the new range to be PMD aligned, but the current code calculates the old range and the new range separately. This leads to some redundant checks and calculations. This cleanup consolidates the range check in one place to reduce the extra range handling.

This patch (of 3):

old_end is passed to these two functions to check whether there is enough space to do the move, but that check is already done before invoking them. The two functions are only invoked when the extent meets the requirement, and there is one check before invoking them:

	if (extent > old_end - old_addr)
		extent = old_end - old_addr;

This implies that (old_end - old_addr) can never fail the check inside these two functions.

Signed-off-by: Wei Yang <richard.weiyang@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Tested-by: Dmitry Osipenko <digetx@gmail.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Yang Shi <yang.shi@linux.alibaba.com>
Cc: Thomas Hellstrom (VMware) <thomas_os@shipmail.org>
Cc: Anshuman Khandual <anshuman.khandual@arm.com>
Cc: Sean Christopherson <sean.j.christopherson@intel.com>
Cc: Wei Yang <richard.weiyang@linux.alibaba.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Link: http://lkml.kernel.org/r/20200710092835.56368-1-richard.weiyang@linux.alibaba.com
Link: http://lkml.kernel.org/r/20200710092835.56368-2-richard.weiyang@linux.alibaba.com
Link: http://lkml.kernel.org/r/20200708095028.41706-1-richard.weiyang@linux.alibaba.com
Link: http://lkml.kernel.org/r/20200708095028.41706-2-richard.weiyang@linux.alibaba.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
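To make the argument above concrete, here is a small self-contained sketch of the caller-side flow (plain userspace C, not the kernel source; PMD_SIZE, the sample addresses and the helper pmd_move_possible() are stand-ins chosen for illustration): extent is clamped to old_end - old_addr, and the PMD path is only taken when extent == PMD_SIZE, so old_end - old_addr >= PMD_SIZE is already guaranteed when the callee runs.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in value; the real PMD_SIZE is architecture dependent. */
#define PMD_SIZE (1UL << 21)
#define PMD_MASK (~(PMD_SIZE - 1))

/*
 * Mirrors move_normal_pmd()'s entry check after this patch: only the
 * alignment of the two addresses is tested, not the remaining length.
 */
static bool pmd_move_possible(unsigned long old_addr, unsigned long new_addr)
{
	return !((old_addr & ~PMD_MASK) || (new_addr & ~PMD_MASK));
}

int main(void)
{
	/* Made-up, PMD-aligned sample range of three PMDs. */
	unsigned long old_addr = 0x40000000UL;
	unsigned long new_addr = 0x50000000UL;
	unsigned long old_end  = old_addr + 3 * PMD_SIZE;
	unsigned long extent;

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		/* extent starts as the distance to the next PMD boundary... */
		unsigned long next = (old_addr + PMD_SIZE) & PMD_MASK;

		extent = next - old_addr;
		/* ...and is clamped to what is left, exactly as quoted above. */
		if (extent > old_end - old_addr)
			extent = old_end - old_addr;

		/*
		 * The PMD path is only taken when extent == PMD_SIZE, so at
		 * this point old_end - old_addr >= PMD_SIZE already holds and
		 * a second range check inside the callee could never fail.
		 */
		if (extent == PMD_SIZE && pmd_move_possible(old_addr, new_addr))
			printf("move %#lx -> %#lx by PMD\n", old_addr, new_addr);
	}
	return 0;
}

Run as written, every iteration of the sample range takes the PMD path; a range whose remainder drops below PMD_SIZE simply fails the extent == PMD_SIZE test in the caller instead, which is why the dropped in-function check could never trigger.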
Diffstat (limited to 'mm/mremap.c')
-rw-r--r--	mm/mremap.c	10
1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/mm/mremap.c b/mm/mremap.c
index 6b153dc05fe4..4f434062d154 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -193,15 +193,13 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 
 #ifdef CONFIG_HAVE_MOVE_PMD
 static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
-		  unsigned long new_addr, unsigned long old_end,
-		  pmd_t *old_pmd, pmd_t *new_pmd)
+		  unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd)
 {
 	spinlock_t *old_ptl, *new_ptl;
 	struct mm_struct *mm = vma->vm_mm;
 	pmd_t pmd;
 
-	if ((old_addr & ~PMD_MASK) || (new_addr & ~PMD_MASK)
-	    || old_end - old_addr < PMD_SIZE)
+	if ((old_addr & ~PMD_MASK) || (new_addr & ~PMD_MASK))
 		return false;
 
 	/*
@@ -292,7 +290,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 				if (need_rmap_locks)
 					take_rmap_locks(vma);
 				moved = move_huge_pmd(vma, old_addr, new_addr,
-						      old_end, old_pmd, new_pmd);
+						      old_pmd, new_pmd);
 				if (need_rmap_locks)
 					drop_rmap_locks(vma);
 				if (moved)
@@ -312,7 +310,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 			if (need_rmap_locks)
 				take_rmap_locks(vma);
 			moved = move_normal_pmd(vma, old_addr, new_addr,
-						old_end, old_pmd, new_pmd);
+						old_pmd, new_pmd);
 			if (need_rmap_locks)
 				drop_rmap_locks(vma);
 			if (moved)