author	Arjun Roy <arjunroy@google.com>	2020-06-25 20:30:01 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-06-26 00:27:37 -0700
commit	7f70c2a68a51496289df163f6969d4db7c383f30
tree	9acf8e8fac975ee9a01806f2f03273fab17bf98d /mm
parent	243bce09c91b0145aeaedd5afba799d81841c030
mm/memory.c: properly pte_offset_map_lock/unlock in vm_insert_pages()
Calls to pte_offset_map() in vm_insert_pages() are erroneously not
matched with a call to pte_unmap().  This would cause problems on
architectures where that is not a no-op.

This patch does away with the non-traditional locking in the existing
code, and instead uses pte_offset_map_lock/unlock() as usual,
incrementing PTE as necessary.  The PTE pointer is kept within bounds
since we clamp it with PTRS_PER_PTE.

Link: http://lkml.kernel.org/r/20200618220446.20284-1-arjunroy.kdev@gmail.com
Fixes: 8cd3984d81d5 ("mm/memory.c: add vm_insert_pages()")
Signed-off-by: Arjun Roy <arjunroy@google.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Soheil Hassas Yeganeh <soheil@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
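For reference, the pairing the patch adopts is the standard mm/ idiom:
pte_offset_map_lock() maps the PTE page (a kmap on highmem
configurations) and takes the page-table lock in one call, the walker
advances the returned pointer one entry per PAGE_SIZE step, and
pte_unmap_unlock() must be handed the pointer that was originally
returned, not the advanced cursor.  A minimal sketch of that idiom
follows; walk_pmd_range() is a hypothetical helper name for
illustration only, not anything from this patch:

#include <linux/mm.h>

/*
 * Sketch of the pte_offset_map_lock()/pte_unmap_unlock() idiom used by
 * the fix.  walk_pmd_range() is hypothetical; only the two map/unmap
 * helpers are real kernel API.
 */
static int walk_pmd_range(struct mm_struct *mm, pmd_t *pmd,
			  unsigned long addr, int nr_entries)
{
	pte_t *start_pte, *pte;
	spinlock_t *ptl;
	int i;

	/* Map the PTE page and take the page-table lock together. */
	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);

	for (pte = start_pte, i = 0; i < nr_entries; ++pte, ++i) {
		/* Operate on *pte while the lock is held. */
		addr += PAGE_SIZE;
	}

	/* Unmap with the pointer originally returned, not the cursor. */
	pte_unmap_unlock(start_pte, ptl);
	return 0;
}

The caller is responsible for never walking past the end of the PTE
page; insert_pages() below ensures this by clamping each batch to the
entries remaining in the current PMD.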
Diffstat (limited to 'mm')
-rw-r--r--	mm/memory.c	21
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 1c632faa2611..0e5b25c9b151 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1498,7 +1498,7 @@ out:
 }
 
 #ifdef pte_index
-static int insert_page_in_batch_locked(struct mm_struct *mm, pmd_t *pmd,
+static int insert_page_in_batch_locked(struct mm_struct *mm, pte_t *pte,
 			unsigned long addr, struct page *page, pgprot_t prot)
 {
 	int err;
@@ -1506,8 +1506,9 @@ static int insert_page_in_batch_locked(struct mm_struct *mm, pmd_t *pmd,
 	if (!page_count(page))
 		return -EINVAL;
 	err = validate_page_before_insert(page);
-	return err ? err : insert_page_into_pte_locked(
-		mm, pte_offset_map(pmd, addr), addr, page, prot);
+	if (err)
+		return err;
+	return insert_page_into_pte_locked(mm, pte, addr, page, prot);
 }
 
 /* insert_pages() amortizes the cost of spinlock operations
@@ -1517,7 +1518,8 @@ static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
 			struct page **pages, unsigned long *num, pgprot_t prot)
 {
 	pmd_t *pmd = NULL;
-	spinlock_t *pte_lock = NULL;
+	pte_t *start_pte, *pte;
+	spinlock_t *pte_lock;
 	struct mm_struct *const mm = vma->vm_mm;
 	unsigned long curr_page_idx = 0;
 	unsigned long remaining_pages_total = *num;
@@ -1536,18 +1538,17 @@ more:
 	ret = -ENOMEM;
 	if (pte_alloc(mm, pmd))
 		goto out;
-	pte_lock = pte_lockptr(mm, pmd);
 
 	while (pages_to_write_in_pmd) {
 		int pte_idx = 0;
 		const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
 
-		spin_lock(pte_lock);
-		for (; pte_idx < batch_size; ++pte_idx) {
-			int err = insert_page_in_batch_locked(mm, pmd,
+		start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
+		for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
+			int err = insert_page_in_batch_locked(mm, pte,
 				addr, pages[curr_page_idx], prot);
 			if (unlikely(err)) {
-				spin_unlock(pte_lock);
+				pte_unmap_unlock(start_pte, pte_lock);
 				ret = err;
 				remaining_pages_total -= pte_idx;
 				goto out;
@@ -1555,7 +1556,7 @@ more:
 			addr += PAGE_SIZE;
 			++curr_page_idx;
 		}
-		spin_unlock(pte_lock);
+		pte_unmap_unlock(start_pte, pte_lock);
 		pages_to_write_in_pmd -= batch_size;
 		remaining_pages_total -= batch_size;
 	}
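The exported interface is unchanged by this fix.  As a hedged sketch of
how a caller might use it, the following hypothetical driver helper
batch-maps pages with vm_insert_pages(); only vm_insert_pages() itself
is real kernel API, and my_map_pages() plus its reporting are
illustrative assumptions:

#include <linux/mm.h>
#include <linux/printk.h>

/*
 * Hypothetical caller of vm_insert_pages().  On return, num holds the
 * count of pages that were NOT inserted, so it is 0 on full success.
 */
static int my_map_pages(struct vm_area_struct *vma, struct page **pages,
			unsigned long nr_pages)
{
	unsigned long num = nr_pages;
	int err = vm_insert_pages(vma, vma->vm_start, pages, &num);

	if (err)
		pr_warn("mapped %lu of %lu pages, err %d\n",
			nr_pages - num, nr_pages, err);
	return err;
}

Batching the insertions this way is what lets insert_pages() amortize
the page-table lock acquisition across up to eight PTEs at a time
instead of locking once per page.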