-rw-r--r--  include/linux/mm.h |  4 ----
-rw-r--r--  mm/mmap.c          | 15 ---------------
-rw-r--r--  mm/rmap.c          | 12 ------------
3 files changed, 0 insertions(+), 31 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8e2841a2f441..3899395a03de 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -97,11 +97,7 @@ extern unsigned int kobjsize(const void *objp);
#define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
#define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
-#ifdef CONFIG_MMU
-#define VM_LOCK_RMAP 0x01000000 /* Do not follow this rmap (mmu mmap) */
-#else
#define VM_MAPPED_COPY 0x01000000 /* T if mapped copy of data (nommu mmap) */
-#endif
#define VM_INSERTPAGE 0x02000000 /* The vma has had "vm_insert_page()" done on it */
#define VM_ALWAYSDUMP 0x04000000 /* Always include in core dumps */
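
For context, vm_flags is a plain bitmask on struct vm_area_struct; with VM_LOCK_RMAP gone, bit 0x01000000 is again used only for the nommu VM_MAPPED_COPY meaning. A minimal illustration of how such a bit is tested follows (the helper name is hypothetical, not part of this patch):

	/* Hypothetical helper, for illustration only: vm_flags bits are
	 * tested with a plain bitwise AND against the flag's mask. */
	static inline int vma_is_mapped_copy(struct vm_area_struct *vma)
	{
		return !!(vma->vm_flags & VM_MAPPED_COPY);
	}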
diff --git a/mm/mmap.c b/mm/mmap.c
index 6a0c15db7f60..f1b4448626bf 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -554,9 +554,7 @@ again: remove_next = 1 + (end > next->vm_end);
*/
if (importer && !importer->anon_vma) {
/* Block reverse map lookups until things are set up. */
- importer->vm_flags |= VM_LOCK_RMAP;
if (anon_vma_clone(importer, vma)) {
- importer->vm_flags &= ~VM_LOCK_RMAP;
return -ENOMEM;
}
importer->anon_vma = anon_vma;
@@ -618,11 +616,6 @@ again: remove_next = 1 + (end > next->vm_end);
__vma_unlink(mm, next, vma);
if (file)
__remove_shared_vm_struct(next, file, mapping);
- /*
- * This VMA is now dead, no need for rmap to follow it.
- * Call anon_vma_merge below, outside of i_mmap_lock.
- */
- next->vm_flags |= VM_LOCK_RMAP;
} else if (insert) {
/*
* split_vma has split insert from vma, and needs
@@ -635,20 +628,12 @@ again: remove_next = 1 + (end > next->vm_end);
if (mapping)
spin_unlock(&mapping->i_mmap_lock);
- /*
- * The current VMA has been set up. It is now safe for the
- * rmap code to get from the pages to the ptes.
- */
- if (anon_vma && importer)
- importer->vm_flags &= ~VM_LOCK_RMAP;
-
if (remove_next) {
if (file) {
fput(file);
if (next->vm_flags & VM_EXECUTABLE)
removed_exe_file_vma(mm);
}
- /* Protected by mmap_sem and VM_LOCK_RMAP. */
if (next->anon_vma)
anon_vma_merge(vma, next);
mm->map_count--;
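
Pieced together from the hunks above, the scheme being deleted from vma_adjust() looked roughly like this: mark the VMA so rmap walkers skip it, perform the anon_vma setup, then clear the mark once the VMA is fully linked (a simplified sketch of the removed code, not a drop-in excerpt):

	/* Simplified shape of the removed VM_LOCK_RMAP window (illustration). */
	importer->vm_flags |= VM_LOCK_RMAP;		/* hide VMA from rmap walks */
	if (anon_vma_clone(importer, vma)) {
		importer->vm_flags &= ~VM_LOCK_RMAP;	/* undo on allocation failure */
		return -ENOMEM;
	}
	importer->anon_vma = anon_vma;
	/* ... VMA linked and fully set up ... */
	importer->vm_flags &= ~VM_LOCK_RMAP;		/* visible to rmap again */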
diff --git a/mm/rmap.c b/mm/rmap.c
index 28bcdc433d88..4d2fb93851ca 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -329,18 +329,6 @@ vma_address(struct page *page, struct vm_area_struct *vma)
/* page should be within @vma mapping range */
return -EFAULT;
}
- if (unlikely(vma->vm_flags & VM_LOCK_RMAP)) {
- /*
- * This VMA is being unlinked or is not yet linked into the
- * VMA tree. Do not try to follow this rmap. This race
- * condition can result in page_referenced() ignoring a
- * reference or in try_to_unmap() failing to unmap a page.
- * The VMA cannot be freed under us because we hold the
- * anon_vma->lock, which the munmap code takes while
- * unlinking the anon_vmas from the VMA.
- */
- return -EFAULT;
- }
return address;
}
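
For reference, what remains of vma_address() after this hunk is essentially the linear offset calculation below, reconstructed from the visible context lines (exact shift handling may differ between kernel versions):

	/* Approximate remaining body of vma_address() (reconstruction). */
	static inline unsigned long
	vma_address(struct page *page, struct vm_area_struct *vma)
	{
		pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
		unsigned long address;

		address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
			/* page should be within @vma mapping range */
			return -EFAULT;
		}
		return address;
	}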