From e6a1530d692d6a60cdf15dfbcfea07f5324d7b9f Mon Sep 17 00:00:00 2001
From: Christoph Lameter
Date: Sun, 25 Jun 2006 05:46:49 -0700
Subject: [PATCH] Allow migration of mlocked pages

Hugh clarified the role of VM_LOCKED. So we can now implement page
migration for mlocked pages.

Allow the migration of mlocked pages. This means that try_to_unmap must
unmap mlocked pages in the migration case.

Signed-off-by: Christoph Lameter
Acked-by: Hugh Dickins
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/rmap.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/mm/rmap.c b/mm/rmap.c
index 882a85826bb2..e76909e880ca 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -562,9 +562,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	 * If it's recently referenced (perhaps page_referenced
 	 * skipped over this mm) then we should reactivate it.
 	 */
-	if ((vma->vm_flags & VM_LOCKED) ||
-			(ptep_clear_flush_young(vma, address, pte)
-			&& !migration)) {
+	if (!migration && ((vma->vm_flags & VM_LOCKED) ||
+			(ptep_clear_flush_young(vma, address, pte)))) {
 		ret = SWAP_FAIL;
 		goto out_unmap;
 	}
@@ -771,7 +770,7 @@ static int try_to_unmap_file(struct page *page, int migration)
 
 	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
 						shared.vm_set.list) {
-		if (vma->vm_flags & VM_LOCKED)
+		if ((vma->vm_flags & VM_LOCKED) && !migration)
 			continue;
 		cursor = (unsigned long) vma->vm_private_data;
 		if (cursor > max_nl_cursor)
@@ -805,7 +804,7 @@ static int try_to_unmap_file(struct page *page, int migration)
 	do {
 		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
 						shared.vm_set.list) {
-			if (vma->vm_flags & VM_LOCKED)
+			if ((vma->vm_flags & VM_LOCKED) && !migration)
 				continue;
 			cursor = (unsigned long) vma->vm_private_data;
			while ( cursor < max_nl_cursor &&
-- 
cgit v1.2.3
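
For context, a minimal userspace sketch (not kernel code; the VM_LOCKED value
and both helper functions below are simplified stand-ins, not the kernel's
definitions) of the logic change in try_to_unmap_one(): after this patch, the
mlocked and recently-referenced checks only block unmapping when the pass is
not a migration pass.

/*
 * Userspace illustration only: compares the old and new unmap-blocking
 * predicate from try_to_unmap_one(). Flag value and helpers are stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

#define VM_LOCKED 0x00002000UL	/* stand-in flag value */

/* Old behaviour: VM_LOCKED always blocked unmapping, even for migration. */
static bool unmap_blocked_old(unsigned long vm_flags, bool young, bool migration)
{
	return (vm_flags & VM_LOCKED) || (young && !migration);
}

/* New behaviour: nothing blocks unmapping in the migration case. */
static bool unmap_blocked_new(unsigned long vm_flags, bool young, bool migration)
{
	return !migration && ((vm_flags & VM_LOCKED) || young);
}

int main(void)
{
	/* an mlocked page that has not been referenced recently */
	unsigned long flags = VM_LOCKED;
	bool young = false;

	printf("swapout:   old=%d new=%d\n",
	       unmap_blocked_old(flags, young, false),
	       unmap_blocked_new(flags, young, false));	/* blocked either way */
	printf("migration: old=%d new=%d\n",
	       unmap_blocked_old(flags, young, true),
	       unmap_blocked_new(flags, young, true));	/* now allowed */
	return 0;
}

Compiled with a C compiler, the swapout case stays blocked (1/1) while the
migration case changes from blocked to allowed (1/0), which is exactly the
behaviour the commit message describes.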