author:    Lee Schermerhorn <lee.schermerhorn@hp.com>      2008-10-18 20:26:56 -0700
committer: Linus Torvalds <torvalds@linux-foundation.org>  2008-10-20 08:52:31 -0700
commit:    9978ad583e100945b74e4f33e73317983ea32df9 (patch)
tree:      132d3a06664e04cac4635ddba55a0ec36ff2a001 /mm/mlock.c
parent:    c11d69d8c830e09a0e7b3935c952afb26c48bba8 (diff)
download:  linux-9978ad583e100945b74e4f33e73317983ea32df9.tar.bz2
mlock: make mlock error return Posixly Correct
Rework the POSIX error returns for mlock().

POSIX requires specific error codes from the mlock*() system calls under some conditions, and these differ from what low-level kernel functions such as get_user_pages() return for those same conditions. For more info, see:

http://marc.info/?l=linux-kernel&m=121750892930775&w=2

This patch provides the same translation of get_user_pages() error codes to the POSIX-specified error codes, in the context of the mlock rework for the unevictable LRU.
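As an illustration (not part of the patch), here is a minimal user-space sketch of the behavior POSIX mandates: locking a range that spans unmapped pages must fail with ENOMEM, even though get_user_pages() reports the unmapped portion as -EFAULT inside the kernel.

```c
/*
 * Illustrative user-space sketch (not from this commit).  POSIX requires
 * mlock() on a range containing unmapped pages to fail with ENOMEM.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *buf;

	/* Map two pages, then unmap the second to create a hole. */
	buf = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;
	munmap(buf + page, page);

	/* Locking across the hole must fail with ENOMEM, not EFAULT. */
	if (mlock(buf, 2 * page) == -1)
		printf("mlock: %s\n", strerror(errno));	/* expect ENOMEM */

	munmap(buf, page);
	return 0;
}
```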
[akpm@linux-foundation.org: fix build]
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/mlock.c')
-rw-r--r-- | mm/mlock.c | 33
1 file changed, 27 insertions(+), 6 deletions(-)
```diff
diff --git a/mm/mlock.c b/mm/mlock.c
index bce1b22c36c2..008ea70b7afa 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -248,11 +248,24 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 			addr += PAGE_SIZE;	/* for next get_user_pages() */
 			nr_pages--;
 		}
+		ret = 0;
 	}
 
 	lru_add_drain_all();	/* to update stats */
 
-	return 0;	/* count entire vma as locked_vm */
+	return ret;	/* count entire vma as locked_vm */
+}
+
+/*
+ * convert get_user_pages() return value to posix mlock() error
+ */
+static int __mlock_posix_error_return(long retval)
+{
+	if (retval == -EFAULT)
+		retval = -ENOMEM;
+	else if (retval == -ENOMEM)
+		retval = -EAGAIN;
+	return retval;
 }
 
 #else /* CONFIG_UNEVICTABLE_LRU */
@@ -265,9 +278,15 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 			int mlock)
 {
 	if (mlock && (vma->vm_flags & VM_LOCKED))
-		make_pages_present(start, end);
+		return make_pages_present(start, end);
+	return 0;
+}
+
+static inline int __mlock_posix_error_return(long retval)
+{
 	return 0;
 }
+
 #endif /* CONFIG_UNEVICTABLE_LRU */
 
 /**
@@ -434,10 +453,7 @@ success:
 	downgrade_write(&mm->mmap_sem);
 
 	ret = __mlock_vma_pages_range(vma, start, end, 1);
-	if (ret > 0) {
-		mm->locked_vm -= ret;
-		ret = 0;
-	}
+
 	/*
 	 * Need to reacquire mmap sem in write mode, as our callers
 	 * expect this.  We have no support for atomically upgrading
@@ -451,6 +467,11 @@ success:
 		/* non-NULL *prev must contain @start, but need to check @end */
 		if (!(*prev) || end > (*prev)->vm_end)
 			ret = -ENOMEM;
+		else if (ret > 0) {
+			mm->locked_vm -= ret;
+			ret = 0;
+		} else
+			ret = __mlock_posix_error_return(ret);	/* translate if needed */
 	} else {
 		/*
 		 * TODO: for unlocking, pages will already be resident, so
```
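With the translation in place, mlock_fixup() handles the return value of __mlock_vma_pages_range() in two steps: a positive value still means that many pages were faulted in, so it is subtracted back out of mm->locked_vm and the whole VMA is counted as locked, while a negative value is passed through __mlock_posix_error_return(), which maps the kernel's -EFAULT (unmapped address) to the POSIX-mandated -ENOMEM, and the kernel's -ENOMEM (pages could not be faulted in) to -EAGAIN. When CONFIG_UNEVICTABLE_LRU is disabled, the stub helper returns 0, preserving the old behavior of counting the entire VMA as locked regardless of faulting errors.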