author    Paul Jackson <pj@sgi.com>  2006-01-08 01:01:54 -0800
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-01-08 20:13:43 -0800
commit    cf2a473c4089aa41c26f653200673f5a4cc25047
tree      0bce21f4684a382b13e93ba5b85409cf5eab1c2c /mm/mempolicy.c
parent    b4b2641843db124637fa3d2cb2101982035dcc82
[PATCH] cpuset: combine refresh_mems and update_mems
The important code paths through alloc_pages_current() and alloc_page_vma(), by which most kernel page allocations go, both called cpuset_update_current_mems_allowed(), which in turn called refresh_mems(). -Both- of these latter two routines took the task lock, got the task's cpuset pointer, and checked for an out-of-date cpuset->mems_generation. That was a silly duplication of code and a waste of CPU cycles on an important code path.

Consolidated those two routines into a single routine, called cpuset_update_task_memory_state(), since it updates more than just mems_allowed. Changed all callers of either routine to call the new consolidated routine.

Signed-off-by: Paul Jackson <pj@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
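From the description above, the consolidated routine presumably follows a generation-check pattern: read the cpuset's mems_generation under the task lock, and take cpuset_sem for the slow-path refresh only when the task's cached generation is stale. A minimal sketch of that pattern follows; the field cpuset_mems_generation and the helper refresh_mems_allowed() are assumed names for illustration, not the actual kernel source.

/*
 * Hypothetical sketch reconstructed from the commit message above --
 * not the real kernel code.  tsk->cpuset_mems_generation and
 * refresh_mems_allowed() are assumed names.
 */
void cpuset_update_task_memory_state(void)
{
	struct task_struct *tsk = current;
	int cs_generation;

	task_lock(tsk);			/* pin tsk->cpuset while reading it */
	cs_generation = tsk->cpuset->mems_generation;
	task_unlock(tsk);

	if (tsk->cpuset_mems_generation != cs_generation) {
		down(&cpuset_sem);		/* may sleep: callers must be able to WAIT */
		refresh_mems_allowed(tsk);	/* assumed slow-path helper */
		tsk->cpuset_mems_generation = cs_generation;
		up(&cpuset_sem);
	}
}

Whatever the exact body, the point of the consolidation is that the lock/lookup/generation-check sequence now runs once per allocation instead of twice.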
Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r--  mm/mempolicy.c  10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 9dea2b8a7d48..515bfeee027e 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -387,7 +387,7 @@ static int contextualize_policy(int mode, nodemask_t *nodes)
 	if (!nodes)
 		return 0;
-	cpuset_update_current_mems_allowed();
+	cpuset_update_task_memory_state();
 	if (!cpuset_nodes_subset_current_mems_allowed(*nodes))
 		return -EINVAL;
 	return mpol_check_policy(mode, nodes);
@@ -461,7 +461,7 @@ long do_get_mempolicy(int *policy, nodemask_t *nmask,
 	struct vm_area_struct *vma = NULL;
 	struct mempolicy *pol = current->mempolicy;
 
-	cpuset_update_current_mems_allowed();
+	cpuset_update_task_memory_state();
 	if (flags & ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR))
 		return -EINVAL;
 	if (flags & MPOL_F_ADDR) {
@@ -1089,7 +1089,7 @@ alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
 {
 	struct mempolicy *pol = get_vma_policy(current, vma, addr);
 
-	cpuset_update_current_mems_allowed();
+	cpuset_update_task_memory_state();
 
 	if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
 		unsigned nid;
@@ -1115,7 +1115,7 @@ alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
  * interrupt context and apply the current process NUMA policy.
  * Returns NULL when no page can be allocated.
  *
- * Don't call cpuset_update_current_mems_allowed() unless
+ * Don't call cpuset_update_task_memory_state() unless
  * 1) it's ok to take cpuset_sem (can WAIT), and
  * 2) allocating for current task (not interrupt).
  */
@@ -1124,7 +1124,7 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 	struct mempolicy *pol = current->mempolicy;
 
 	if ((gfp & __GFP_WAIT) && !in_interrupt())
-		cpuset_update_current_mems_allowed();
+		cpuset_update_task_memory_state();
 	if (!pol || in_interrupt())
 		pol = &default_policy;
 	if (pol->policy == MPOL_INTERLEAVE)
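The guard in this last hunk is the calling convention the comment above spells out: the refresh may sleep on cpuset_sem, so it is only legal for allocations that can WAIT, issued from process context on behalf of the current task. A hedged sketch of that convention as a wrapper; maybe_update_task_memory_state() is a hypothetical helper, not part of this patch.

/*
 * Hypothetical wrapper, not part of this patch: call the refresh only
 * when both preconditions from the comment hold -- the allocation may
 * sleep (__GFP_WAIT) and we are not in interrupt context.
 */
static inline void maybe_update_task_memory_state(gfp_t gfp)
{
	if ((gfp & __GFP_WAIT) && !in_interrupt())
		cpuset_update_task_memory_state();
}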