Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c  63
1 file changed, 43 insertions(+), 20 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 8fb86ba452b0..82efecbab96f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -32,6 +32,7 @@ static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
 unsigned long hugepages_treat_as_movable;
 int hugetlb_dynamic_pool;
+static int hugetlb_next_nid;
 
 /*
  * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
@@ -165,36 +166,56 @@ static int adjust_pool_surplus(int delta)
 	return ret;
 }
 
-static int alloc_fresh_huge_page(void)
+static struct page *alloc_fresh_huge_page_node(int nid)
 {
-	static int prev_nid;
 	struct page *page;
-	int nid;
-
-	/*
-	 * Copy static prev_nid to local nid, work on that, then copy it
-	 * back to prev_nid afterwards: otherwise there's a window in which
-	 * a racer might pass invalid nid MAX_NUMNODES to alloc_pages_node.
-	 * But we don't need to use a spin_lock here: it really doesn't
-	 * matter if occasionally a racer chooses the same nid as we do.
-	 */
-	nid = next_node(prev_nid, node_online_map);
-	if (nid == MAX_NUMNODES)
-		nid = first_node(node_online_map);
-	prev_nid = nid;
 
-	page = alloc_pages_node(nid, htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
-				HUGETLB_PAGE_ORDER);
+	page = alloc_pages_node(nid,
+		htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|__GFP_NOWARN,
+		HUGETLB_PAGE_ORDER);
 	if (page) {
 		set_compound_page_dtor(page, free_huge_page);
 		spin_lock(&hugetlb_lock);
 		nr_huge_pages++;
-		nr_huge_pages_node[page_to_nid(page)]++;
+		nr_huge_pages_node[nid]++;
 		spin_unlock(&hugetlb_lock);
 		put_page(page); /* free it into the hugepage allocator */
-		return 1;
 	}
-	return 0;
+
+	return page;
+}
+
+static int alloc_fresh_huge_page(void)
+{
+	struct page *page;
+	int start_nid;
+	int next_nid;
+	int ret = 0;
+
+	start_nid = hugetlb_next_nid;
+
+	do {
+		page = alloc_fresh_huge_page_node(hugetlb_next_nid);
+		if (page)
+			ret = 1;
+		/*
+		 * Use a helper variable to find the next node and then
+		 * copy it back to hugetlb_next_nid afterwards:
+		 * otherwise there's a window in which a racer might
+		 * pass invalid nid MAX_NUMNODES to alloc_pages_node.
+		 * But we don't need to use a spin_lock here: it really
+		 * doesn't matter if occasionally a racer chooses the
+		 * same nid as we do. Move nid forward in the mask even
+		 * if we just successfully allocated a hugepage so that
+		 * the next caller gets hugepages on the next node.
+		 */
+		next_nid = next_node(hugetlb_next_nid, node_online_map);
+		if (next_nid == MAX_NUMNODES)
+			next_nid = first_node(node_online_map);
+		hugetlb_next_nid = next_nid;
+	} while (!page && hugetlb_next_nid != start_nid);
+
+	return ret;
 }
 
 static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
@@ -365,6 +386,8 @@ static int __init hugetlb_init(void)
 	for (i = 0; i < MAX_NUMNODES; ++i)
 		INIT_LIST_HEAD(&hugepage_freelists[i]);
 
+	hugetlb_next_nid = first_node(node_online_map);
+
 	for (i = 0; i < max_huge_pages; ++i) {
 		if (!alloc_fresh_huge_page())
 			break;
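
To see the round-robin policy this patch adopts in isolation, here is a minimal userspace C sketch of the same wrap-around scan. It is an illustration, not kernel code: the bitmask online_mask stands in for node_online_map, first_node_sim() and next_node_sim() are hypothetical stand-ins for the kernel's first_node()/next_node(), and alloc_on_node() fakes an allocation with __GFP_THISNODE semantics, i.e. it fails rather than falling back to another node.

/*
 * Userspace sketch of the round-robin nid selection above.
 * All names with a _sim suffix, online_mask, and alloc_on_node()
 * are invented for this illustration.
 */
#include <stdio.h>
#include <stdbool.h>

#define MAX_NUMNODES 8

static unsigned online_mask = 0x0b;	/* nodes 0, 1 and 3 online */
static int hugetlb_next_nid;		/* racy by design; see comment in patch */

static int first_node_sim(unsigned mask)
{
	for (int nid = 0; nid < MAX_NUMNODES; nid++)
		if (mask & (1u << nid))
			return nid;
	return MAX_NUMNODES;
}

static int next_node_sim(int nid, unsigned mask)
{
	for (int n = nid + 1; n < MAX_NUMNODES; n++)
		if (mask & (1u << n))
			return n;
	return MAX_NUMNODES;	/* caller wraps via first_node_sim() */
}

/* Pretend node 3 is online but cannot satisfy a huge-page request. */
static bool alloc_on_node(int nid)
{
	return nid != 3;
}

/* Mirrors the patched alloc_fresh_huge_page(): try each online node once. */
static int alloc_fresh_sim(void)
{
	int start_nid = hugetlb_next_nid;
	bool page;
	int ret = 0;

	do {
		page = alloc_on_node(hugetlb_next_nid);
		if (page) {
			printf("allocated on node %d\n", hugetlb_next_nid);
			ret = 1;
		}
		/* Advance even on success so callers interleave nodes. */
		int next_nid = next_node_sim(hugetlb_next_nid, online_mask);
		if (next_nid == MAX_NUMNODES)
			next_nid = first_node_sim(online_mask);
		hugetlb_next_nid = next_nid;
	} while (!page && hugetlb_next_nid != start_nid);

	return ret;
}

int main(void)
{
	hugetlb_next_nid = first_node_sim(online_mask);
	for (int i = 0; i < 5; i++)
		if (!alloc_fresh_sim())
			printf("no node could satisfy the request\n");
	return 0;
}

Running the sketch shows the two properties the patch is after: the nid advances even on success, so successive calls land on nodes 0, 1, 0, 1, ... rather than piling pages onto one node, and a node that fails (node 3 here) is skipped after a single __GFP_THISNODE-style attempt, with the do/while giving up only once every online node has been tried exactly once per call.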