author	Matthew Wilcox (Oracle) <willy@infradead.org>	2022-01-06 16:46:43 -0500
committer	Matthew Wilcox (Oracle) <willy@infradead.org>	2022-03-21 12:56:35 -0400
commit	5232c63f46fdd779303527ec36c518cc1e9c6b4e (patch)
tree	86a2005fadd461b81c8f9db76ea33e5bcf634ff6 /mm
parent	6315d8a23ce308433cf615e435ca2ee2aee7d11c (diff)
mm: Make compound_pincount always available
Move compound_pincount from the third page to the second page, which means it's available for all compound pages. That lets us delete hpage_pincount_available().

On 32-bit systems, there isn't enough space for both compound_pincount and compound_nr in the second page (it would collide with page->private, which is in use for pages in the swap cache), so revert the optimisation of storing both compound_order and compound_nr on 32-bit systems.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
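For orientation: every compound page has order >= 1, so page[1] (the second page, i.e. the first tail page) always exists, while page[2] only exists for order >= 2 — which is exactly why the old location needed the hpage_pincount_available() check. Below is a simplified sketch of the page[1] fields after this change; the struct name is ours, and the real definition is a union inside struct page in include/linux/mm_types.h, outside this mm/-limited diffstat:

	/* Sketch only: counters overlaid on page[1] of a compound page. */
	struct first_tail_page_fields {
		unsigned char compound_dtor;	/* destructor index */
		unsigned char compound_order;	/* log2(number of pages) */
		atomic_t compound_mapcount;	/* starts at -1 */
		atomic_t compound_pincount;	/* moved here from page[2] */
	#ifdef CONFIG_64BIT
		unsigned int compound_nr;	/* 1 << compound_order; dropped on
						 * 32-bit, where it would collide
						 * with page->private */
	#endif
	};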
Diffstat (limited to 'mm')
-rw-r--r--	mm/debug.c	14
-rw-r--r--	mm/gup.c	20
-rw-r--r--	mm/hugetlb.c	4
-rw-r--r--	mm/page_alloc.c	3
-rw-r--r--	mm/rmap.c	6
5 files changed, 20 insertions, 27 deletions
diff --git a/mm/debug.c b/mm/debug.c
index bc9ac87f0e08..c4cf44266430 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -92,16 +92,10 @@ static void __dump_page(struct page *page)
page, page_ref_count(head), mapcount, mapping,
page_to_pgoff(page), page_to_pfn(page));
if (compound) {
- if (hpage_pincount_available(page)) {
- pr_warn("head:%p order:%u compound_mapcount:%d compound_pincount:%d\n",
- head, compound_order(head),
- head_compound_mapcount(head),
- head_compound_pincount(head));
- } else {
- pr_warn("head:%p order:%u compound_mapcount:%d\n",
- head, compound_order(head),
- head_compound_mapcount(head));
- }
+ pr_warn("head:%p order:%u compound_mapcount:%d compound_pincount:%d\n",
+ head, compound_order(head),
+ head_compound_mapcount(head),
+ head_compound_pincount(head));
}

#ifdef CONFIG_MEMCG
diff --git a/mm/gup.c b/mm/gup.c
index 1809dc037a8e..56b6b01a430b 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -99,12 +99,11 @@ retry:
*
* FOLL_GET: page's refcount will be incremented by @refs.
*
- * FOLL_PIN on compound pages that are > two pages long: page's refcount will
- * be incremented by @refs, and page[2].hpage_pinned_refcount will be
- * incremented by @refs * GUP_PIN_COUNTING_BIAS.
+ * FOLL_PIN on compound pages: page's refcount will be incremented by
+ * @refs, and page[1].compound_pincount will be incremented by @refs.
*
- * FOLL_PIN on normal pages, or compound pages that are two pages long:
- * page's refcount will be incremented by @refs * GUP_PIN_COUNTING_BIAS.
+ * FOLL_PIN on normal pages: page's refcount will be incremented by
+ * @refs * GUP_PIN_COUNTING_BIAS.
*
* Return: head page (with refcount appropriately incremented) for success, or
* NULL upon failure. If neither FOLL_GET nor FOLL_PIN was set, that's
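As a worked aside (not part of the patch): with GUP_PIN_COUNTING_BIAS defined as (1U << 10) == 1024 in include/linux/mm.h, the rules above give:

	/*
	 * Illustrative counter deltas for grabbing a page @refs times:
	 *
	 *   FOLL_GET, any page:       refcount += refs
	 *   FOLL_PIN, compound page:  refcount += refs;
	 *                             compound_pincount += refs
	 *   FOLL_PIN, normal page:    refcount += refs * 1024
	 *
	 * e.g. pinning a THP head twice raises its refcount by 2 and its
	 * compound_pincount by 2; pinning a normal page twice raises its
	 * refcount by 2048.
	 */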
@@ -135,16 +134,15 @@ __maybe_unused struct page *try_grab_compound_head(struct page *page,
return NULL;

/*
- * When pinning a compound page of order > 1 (which is
- * what hpage_pincount_available() checks for), use an
- * exact count to track it.
+ * When pinning a compound page, use an exact count to
+ * track it.
*
* However, be sure to *also* increment the normal page
* refcount field at least once, so that the page really
* is pinned. That's why the refcount from the earlier
* try_get_compound_head() is left intact.
*/
- if (hpage_pincount_available(page))
+ if (PageHead(page))
atomic_add(refs, compound_pincount_ptr(page));
else
page_ref_add(page, refs * (GUP_PIN_COUNTING_BIAS - 1));
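An aside on the "at least once" above: try_get_compound_head() already added @refs to the head refcount. The compound branch leaves that bump in place and keeps the exact pin count in compound_pincount, while the normal-page branch tops the earlier bump up to refs * GUP_PIN_COUNTING_BIAS by adding refs * (GUP_PIN_COUNTING_BIAS - 1) more — which is what the page_ref_add() line above does.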
@@ -166,7 +164,7 @@ static void put_compound_head(struct page *page, int refs, unsigned int flags)
if (flags & FOLL_PIN) {
mod_node_page_state(page_pgdat(page), NR_FOLL_PIN_RELEASED,
refs);
- if (hpage_pincount_available(page))
+ if (PageHead(page))
atomic_sub(refs, compound_pincount_ptr(page));
else
refs *= GUP_PIN_COUNTING_BIAS;
@@ -211,7 +209,7 @@ bool __must_check try_grab_page(struct page *page, unsigned int flags)
* increment the normal page refcount field at least once,
* so that the page really is pinned.
*/
- if (hpage_pincount_available(page)) {
+ if (PageHead(page)) {
page_ref_add(page, 1);
atomic_add(1, compound_pincount_ptr(page));
} else {
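Once pinned, the state can be queried without knowing the page's order. A sketch, simplified from page_maybe_dma_pinned() in include/linux/mm.h (the _sketch name is ours):

	static inline bool page_maybe_dma_pinned_sketch(struct page *page)
	{
		/* Every compound page now has an exact pin count in page[1]. */
		if (PageCompound(page))
			return atomic_read(compound_pincount_ptr(page)) > 0;

		/*
		 * Order-0 pages fold pins into the refcount, so this can
		 * false-positive once a page gathers ~1024 plain references.
		 */
		return ((unsigned int)page_ref_count(page)) >=
			GUP_PIN_COUNTING_BIAS;
	}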
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 43fb3155298e..785d6e340292 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1320,7 +1320,9 @@ static void __destroy_compound_gigantic_page(struct page *page,
}

set_compound_order(page, 0);
+#ifdef CONFIG_64BIT
page[1].compound_nr = 0;
+#endif
__ClearPageHead(page);
}
@@ -1812,7 +1814,9 @@ out_error:
for (; j < nr_pages; j++, p = mem_map_next(p, page, j))
__ClearPageReserved(p);
set_compound_order(page, 0);
+#ifdef CONFIG_64BIT
page[1].compound_nr = 0;
+#endif
__ClearPageHead(page);
return false;
}
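With the cached field gone on 32-bit (hence the #ifdef CONFIG_64BIT hunks above), compound_nr() has to fall back to recomputing the page count from compound_order. Roughly the shape of the matching include/linux/mm.h change, which sits outside this mm/-limited diffstat:

	static inline unsigned long compound_nr(struct page *page)
	{
		if (!PageHead(page))
			return 1;
	#ifdef CONFIG_64BIT
		return page[1].compound_nr;	/* cached 1 << compound_order */
	#else
		return 1UL << compound_order(page);
	#endif
	}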
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3589febc6d31..02283598fd14 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -734,8 +734,7 @@ static void prep_compound_head(struct page *page, unsigned int order)
set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
set_compound_order(page, order);
atomic_set(compound_mapcount_ptr(page), -1);
- if (hpage_pincount_available(page))
- atomic_set(compound_pincount_ptr(page), 0);
+ atomic_set(compound_pincount_ptr(page), 0);
}

static void prep_compound_tail(struct page *head, int tail_idx)
diff --git a/mm/rmap.c b/mm/rmap.c
index c7921c102bc0..1a13d5d6cfc7 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1219,8 +1219,7 @@ void page_add_new_anon_rmap(struct page *page,
VM_BUG_ON_PAGE(!PageTransHuge(page), page);
/* increment count (starts at -1) */
atomic_set(compound_mapcount_ptr(page), 0);
- if (hpage_pincount_available(page))
- atomic_set(compound_pincount_ptr(page), 0);
+ atomic_set(compound_pincount_ptr(page), 0);

__mod_lruvec_page_state(page, NR_ANON_THPS, nr);
} else {
@@ -2353,8 +2352,7 @@ void hugepage_add_new_anon_rmap(struct page *page,
{
BUG_ON(address < vma->vm_start || address >= vma->vm_end);
atomic_set(compound_mapcount_ptr(page), 0);
- if (hpage_pincount_available(page))
- atomic_set(compound_pincount_ptr(page), 0);
+ atomic_set(compound_pincount_ptr(page), 0);
__page_set_anon_rmap(page, vma, address, 1);
}
#endif /* CONFIG_HUGETLB_PAGE */
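For completeness, an illustrative caller of the pin accounting touched above. pin_user_pages_fast() and unpin_user_pages() are the real entry points; pin_user_buffer() itself is a hypothetical helper:

	#include <linux/mm.h>

	static int pin_user_buffer(unsigned long uaddr, int nr_pages,
				   struct page **pages)
	{
		/* Takes a FOLL_PIN reference on each page, adjusting refcount
		 * and compound_pincount as described in mm/gup.c above.
		 */
		int pinned = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE,
						 pages);

		if (pinned < 0)
			return pinned;		/* fault or bad range */
		if (pinned != nr_pages) {
			/* Partial pin: undo it so the counters stay balanced. */
			unpin_user_pages(pages, pinned);
			return -EFAULT;
		}
		return 0;	/* caller later calls unpin_user_pages() */
	}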