author  Muchun Song <songmuchun@bytedance.com>  2021-09-07 19:55:55 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-09-08 11:50:23 -0700
commit  fe3df441ef885a75a3eff5e151ead1a92266d222 (patch)
tree  9e1d1e5c4b1851cb9c825adef186380161b7638a
parent  5ef5f810199f421cb6455aa018fb8f21151b4a16 (diff)
mm: remove redundant compound_head() calling
There is a READ_ONCE() in the compound_head() macro, which prevents the compiler from optimizing the code when compound_head() is called more than once in a function. Remove the redundant compound_head() calls from page_to_index() and page_add_file_rmap() for better code generation.

Link: https://lkml.kernel.org/r/20210811101431.83940-1-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Reviewed-by: David Howells <dhowells@redhat.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: William Kucharski <william.kucharski@oracle.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
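The effect described above can be illustrated with a small user-space sketch (not kernel code; my_read_once(), lookup_head(), index_of_twice() and index_of_once() are hypothetical stand-ins for READ_ONCE(), compound_head() and page_to_index(), and the GCC/Clang __typeof__ extension is assumed). The volatile access in my_read_once() keeps the compiler from merging the two lookups in index_of_twice(); caching the head pointer once, as the patch does, removes the duplicated load:

#include <stdio.h>

struct fake_page {
	unsigned long compound_head;	/* low bit set => tail page, rest = head pointer */
	unsigned long index;
};

/*
 * Volatile read, in the spirit of the kernel's READ_ONCE(): the compiler
 * must perform the load every time, so two lookup_head() calls on the
 * same page cannot be merged into one.
 */
#define my_read_once(x)	(*(const volatile __typeof__(x) *)&(x))

static struct fake_page *lookup_head(struct fake_page *page)
{
	unsigned long head = my_read_once(page->compound_head);

	if (head & 1)
		return (struct fake_page *)(head - 1);
	return page;
}

/* Shape of the code before the patch: two calls, two forced loads. */
static unsigned long index_of_twice(struct fake_page *page)
{
	return lookup_head(page)->index + (page - lookup_head(page));
}

/* Shape of the code after the patch: the head is looked up once and cached. */
static unsigned long index_of_once(struct fake_page *page)
{
	struct fake_page *head = lookup_head(page);

	return head->index + (page - head);
}

int main(void)
{
	struct fake_page pages[4] = { { .index = 100, .compound_head = 0 } };
	int i;

	for (i = 1; i < 4; i++)		/* pages[1..3] are tail pages of pages[0] */
		pages[i].compound_head = (unsigned long)&pages[0] | 1;

	/* Both variants compute the same index; only the generated code differs. */
	printf("%lu %lu\n", index_of_twice(&pages[2]), index_of_once(&pages[2]));
	return 0;
}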
-rw-r--r--  include/linux/pagemap.h  7
-rw-r--r--  mm/rmap.c  6
2 files changed, 7 insertions(+), 6 deletions(-)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index ed02aa522263..904e57db3a7d 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -521,18 +521,17 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
*/
static inline pgoff_t page_to_index(struct page *page)
{
- pgoff_t pgoff;
+ struct page *head;
if (likely(!PageTransTail(page)))
return page->index;
+ head = compound_head(page);
/*
* We don't initialize ->index for tail pages: calculate based on
* head page
*/
- pgoff = compound_head(page)->index;
- pgoff += page - compound_head(page);
- return pgoff;
+ return head->index + page - head;
}
extern pgoff_t hugetlb_basepage_index(struct page *page);
diff --git a/mm/rmap.c b/mm/rmap.c
index b9eb5c12f3fe..b2cebf35ffe7 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1230,11 +1230,13 @@ void page_add_file_rmap(struct page *page, bool compound)
nr_pages);
} else {
if (PageTransCompound(page) && page_mapping(page)) {
+ struct page *head = compound_head(page);
+
VM_WARN_ON_ONCE(!PageLocked(page));
- SetPageDoubleMap(compound_head(page));
+ SetPageDoubleMap(head);
if (PageMlocked(page))
- clear_page_mlock(compound_head(page));
+ clear_page_mlock(head);
}
if (!atomic_inc_and_test(&page->_mapcount))
goto out;