-rw-r--r--  include/linux/mm.h   34
-rw-r--r--  mm/memremap.c        20
-rw-r--r--  mm/swap.c            10
3 files changed, 20 insertions, 44 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a9d6473fc045..8a59f0456149 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1094,33 +1094,24 @@ static inline bool is_zone_movable_page(const struct page *page)
 #ifdef CONFIG_DEV_PAGEMAP_OPS
 DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
-static inline bool page_is_devmap_managed(struct page *page)
+bool __put_devmap_managed_page(struct page *page);
+static inline bool put_devmap_managed_page(struct page *page)
 {
 	if (!static_branch_unlikely(&devmap_managed_key))
 		return false;
 	if (!is_zone_device_page(page))
 		return false;
-	switch (page->pgmap->type) {
-	case MEMORY_DEVICE_PRIVATE:
-	case MEMORY_DEVICE_FS_DAX:
-		return true;
-	default:
-		break;
-	}
-	return false;
+	if (page->pgmap->type != MEMORY_DEVICE_PRIVATE &&
+	    page->pgmap->type != MEMORY_DEVICE_FS_DAX)
+		return false;
+	return __put_devmap_managed_page(page);
 }
-void put_devmap_managed_page(struct page *page);
-
 #else /* CONFIG_DEV_PAGEMAP_OPS */
-static inline bool page_is_devmap_managed(struct page *page)
+static inline bool put_devmap_managed_page(struct page *page)
 {
 	return false;
 }
-
-static inline void put_devmap_managed_page(struct page *page)
-{
-}
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
 static inline bool is_device_private_page(const struct page *page)
@@ -1220,16 +1211,11 @@ static inline void put_page(struct page *page)
 	struct folio *folio = page_folio(page);
 	/*
-	 * For devmap managed pages we need to catch refcount transition from
-	 * 2 to 1, when refcount reach one it means the page is free and we
-	 * need to inform the device driver through callback. See
-	 * include/linux/memremap.h and HMM for details.
+	 * For some devmap managed pages we need to catch refcount transition
+	 * from 2 to 1:
 	 */
-	if (page_is_devmap_managed(&folio->page)) {
-		put_devmap_managed_page(&folio->page);
+	if (put_devmap_managed_page(&folio->page))
 		return;
-	}
-
 	folio_put(folio);
 }
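
As an aside, here is a minimal userspace sketch of the calling convention the reworked put_devmap_managed_page() gives put_page() above: the helper returns true when it has already consumed the reference of a devmap-managed page, and false when the caller should fall through to the normal folio_put() path. The names below (toy_page, toy_put_page, ...) are made up for illustration and are not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Toy model only: stands in for struct page; not kernel code. */
struct toy_page {
	int refcount;
	bool devmap_managed;	/* think MEMORY_DEVICE_PRIVATE / FS_DAX */
};

/* Models the new put_devmap_managed_page(): returns true once it has
 * consumed the reference, false if the caller must do the normal put. */
static bool toy_put_devmap_managed_page(struct toy_page *page)
{
	if (!page->devmap_managed)
		return false;
	if (--page->refcount == 1)
		printf("refcount 2 -> 1: page is free, driver callback runs\n");
	return true;
}

/* Shape of the new put_page(): a single call, branch on its return. */
static void toy_put_page(struct toy_page *page)
{
	if (toy_put_devmap_managed_page(page))
		return;
	if (--page->refcount == 0)
		printf("ordinary page freed\n");
}

int main(void)
{
	struct toy_page devmap = { .refcount = 2, .devmap_managed = true };
	struct toy_page normal = { .refcount = 1, .devmap_managed = false };

	toy_put_page(&devmap);	/* devmap-managed: helper handles the put */
	toy_put_page(&normal);	/* ordinary page: falls through */
	return 0;
}

The point of returning a bool is that callers no longer need the separate page_is_devmap_managed() test followed by a call, which is exactly the pattern removed from put_page() and, further down, from release_pages().
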
diff --git a/mm/memremap.c b/mm/memremap.c
index 55d23e9f5c04..f41233a67edb 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -502,24 +502,22 @@ void free_devmap_managed_page(struct page *page)
 	page->pgmap->ops->page_free(page);
 }
-void put_devmap_managed_page(struct page *page)
+bool __put_devmap_managed_page(struct page *page)
 {
-	int count;
-
-	if (WARN_ON_ONCE(!page_is_devmap_managed(page)))
-		return;
-
-	count = page_ref_dec_return(page);
-
 	/*
 	 * devmap page refcounts are 1-based, rather than 0-based: if
 	 * refcount is 1, then the page is free and the refcount is
 	 * stable because nobody holds a reference on the page.
 	 */
-	if (count == 1)
+	switch (page_ref_dec_return(page)) {
+	case 1:
 		free_devmap_managed_page(page);
-	else if (!count)
+		break;
+	case 0:
 		__put_page(page);
+		break;
+	}
+	return true;
 }
-EXPORT_SYMBOL(put_devmap_managed_page);
+EXPORT_SYMBOL(__put_devmap_managed_page);
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
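
The switch in __put_devmap_managed_page() above leans on the 1-based counting described in the retained comment: an idle devmap-managed page sits at refcount 1, not 0, so the interesting transition is 2 -> 1. Below is a rough trace of that convention, again with made-up names and a plain int in place of the kernel's page_ref_dec_return() machinery.

#include <stdio.h>

static int refcount = 1;	/* idle devmap page: 1-based, never parked at 0 */

static void toy_put(void)
{
	switch (--refcount) {	/* mirrors switch (page_ref_dec_return(page)) */
	case 1:
		printf("2 -> 1: free; free_devmap_managed_page() would run\n");
		break;
	case 0:
		printf("1 -> 0: ordinary final put; __put_page() would run\n");
		break;
	default:
		printf("still referenced, refcount now %d\n", refcount);
		break;
	}
}

int main(void)
{
	refcount++;	/* e.g. the page gets mapped: 1 -> 2 */
	toy_put();	/* unmap: 2 -> 1, page handed back to the driver */
	return 0;
}

When exactly the 0 case is reached is outside what this hunk shows; the sketch only illustrates why 1, not 0, is the "free" value for these pages.
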
diff --git a/mm/swap.c b/mm/swap.c
index e499df864ef7..db8d0eea13d7 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -930,16 +930,8 @@ void release_pages(struct page **pages, int nr)
 				unlock_page_lruvec_irqrestore(lruvec, flags);
 				lruvec = NULL;
 			}
-			/*
-			 * ZONE_DEVICE pages that return 'false' from
-			 * page_is_devmap_managed() do not require special
-			 * processing, and instead, expect a call to
-			 * put_page_testzero().
-			 */
-			if (page_is_devmap_managed(page)) {
-				put_devmap_managed_page(page);
+			if (put_devmap_managed_page(page))
 				continue;
-			}
-
 			if (put_page_testzero(page))
 				put_dev_pagemap(page->pgmap);
 			continue;