Diffstat (limited to 'mm/z3fold.c')
 mm/z3fold.c | 51 ++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 36 insertions(+), 15 deletions(-)
diff --git a/mm/z3fold.c b/mm/z3fold.c
index a43e8bfcaaea..1a029a7432ee 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* z3fold.c
*
@@ -101,6 +102,7 @@ struct z3fold_buddy_slots {
* @refcount: reference count for the z3fold page
* @work: work_struct for page layout optimization
* @slots: pointer to the structure holding buddy slots
+ * @pool: pointer to the containing pool
* @cpu: CPU which this page "belongs" to
* @first_chunks: the size of the first buddy in chunks, 0 if free
* @middle_chunks: the size of the middle buddy in chunks, 0 if free
@@ -114,6 +116,7 @@ struct z3fold_header {
struct kref refcount;
struct work_struct work;
struct z3fold_buddy_slots *slots;
+ struct z3fold_pool *pool;
short cpu;
unsigned short first_chunks;
unsigned short middle_chunks;
@@ -190,10 +193,13 @@ static int size_to_chunks(size_t size)
static void compact_page_work(struct work_struct *w);
-static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool)
+static inline struct z3fold_buddy_slots *alloc_slots(struct z3fold_pool *pool,
+ gfp_t gfp)
{
- struct z3fold_buddy_slots *slots = kmem_cache_alloc(pool->c_handle,
- GFP_KERNEL);
+ struct z3fold_buddy_slots *slots;
+
+ slots = kmem_cache_alloc(pool->c_handle,
+ (gfp & ~(__GFP_HIGHMEM | __GFP_MOVABLE)));
if (slots) {
memset(slots->slot, 0, sizeof(slots->slot));
@@ -290,10 +296,10 @@ static void z3fold_unregister_migration(struct z3fold_pool *pool)
/* Initializes the z3fold header of a newly allocated z3fold page */
static struct z3fold_header *init_z3fold_page(struct page *page,
- struct z3fold_pool *pool)
+ struct z3fold_pool *pool, gfp_t gfp)
{
struct z3fold_header *zhdr = page_address(page);
- struct z3fold_buddy_slots *slots = alloc_slots(pool);
+ struct z3fold_buddy_slots *slots = alloc_slots(pool, gfp);
if (!slots)
return NULL;
@@ -314,6 +320,7 @@ static struct z3fold_header *init_z3fold_page(struct page *page,
zhdr->start_middle = 0;
zhdr->cpu = -1;
zhdr->slots = slots;
+ zhdr->pool = pool;
INIT_LIST_HEAD(&zhdr->buddy);
INIT_WORK(&zhdr->work, compact_page_work);
return zhdr;
@@ -420,7 +427,7 @@ static enum buddy handle_to_buddy(unsigned long handle)
static inline struct z3fold_pool *zhdr_to_pool(struct z3fold_header *zhdr)
{
- return slots_to_pool(zhdr->slots);
+ return zhdr->pool;
}
static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked)
@@ -844,7 +851,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp,
enum buddy bud;
bool can_sleep = gfpflags_allow_blocking(gfp);
- if (!size || (gfp & __GFP_HIGHMEM))
+ if (!size)
return -EINVAL;
if (size > PAGE_SIZE)
@@ -907,7 +914,7 @@ retry:
if (!page)
return -ENOMEM;
- zhdr = init_z3fold_page(page, pool);
+ zhdr = init_z3fold_page(page, pool, gfp);
if (!zhdr) {
__free_page(page);
return -ENOMEM;
@@ -918,7 +925,16 @@ retry:
set_bit(PAGE_HEADLESS, &page->private);
goto headless;
}
- __SetPageMovable(page, pool->inode->i_mapping);
+ if (can_sleep) {
+ lock_page(page);
+ __SetPageMovable(page, pool->inode->i_mapping);
+ unlock_page(page);
+ } else {
+ if (trylock_page(page)) {
+ __SetPageMovable(page, pool->inode->i_mapping);
+ unlock_page(page);
+ }
+ }
z3fold_page_lock(zhdr);
found:
@@ -1325,28 +1341,34 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
VM_BUG_ON_PAGE(!PageMovable(page), page);
VM_BUG_ON_PAGE(!PageIsolated(page), page);
+ VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
zhdr = page_address(page);
pool = zhdr_to_pool(zhdr);
- if (!trylock_page(page))
- return -EAGAIN;
-
if (!z3fold_page_trylock(zhdr)) {
- unlock_page(page);
return -EAGAIN;
}
if (zhdr->mapped_count != 0) {
z3fold_page_unlock(zhdr);
- unlock_page(page);
return -EBUSY;
}
+ if (work_pending(&zhdr->work)) {
+ z3fold_page_unlock(zhdr);
+ return -EAGAIN;
+ }
new_zhdr = page_address(newpage);
memcpy(new_zhdr, zhdr, PAGE_SIZE);
newpage->private = page->private;
page->private = 0;
z3fold_page_unlock(zhdr);
spin_lock_init(&new_zhdr->page_lock);
+ INIT_WORK(&new_zhdr->work, compact_page_work);
+ /*
+ * z3fold_page_isolate() ensures that new_zhdr->buddy is empty,
+ * so we only have to reinitialize it.
+ */
+ INIT_LIST_HEAD(&new_zhdr->buddy);
new_mapping = page_mapping(page);
__ClearPageMovable(page);
ClearPagePrivate(page);
@@ -1370,7 +1392,6 @@ static int z3fold_page_migrate(struct address_space *mapping, struct page *newpa
queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);
page_mapcount_reset(page);
- unlock_page(page);
put_page(page);
return 0;
}
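
Two behavioral points in this patch lend themselves to a quick illustration: alloc_slots() now masks __GFP_HIGHMEM and __GFP_MOVABLE off the caller-supplied gfp before kmem_cache_alloc(), since slab-backed metadata must not inherit highmem/movable placement hints, and z3fold_alloc() uses gfpflags_allow_blocking() to choose between lock_page() and trylock_page() around __SetPageMovable(). The userspace sketch below models only that flag logic; the bit values, the GFP_KERNEL/GFP_ATOMIC simplifications, and the helper names slots_gfp()/can_sleep() are illustrative placeholders, not the kernel's real gfp_t encodings or API.

#include <stdio.h>

typedef unsigned int gfp_t;

/* Illustrative placeholder bits; the kernel's real gfp_t encodings differ. */
#define GFP_HIGHMEM_BIT        0x02u
#define GFP_MOVABLE_BIT        0x08u
#define GFP_DIRECT_RECLAIM_BIT 0x400u
#define GFP_KERNEL_SKETCH      (GFP_DIRECT_RECLAIM_BIT)
#define GFP_ATOMIC_SKETCH      0x0u

/* Mirrors the masking done in alloc_slots(): the buddy-slots object comes
 * from a slab cache, so highmem/movable placement hints are stripped. */
static gfp_t slots_gfp(gfp_t gfp)
{
	return gfp & ~(GFP_HIGHMEM_BIT | GFP_MOVABLE_BIT);
}

/* Mirrors the idea behind gfpflags_allow_blocking(): only requests that
 * permit direct reclaim may sleep, which is what selects lock_page() over
 * trylock_page() around __SetPageMovable() in z3fold_alloc(). */
static int can_sleep(gfp_t gfp)
{
	return (gfp & GFP_DIRECT_RECLAIM_BIT) != 0;
}

int main(void)
{
	gfp_t atomic_movable = GFP_ATOMIC_SKETCH | GFP_HIGHMEM_BIT | GFP_MOVABLE_BIT;
	gfp_t kernel_plain   = GFP_KERNEL_SKETCH;

	printf("atomic+movable: slots gfp %#x, can sleep %d\n",
	       slots_gfp(atomic_movable), can_sleep(atomic_movable));
	printf("kernel:         slots gfp %#x, can sleep %d\n",
	       slots_gfp(kernel_plain), can_sleep(kernel_plain));
	return 0;
}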