From 351618b203acef13946a03ecf18fbe328c3cdb58 Mon Sep 17 00:00:00 2001
From: Vitaly Wool
Date: Mon, 13 May 2019 17:22:46 -0700
Subject: mm/z3fold.c: improve compression by extending search

The current z3fold implementation only searches this CPU's page lists
for a fitting page to put a new object into.  This patch adds a quick
search for very well fitting pages (i.e. those having exactly the
required number of free chunks) on other CPUs too, before allocating a
new page for that object.

Link: http://lkml.kernel.org/r/20190417103733.72ae81abe1552397c95a008e@gmail.com
Signed-off-by: Vitaly Wool
Cc: Bartlomiej Zolnierkiewicz
Cc: Dan Streetman
Cc: Krzysztof Kozlowski
Cc: Oleksiy Avramchenko
Cc: Uladzislau Rezki
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/z3fold.c | 36 ++++++++++++++++++++++++++++++++++++
 1 file changed, 36 insertions(+)

diff --git a/mm/z3fold.c b/mm/z3fold.c
index 7a59875d880c..29a4f1249bef 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -522,6 +522,42 @@ lookup:
         }
         put_cpu_ptr(pool->unbuddied);
 
+        if (!zhdr) {
+                int cpu;
+
+                /* look for _exact_ match on other cpus' lists */
+                for_each_online_cpu(cpu) {
+                        struct list_head *l;
+
+                        unbuddied = per_cpu_ptr(pool->unbuddied, cpu);
+                        spin_lock(&pool->lock);
+                        l = &unbuddied[chunks];
+
+                        zhdr = list_first_entry_or_null(READ_ONCE(l),
+                                                struct z3fold_header, buddy);
+
+                        if (!zhdr || !z3fold_page_trylock(zhdr)) {
+                                spin_unlock(&pool->lock);
+                                zhdr = NULL;
+                                continue;
+                        }
+                        list_del_init(&zhdr->buddy);
+                        zhdr->cpu = -1;
+                        spin_unlock(&pool->lock);
+
+                        page = virt_to_page(zhdr);
+                        if (test_bit(NEEDS_COMPACTING, &page->private)) {
+                                z3fold_page_unlock(zhdr);
+                                zhdr = NULL;
+                                if (can_sleep)
+                                        cond_resched();
+                                continue;
+                        }
+                        kref_get(&zhdr->refcount);
+                        break;
+                }
+        }
+
         return zhdr;
 }
--
cgit v1.2.3
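
For readers who want the idea of the hunk above in isolation, here is a minimal
userspace sketch of the two-stage lookup it implements: first an "any fit"
search on the local CPU's lists, then an exact-fit scan of the other CPUs'
lists before giving up and allocating a new page.  All names below (fake_pool,
fake_page, find_fit, NCPUS, NCHUNKS) are hypothetical simplifications, not the
z3fold API, and the sketch deliberately omits the locking, refcounting and
NEEDS_COMPACTING handling that the real kernel code performs.

/*
 * Simplified illustration of the exact-fit search across per-CPU lists.
 * Hypothetical types; not kernel code.
 */
#include <stddef.h>
#include <stdio.h>

#define NCPUS   4
#define NCHUNKS 64

struct fake_page {
        int free_chunks;              /* how many free chunks this page has */
        struct fake_page *next;       /* next page on the same list */
};

struct fake_pool {
        /* unbuddied[cpu][n]: pages on CPU 'cpu' with exactly n free chunks */
        struct fake_page *unbuddied[NCPUS][NCHUNKS];
};

/*
 * First look on the current CPU for any page that fits (>= chunks free),
 * then fall back to scanning the other CPUs' lists for an _exact_ fit,
 * mirroring the structure of the hunk above.
 */
static struct fake_page *find_fit(struct fake_pool *pool, int this_cpu,
                                  int chunks)
{
        struct fake_page *page;
        int cpu, n;

        /* local search: any list with at least 'chunks' free chunks */
        for (n = chunks; n < NCHUNKS; n++) {
                page = pool->unbuddied[this_cpu][n];
                if (page) {
                        pool->unbuddied[this_cpu][n] = page->next;
                        return page;
                }
        }

        /* remote search: only the exact-fit list on every other CPU */
        for (cpu = 0; cpu < NCPUS; cpu++) {
                if (cpu == this_cpu)
                        continue;
                page = pool->unbuddied[cpu][chunks];
                if (page) {
                        pool->unbuddied[cpu][chunks] = page->next;
                        return page;
                }
        }

        return NULL;                  /* caller would allocate a fresh page */
}

int main(void)
{
        struct fake_pool pool = { 0 };
        struct fake_page remote = { .free_chunks = 3, .next = NULL };

        /* a page with exactly 3 free chunks sits on CPU 2's list */
        pool.unbuddied[2][3] = &remote;

        /* CPU 0 asks for 3 chunks: its own lists are empty, remote exact fit wins */
        printf("found page with %d free chunks\n",
               find_fit(&pool, 0, 3) ? remote.free_chunks : -1);
        return 0;
}

Unlike this sketch, the real hunk only removes a remote entry after taking
pool->lock, trylocking the page, clearing its CPU affinity (zhdr->cpu = -1),
skipping pages queued for compaction, and taking a reference on the header.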