-rw-r--r--   fs/erofs/decompressor.c |  2
-rw-r--r--   fs/erofs/internal.h     |  2
-rw-r--r--   fs/erofs/utils.c        |  4
-rw-r--r--   fs/erofs/zdata.c        | 37
4 files changed, 21 insertions, 24 deletions
diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
index 19f89f9fb10c..2890a67a1ded 100644
--- a/fs/erofs/decompressor.c
+++ b/fs/erofs/decompressor.c
@@ -73,7 +73,7 @@ static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
 			victim = availables[--top];
 			get_page(victim);
 		} else {
-			victim = erofs_allocpage(pagepool, GFP_KERNEL, false);
+			victim = erofs_allocpage(pagepool, GFP_KERNEL);
 			if (!victim)
 				return -ENOMEM;
 			victim->mapping = Z_EROFS_MAPPING_STAGING;
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 544a453f3076..0c1175a08e54 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -382,7 +382,7 @@ int erofs_namei(struct inode *dir, struct qstr *name,
 extern const struct file_operations erofs_dir_fops;
 
 /* utils.c / zdata.c */
-struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail);
+struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp);
 
 #if (EROFS_PCPUBUF_NR_PAGES > 0)
 void *erofs_get_pcpubuf(unsigned int pagenr);
diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
index f66043ee16b9..1e8e1450d5b0 100644
--- a/fs/erofs/utils.c
+++ b/fs/erofs/utils.c
@@ -7,7 +7,7 @@
 #include "internal.h"
 #include <linux/pagevec.h>
 
-struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail)
+struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
 {
 	struct page *page;
 
@@ -16,7 +16,7 @@ struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail)
 		DBG_BUGON(page_ref_count(page) != 1);
 		list_del(&page->lru);
 	} else {
-		page = alloc_pages(gfp | (nofail ? __GFP_NOFAIL : 0), 0);
+		page = alloc_page(gfp);
 	}
 	return page;
 }
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 93f8bc1a64f6..1c582a3a40a3 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -546,15 +546,6 @@ static bool z_erofs_collector_end(struct z_erofs_collector *clt)
 	return true;
 }
 
-static inline struct page *__stagingpage_alloc(struct list_head *pagepool,
-					       gfp_t gfp)
-{
-	struct page *page = erofs_allocpage(pagepool, gfp, true);
-
-	page->mapping = Z_EROFS_MAPPING_STAGING;
-	return page;
-}
-
 static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
 				       unsigned int cachestrategy,
 				       erofs_off_t la)
@@ -661,8 +652,9 @@ retry:
 		/* should allocate an additional staging page for pagevec */
 		if (err == -EAGAIN) {
 			struct page *const newpage =
-				__stagingpage_alloc(pagepool, GFP_NOFS);
+				erofs_allocpage(pagepool, GFP_NOFS | __GFP_NOFAIL);
 
+			newpage->mapping = Z_EROFS_MAPPING_STAGING;
 			err = z_erofs_attach_page(clt, newpage,
 						  Z_EROFS_PAGE_TYPE_EXCLUSIVE);
 			if (!err)
@@ -1079,19 +1071,24 @@ repeat:
 	unlock_page(page);
 	put_page(page);
 out_allocpage:
-	page = __stagingpage_alloc(pagepool, gfp);
-	if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) {
-		list_add(&page->lru, pagepool);
-		cpu_relax();
-		goto repeat;
-	}
-	if (!tocache)
-		goto out;
-	if (add_to_page_cache_lru(page, mc, index + nr, gfp)) {
+	page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL);
+	if (!tocache || add_to_page_cache_lru(page, mc, index + nr, gfp)) {
+		/* non-LRU / non-movable temporary page is needed */
 		page->mapping = Z_EROFS_MAPPING_STAGING;
-		goto out;
+		tocache = false;
 	}
+
+	if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) {
+		if (tocache) {
+			/* since it added to managed cache successfully */
+			unlock_page(page);
+			put_page(page);
+		} else {
+			list_add(&page->lru, pagepool);
+		}
+		cond_resched();
+		goto repeat;
+	}
 	set_page_private(page, (unsigned long)pcl);
 	SetPagePrivate(page);
 out:	/* the only exit (for tracing and debugging) */
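
Note (not part of the patch): with the reworked erofs_allocpage() above, callers that must not fail request __GFP_NOFAIL through the gfp mask themselves instead of passing a separate nofail flag. A minimal sketch of the two post-patch call styles, using only names that appear in this diff:

	struct page *page;
	LIST_HEAD(pagepool);	/* per-call page pool, as the erofs callers pass in */

	/* fallible allocation: the caller must handle NULL (cf. decompressor.c) */
	page = erofs_allocpage(&pagepool, GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	/* must-succeed allocation: __GFP_NOFAIL goes into the gfp mask (cf. zdata.c) */
	page = erofs_allocpage(&pagepool, GFP_NOFS | __GFP_NOFAIL);
	page->mapping = Z_EROFS_MAPPING_STAGING;

As the utils.c hunk shows, erofs_allocpage() first tries to reuse a page from the pool and only falls back to alloc_page(gfp) when the pool is empty, so __GFP_NOFAIL only affects the fallback path.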