From 3392ca121872dd8c33015c7703d4981c78819be3 Mon Sep 17 00:00:00 2001
From: "Vishal Moola (Oracle)"
Date: Mon, 17 Oct 2022 09:17:59 -0700
Subject: filemap: find_lock_entries() now updates start offset

Patch series "Rework find_get_entries() and find_lock_entries()", v3.

Originally the callers of find_get_entries() and find_lock_entries()
kept track of the start index themselves as they traversed the search
range.  This resulted in hacky code such as in shmem_undo_range():

	index = folio->index + folio_nr_pages(folio) - 1;

where the - 1 is only present to stay in the right spot after
incrementing index later.  This sort of calculation was also being done
on every folio even when index was not used later within that function.

These patches change find_get_entries() and find_lock_entries() to
calculate the new index instead of leaving it to the callers, so we can
avoid all these complications.

This patch (of 2):

Initially, find_lock_entries() was passed the start offset by value,
which left the calculation of the next offset to the callers and led to
complexity as they tried to keep track of the index.

Now find_lock_entries() takes a pointer to the start offset and updates
the value to point directly after the last entry found.  If no entry is
found, the offset is not changed.  This gets rid of multiple hacky
calculations that kept track of the start offset.

Link: https://lkml.kernel.org/r/20221017161800.2003-1-vishal.moola@gmail.com
Link: https://lkml.kernel.org/r/20221017161800.2003-2-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle)
Cc: Hugh Dickins
Cc: Matthew Wilcox
Signed-off-by: Andrew Morton
---
 mm/truncate.c | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

(limited to 'mm/truncate.c')

diff --git a/mm/truncate.c b/mm/truncate.c
index c0be77e5c008..b6065a494c71 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -361,9 +361,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
 
 	folio_batch_init(&fbatch);
 	index = start;
-	while (index < end && find_lock_entries(mapping, index, end - 1,
+	while (index < end && find_lock_entries(mapping, &index, end - 1,
 			&fbatch, indices)) {
-		index = indices[folio_batch_count(&fbatch) - 1] + 1;
 		truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
 		for (i = 0; i < folio_batch_count(&fbatch); i++)
 			truncate_cleanup_folio(fbatch.folios[i]);
@@ -510,20 +509,17 @@ unsigned long invalidate_mapping_pagevec(struct address_space *mapping,
 	int i;
 
 	folio_batch_init(&fbatch);
-	while (find_lock_entries(mapping, index, end, &fbatch, indices)) {
+	while (find_lock_entries(mapping, &index, end, &fbatch, indices)) {
 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
 			struct folio *folio = fbatch.folios[i];
 
 			/* We rely upon deletion not changing folio->index */
-			index = indices[i];
 
 			if (xa_is_value(folio)) {
 				count += invalidate_exceptional_entry(mapping,
-							  index,
-							  folio);
+							  indices[i], folio);
 				continue;
 			}
-			index += folio_nr_pages(folio) - 1;
 
 			ret = mapping_evict_folio(mapping, folio);
 			folio_unlock(folio);
@@ -542,7 +538,6 @@ unsigned long invalidate_mapping_pagevec(struct address_space *mapping,
 		folio_batch_remove_exceptionals(&fbatch);
 		folio_batch_release(&fbatch);
 		cond_resched();
-		index++;
 	}
 	return count;
 }
--
cgit v1.2.3
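Since the new contract only shows through diff context, here is a
minimal caller sketch.  It is illustrative, not code from the series:
it assumes the patched find_lock_entries() and the usual mm-internal
types, and process_batch() is a hypothetical stand-in for whatever
per-batch work a real caller does.

	static void walk_range(struct address_space *mapping, pgoff_t index,
			       pgoff_t end)
	{
		struct folio_batch fbatch;
		pgoff_t indices[PAGEVEC_SIZE];

		folio_batch_init(&fbatch);
		/*
		 * find_lock_entries() now writes the offset directly after
		 * the last entry found back through &index; if nothing is
		 * found, index is left unchanged and the loop ends.  The
		 * caller no longer recomputes the cursor from indices[].
		 */
		while (index < end &&
		       find_lock_entries(mapping, &index, end - 1,
					 &fbatch, indices)) {
			process_batch(mapping, &fbatch, indices); /* hypothetical */
			folio_batch_release(&fbatch);
			cond_resched();
		}
	}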
From 9fb6beea79c6e7c959adf4fb7b94cf9a6028b941 Mon Sep 17 00:00:00 2001
From: "Vishal Moola (Oracle)"
Date: Mon, 17 Oct 2022 09:18:00 -0700
Subject: filemap: find_get_entries() now updates start offset

Initially, find_get_entries() was passed the start offset by value,
which left the calculation of the next offset to the callers and led to
complexity as they tried to keep track of the index.

Now find_get_entries() takes a pointer to the start offset and updates
the value to point directly after the last entry found.  If no entry is
found, the offset is not changed.  This gets rid of multiple hacky
calculations that kept track of the start offset.

Link: https://lkml.kernel.org/r/20221017161800.2003-3-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle)
Cc: Hugh Dickins
Cc: Matthew Wilcox
Signed-off-by: Andrew Morton
---
 mm/filemap.c  | 13 +++++++++++--
 mm/internal.h |  2 +-
 mm/shmem.c    | 11 ++++-------
 mm/truncate.c | 19 +++++++------------
 4 files changed, 23 insertions(+), 22 deletions(-)

(limited to 'mm/truncate.c')

diff --git a/mm/filemap.c b/mm/filemap.c
index 3a73b7b8c2a4..65eee6ec1066 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2048,10 +2048,10 @@ reset:
  *
  * Return: The number of entries which were found.
  */
-unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
+unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
 		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
 {
-	XA_STATE(xas, &mapping->i_pages, start);
+	XA_STATE(xas, &mapping->i_pages, *start);
 	struct folio *folio;
 
 	rcu_read_lock();
@@ -2062,6 +2062,15 @@ unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
 	}
 	rcu_read_unlock();
 
+	if (folio_batch_count(fbatch)) {
+		unsigned long nr = 1;
+		int idx = folio_batch_count(fbatch) - 1;
+
+		folio = fbatch->folios[idx];
+		if (!xa_is_value(folio) && !folio_test_hugetlb(folio))
+			nr = folio_nr_pages(folio);
+		*start = indices[idx] + nr;
+	}
 	return folio_batch_count(fbatch);
 }

diff --git a/mm/internal.h b/mm/internal.h
index c504ac7267e0..68afdbe7106e 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -108,7 +108,7 @@ static inline void force_page_cache_readahead(struct address_space *mapping,
 
 unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
 		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
-unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
+unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
 		pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices);
 void filemap_free_folio(struct address_space *mapping, struct folio *folio);
 int truncate_inode_folio(struct address_space *mapping, struct folio *folio);

diff --git a/mm/shmem.c b/mm/shmem.c
index 6b560c3915af..9c897cf3fb99 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -973,7 +973,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 	while (index < end) {
 		cond_resched();
 
-		if (!find_get_entries(mapping, index, end - 1, &fbatch,
+		if (!find_get_entries(mapping, &index, end - 1, &fbatch,
 				indices)) {
 			/* If all gone or hole-punch or unfalloc, we're done */
 			if (index == start || end != -1)
@@ -985,13 +985,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
 			folio = fbatch.folios[i];
 
-			index = indices[i];
 			if (xa_is_value(folio)) {
 				if (unfalloc)
 					continue;
-				if (shmem_free_swap(mapping, index, folio)) {
+				if (shmem_free_swap(mapping, indices[i], folio)) {
 					/* Swap was replaced by page: retry */
-					index--;
+					index = indices[i];
 					break;
 				}
 				nr_swaps_freed++;
@@ -1004,19 +1003,17 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 				if (folio_mapping(folio) != mapping) {
 					/* Page was replaced by swap: retry */
 					folio_unlock(folio);
-					index--;
+					index = indices[i];
 					break;
 				}
 				VM_BUG_ON_FOLIO(folio_test_writeback(folio),
 						folio);
 				truncate_inode_folio(mapping, folio);
 			}
-			index = folio->index + folio_nr_pages(folio) - 1;
 			folio_unlock(folio);
 		}
 		folio_batch_remove_exceptionals(&fbatch);
 		folio_batch_release(&fbatch);
-		index++;
 	}
 
 	spin_lock_irq(&info->lock);

diff --git a/mm/truncate.c b/mm/truncate.c
index b6065a494c71..c7bfd247a651 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -400,7 +400,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
 	index = start;
 	while (index < end) {
 		cond_resched();
-		if (!find_get_entries(mapping, index, end - 1, &fbatch,
+		if (!find_get_entries(mapping, &index, end - 1, &fbatch,
 				indices)) {
 			/* If all gone from start onwards, we're done */
 			if (index == start)
@@ -414,21 +414,18 @@ void truncate_inode_pages_range(struct address_space *mapping,
 			struct folio *folio = fbatch.folios[i];
 
 			/* We rely upon deletion not changing page->index */
-			index = indices[i];
 			if (xa_is_value(folio))
 				continue;
 
 			folio_lock(folio);
-			VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
+			VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
 			folio_wait_writeback(folio);
 			truncate_inode_folio(mapping, folio);
 			folio_unlock(folio);
-			index = folio_index(folio) + folio_nr_pages(folio) - 1;
 		}
 		truncate_folio_batch_exceptionals(mapping, &fbatch, indices);
 		folio_batch_release(&fbatch);
-		index++;
 	}
 }
 EXPORT_SYMBOL(truncate_inode_pages_range);
@@ -636,16 +633,15 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 
 	folio_batch_init(&fbatch);
 	index = start;
-	while (find_get_entries(mapping, index, end, &fbatch, indices)) {
+	while (find_get_entries(mapping, &index, end, &fbatch, indices)) {
 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
 			struct folio *folio = fbatch.folios[i];
 
 			/* We rely upon deletion not changing folio->index */
-			index = indices[i];
 			if (xa_is_value(folio)) {
 				if (!invalidate_exceptional_entry2(mapping,
-						index, folio))
+						indices[i], folio))
 					ret = -EBUSY;
 				continue;
 			}
@@ -655,13 +651,13 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 				 * If folio is mapped, before taking its lock,
 				 * zap the rest of the file in one hit.
 				 */
-				unmap_mapping_pages(mapping, index,
-						(1 + end - index), false);
+				unmap_mapping_pages(mapping, indices[i],
+						(1 + end - indices[i]), false);
 				did_range_unmap = 1;
 			}
 
 			folio_lock(folio);
-			VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);
+			VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio);
 			if (folio->mapping != mapping) {
 				folio_unlock(folio);
 				continue;
@@ -684,7 +680,6 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 		folio_batch_remove_exceptionals(&fbatch);
 		folio_batch_release(&fbatch);
 		cond_resched();
-		index++;
 	}
 	/*
 	 * For DAX we invalidate page tables after invalidating page cache.  We
--
cgit v1.2.3
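Restating the cursor-update rule outside diff context may help: the
block below is a commented paraphrase of the hunk added to
find_get_entries() above, not additional code.  The comments are the
only addition; the hugetlb check reflects that, at this point in the
kernel, hugetlb folios were still indexed in the page cache in units of
huge pages, so advancing by folio_nr_pages() (which counts base pages)
would overshoot.

	if (folio_batch_count(fbatch)) {
		unsigned long nr = 1;
		int idx = folio_batch_count(fbatch) - 1;

		folio = fbatch->folios[idx];
		/* Shadow/swap (value) entries occupy exactly one index. */
		if (!xa_is_value(folio) && !folio_test_hugetlb(folio))
			nr = folio_nr_pages(folio);
		/* Advance the caller's cursor past the last entry found. */
		*start = indices[idx] + nr;
	}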
From 6e1ca48d0669b0f5efcbaa051b23cd8e651a1614 Mon Sep 17 00:00:00 2001
From: "Vishal Moola (Oracle)"
Date: Tue, 1 Nov 2022 10:53:26 -0700
Subject: folio-compat: remove lru_cache_add()

There are no longer any callers of lru_cache_add(), so remove it.  This
saves 79 bytes of kernel text.  Also clean up some comments so that
they reference the new folio_add_lru() instead.
Link: https://lkml.kernel.org/r/20221101175326.13265-6-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle)
Reviewed-by: Matthew Wilcox (Oracle)
Cc: Mike Kravetz
Cc: Miklos Szeredi
Signed-off-by: Andrew Morton
---
 include/linux/swap.h | 1 -
 mm/folio-compat.c    | 6 ------
 mm/truncate.c        | 2 +-
 mm/workingset.c      | 5 ++++-
 4 files changed, 5 insertions(+), 9 deletions(-)

(limited to 'mm/truncate.c')

diff --git a/include/linux/swap.h b/include/linux/swap.h
index b61e2007d156..0ceed49516ad 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -381,7 +381,6 @@ void lru_note_cost(struct lruvec *lruvec, bool file,
 void lru_note_cost_refault(struct folio *);
 void folio_add_lru(struct folio *);
 void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
-void lru_cache_add(struct page *);
 void mark_page_accessed(struct page *);
 void folio_mark_accessed(struct folio *);

diff --git a/mm/folio-compat.c b/mm/folio-compat.c
index 86933fa8f3e1..69ed25790c68 100644
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -76,12 +76,6 @@ bool redirty_page_for_writepage(struct writeback_control *wbc,
 }
 EXPORT_SYMBOL(redirty_page_for_writepage);
 
-void lru_cache_add(struct page *page)
-{
-	folio_add_lru(page_folio(page));
-}
-EXPORT_SYMBOL(lru_cache_add);
-
 void lru_cache_add_inactive_or_unevictable(struct page *page,
 		struct vm_area_struct *vma)
 {

diff --git a/mm/truncate.c b/mm/truncate.c
index c7bfd247a651..7b4ea4c4a46b 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -565,7 +565,7 @@ EXPORT_SYMBOL(invalidate_mapping_pages);
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
- * sitting in the lru_cache_add() pagevecs.
+ * sitting in the folio_add_lru() pagevecs.
 */
 static int invalidate_complete_folio2(struct address_space *mapping,
 					struct folio *folio)

diff --git a/mm/workingset.c b/mm/workingset.c
index d2d02978588c..1a86645b7b3c 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -492,7 +492,10 @@ void workingset_refault(struct folio *folio, void *shadow)
 	/* Folio was active prior to eviction */
 	if (workingset) {
 		folio_set_workingset(folio);
-		/* XXX: Move to lru_cache_add() when it supports new vs putback */
+		/*
+		 * XXX: Move to folio_add_lru() when it supports new vs
+		 * putback
+		 */
 		lru_note_cost_refault(folio);
 		mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + file, nr);
 	}
--
cgit v1.2.3
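For any out-of-tree caller still using the removed wrapper, the
conversion is exactly what the deleted folio-compat shim did.  A
sketch, where page is whatever struct page the caller already holds:

	/* Before this patch (compat wrapper, now removed): */
	lru_cache_add(page);

	/* After: use the folio API directly, as the shim did internally. */
	folio_add_lru(page_folio(page));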