Diffstat (limited to 'mm/truncate.c')
-rw-r--r--	mm/truncate.c	91
1 file changed, 11 insertions(+), 80 deletions(-)
diff --git a/mm/truncate.c b/mm/truncate.c
index 8aa4907e06e0..de7f4f47f780 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -326,51 +326,19 @@ void truncate_inode_pages_range(struct address_space *mapping,
pagevec_init(&pvec);
index = start;
- while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
- min(end - index, (pgoff_t)PAGEVEC_SIZE),
- indices)) {
- /*
- * Pagevec array has exceptional entries and we may also fail
- * to lock some pages. So we store pages that can be deleted
- * in a new pagevec.
- */
- struct pagevec locked_pvec;
-
- pagevec_init(&locked_pvec);
- for (i = 0; i < pagevec_count(&pvec); i++) {
- struct page *page = pvec.pages[i];
-
- /* We rely upon deletion not changing page->index */
- index = indices[i];
- if (index >= end)
- break;
-
- if (xa_is_value(page))
- continue;
-
- if (!trylock_page(page))
- continue;
- WARN_ON(page_to_index(page) != index);
- if (PageWriteback(page)) {
- unlock_page(page);
- continue;
- }
- if (page->mapping != mapping) {
- unlock_page(page);
- continue;
- }
- pagevec_add(&locked_pvec, page);
- }
- for (i = 0; i < pagevec_count(&locked_pvec); i++)
- truncate_cleanup_page(mapping, locked_pvec.pages[i]);
- delete_from_page_cache_batch(mapping, &locked_pvec);
- for (i = 0; i < pagevec_count(&locked_pvec); i++)
- unlock_page(locked_pvec.pages[i]);
+ while (index < end && find_lock_entries(mapping, index, end - 1,
+ &pvec, indices)) {
+ index = indices[pagevec_count(&pvec) - 1] + 1;
truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
+ for (i = 0; i < pagevec_count(&pvec); i++)
+ truncate_cleanup_page(mapping, pvec.pages[i]);
+ delete_from_page_cache_batch(mapping, &pvec);
+ for (i = 0; i < pagevec_count(&pvec); i++)
+ unlock_page(pvec.pages[i]);
pagevec_release(&pvec);
cond_resched();
- index++;
}
+
if (partial_start) {
struct page *page = find_lock_page(mapping, start - 1);
if (page) {
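
Reassembled from the + lines in the hunk above, the truncation loop now asks find_lock_entries() for a batch and takes its next starting index from the last entry returned, instead of recomputing it per page; the trylock/writeback/mapping checks drop out of the caller, which suggests find_lock_entries() only hands back pages it has already locked, along with any value entries. The snippet below is a minimal userspace model of that batch-and-advance pattern, not kernel code: struct batch, fill_batch() and BATCH_SIZE are made-up stand-ins for struct pagevec, find_lock_entries() and PAGEVEC_SIZE.

#include <stdio.h>
#include <stddef.h>

#define BATCH_SIZE 15	/* stand-in for PAGEVEC_SIZE, which is 15 here */

/* Made-up stand-in for struct pagevec: just the indices of the entries. */
struct batch {
	size_t count;
	unsigned long index[BATCH_SIZE];
};

/* Made-up stand-in for find_lock_entries(): report up to BATCH_SIZE
 * entries whose indices lie in the inclusive range [start, end]. */
static size_t fill_batch(struct batch *b, unsigned long start, unsigned long end)
{
	b->count = 0;
	while (start <= end && b->count < BATCH_SIZE)
		b->index[b->count++] = start++;
	return b->count;
}

int main(void)
{
	struct batch b;
	unsigned long index = 0, end = 40;	/* end is exclusive, as in truncate */

	/* Same shape as the patched loop: jump past the last entry the
	 * lookup returned rather than advancing the index per page. */
	while (index < end && fill_batch(&b, index, end - 1)) {
		index = b.index[b.count - 1] + 1;
		printf("batch of %zu entries, next lookup starts at %lu\n",
		       b.count, index);
	}
	return 0;
}
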
@@ -539,9 +507,7 @@ static unsigned long __invalidate_mapping_pages(struct address_space *mapping,
int i;
pagevec_init(&pvec);
- while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
- min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
- indices)) {
+ while (find_lock_entries(mapping, index, end, &pvec, indices)) {
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
@@ -555,39 +521,7 @@ static unsigned long __invalidate_mapping_pages(struct address_space *mapping,
page);
continue;
}
-
- if (!trylock_page(page))
- continue;
-
- WARN_ON(page_to_index(page) != index);
-
- /* Middle of THP: skip */
- if (PageTransTail(page)) {
- unlock_page(page);
- continue;
- } else if (PageTransHuge(page)) {
- index += HPAGE_PMD_NR - 1;
- i += HPAGE_PMD_NR - 1;
- /*
- * 'end' is in the middle of THP. Don't
- * invalidate the page as the part outside of
- * 'end' could be still useful.
- */
- if (index > end) {
- unlock_page(page);
- continue;
- }
-
- /* Take a pin outside pagevec */
- get_page(page);
-
- /*
- * Drop extra pins before trying to invalidate
- * the huge page.
- */
- pagevec_remove_exceptionals(&pvec);
- pagevec_release(&pvec);
- }
+ index += thp_nr_pages(page) - 1;
ret = invalidate_inode_page(page);
unlock_page(page);
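
The invalidate loop loses its open-coded transparent-huge-page handling the same way: rather than special-casing PageTransTail/PageTransHuge, taking an extra pin and releasing the pagevec early, it simply advances index by thp_nr_pages(page) - 1 so the next lookup starts after the tail pages of a huge entry. A tiny userspace model of that skip, with a made-up entry type standing in for struct page:

#include <stdio.h>
#include <stddef.h>

/* Made-up stand-in for a page-cache entry: a head index plus the number
 * of base pages it covers (1 for a normal page, 512 for a 2MB THP with
 * 4KB base pages). */
struct entry {
	unsigned long index;
	unsigned int nr_pages;
};

int main(void)
{
	const struct entry found[] = { { 3, 1 }, { 512, 512 }, { 1024, 1 } };
	unsigned long index = 0;

	for (size_t i = 0; i < sizeof(found) / sizeof(found[0]); i++) {
		index = found[i].index;
		/* Same idiom as "index += thp_nr_pages(page) - 1": skip the
		 * tail pages so they are not visited individually. */
		index += found[i].nr_pages - 1;
		printf("entry at %lu covers up to index %lu\n",
		       found[i].index, index);
	}
	return 0;
}
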
@@ -601,9 +535,6 @@ static unsigned long __invalidate_mapping_pages(struct address_space *mapping,
if (nr_pagevec)
(*nr_pagevec)++;
}
-
- if (PageTransHuge(page))
- put_page(page);
count += ret;
}
pagevec_remove_exceptionals(&pvec);