author     Mel Gorman <mgorman@suse.de>  2015-09-04 15:47:35 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-09-04 16:54:41 -0700
commit     d950c9477d51f0cefc2ed3cf76e695d46af0d9c1 (patch)
tree       d6ba89db328ec09b2140a84dcdf1bd8ca5cbeaa4 /mm/rmap.c
parent     72b252aed506b8f1a03f7abd29caef4cdf6a043b (diff)
download   linux-d950c9477d51f0cefc2ed3cf76e695d46af0d9c1.tar.bz2
mm: defer flush of writable TLB entries
If a PTE is unmapped and it's dirty then it was writable recently. Due
to deferred TLB flushing, it's best to assume a writable TLB cache entry
exists. With that assumption, the TLB must be flushed before any IO can
start or the page is freed to avoid lost writes or data corruption. This
patch defers flushing of potentially writable TLBs as long as possible.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
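[Editor's note: as an illustration of the deferral decision this patch adds, here is a minimal user-space C model of the pattern. The struct and function names mirror the kernel's, but the cpumask handling and the flush itself are stubbed out with a printf; this is a sketch of the logic, not kernel code.]

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for struct tlbflush_unmap_batch. */
struct tlb_ubc_model {
        bool flush_required;    /* some unmapped PTE has a pending flush */
        bool writable;          /* a dirty (possibly writable) PTE was seen */
};

static struct tlb_ubc_model tlb_ubc;

/* Stub: the kernel IPIs the CPUs in tlb_ubc->cpumask here. */
static void try_to_unmap_flush(void)
{
        if (!tlb_ubc.flush_required)
                return;
        printf("flushing TLBs\n");
        tlb_ubc.flush_required = false;
        tlb_ubc.writable = false;
}

/* Flush only if a potentially writable entry could race with IO. */
static void try_to_unmap_flush_dirty(void)
{
        if (tlb_ubc.writable)
                try_to_unmap_flush();
}

/* Called per unmapped PTE; 'writable' is pte_dirty(pteval) in the kernel. */
static void set_tlb_ubc_flush_pending(bool writable)
{
        tlb_ubc.flush_required = true;
        if (writable)
                tlb_ubc.writable = true;
}

int main(void)
{
        set_tlb_ubc_flush_pending(false);  /* clean PTE: stay batched */
        try_to_unmap_flush_dirty();        /* nothing writable: no flush */

        set_tlb_ubc_flush_pending(true);   /* dirty PTE: mark writable */
        try_to_unmap_flush_dirty();        /* now the flush happens */
        return 0;
}

The point of the extra writable flag is that clean unmaps can stay batched indefinitely, while a dirty unmap only forces a flush at the last moment, right before the page is handed to IO.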
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--  mm/rmap.c | 28 +++++++++++++++++++++-------
1 file changed, 21 insertions(+), 7 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 326d5d89e45c..0db38e7d0a72 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -626,16 +626,34 @@ void try_to_unmap_flush(void)
         }
         cpumask_clear(&tlb_ubc->cpumask);
         tlb_ubc->flush_required = false;
+        tlb_ubc->writable = false;
         put_cpu();
 }
 
+/* Flush iff there are potentially writable TLB entries that can race with IO */
+void try_to_unmap_flush_dirty(void)
+{
+        struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
+
+        if (tlb_ubc->writable)
+                try_to_unmap_flush();
+}
+
 static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
-                struct page *page)
+                struct page *page, bool writable)
 {
         struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
 
         cpumask_or(&tlb_ubc->cpumask, &tlb_ubc->cpumask, mm_cpumask(mm));
         tlb_ubc->flush_required = true;
+
+        /*
+         * If the PTE was dirty then it's best to assume it's writable. The
+         * caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
+         * before the page is queued for IO.
+         */
+        if (writable)
+                tlb_ubc->writable = true;
 }
 
 /*
@@ -658,7 +676,7 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
 }
 #else
 static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
-                struct page *page)
+                struct page *page, bool writable)
 {
 }
 
@@ -1315,11 +1333,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                  */
                 pteval = ptep_get_and_clear(mm, address, pte);
 
-                /* Potentially writable TLBs must be flushed before IO */
-                if (pte_dirty(pteval))
-                        flush_tlb_page(vma, address);
-                else
-                        set_tlb_ubc_flush_pending(mm, page);
+                set_tlb_ubc_flush_pending(mm, page, pte_dirty(pteval));
         } else {
                 pteval = ptep_clear_flush(vma, address, pte);
         }