author		Matthew Wilcox (Oracle) <willy@infradead.org>	2021-06-28 19:36:15 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-06-29 10:53:48 -0700
commit		6e1cae881a0646f31fe2bda90297d820da1137eb (patch)
tree		ffc0c8f309aedb6033165dd2d60bcaf6f4c0c778 /mm/page-writeback.c
parent		0af573780b0b13fceb7fabd49dc1b073cee9a507 (diff)
download	linux-6e1cae881a0646f31fe2bda90297d820da1137eb.tar.bz2
mm/writeback: move __set_page_dirty() to core mm
Patch series "Further set_page_dirty cleanups".

Prompted by Christoph's recent patches, here are some more patches to
improve the state of set_page_dirty().  They're all from the folio tree,
so they've been tested to a certain extent.

This patch (of 6):

Nothing in __set_page_dirty() is specific to buffer_head, so move it to
mm/page-writeback.c.  That removes the only caller of
account_page_dirtied() outside of page-writeback.c, so make it static.

Link: https://lkml.kernel.org/r/20210615162342.1669332-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20210615162342.1669332-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Jan Kara <jack@suse.cz>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
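For context: __set_page_dirty() is the locked helper behind the simple
set_page_dirty() implementations, and callers are expected to hold
lock_page_memcg() around it.  Below is a minimal sketch of the caller
pattern, modelled on __set_page_dirty_buffers() in fs/buffer.c; the name
example_set_page_dirty and the exact include list are illustrative
assumptions, not code from this patch.

/*
 * Illustrative sketch only -- not part of this patch.  Shows how a
 * set_page_dirty() implementation uses __set_page_dirty() under
 * lock_page_memcg(), in the style of __set_page_dirty_buffers().
 * Include choices are approximate for a tree of this era.
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/memcontrol.h>

static int example_set_page_dirty(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        int newly_dirty;

        if (unlikely(!mapping))
                return !TestSetPageDirty(page);

        /* Keep the memcg stable while the page is accounted as dirty. */
        lock_page_memcg(page);
        newly_dirty = !TestSetPageDirty(page);
        if (newly_dirty)
                /* Accounts the page and tags it dirty in i_pages. */
                __set_page_dirty(page, mapping, 1);
        unlock_page_memcg(page);

        if (newly_dirty)
                __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
        return newly_dirty;
}

The warn argument matches what __set_page_dirty_buffers() passes (1), so
dirtying a page that is neither uptodate nor truncated trips the
WARN_ON_ONCE() in the moved helper.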
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--	mm/page-writeback.c	27
1 file changed, 26 insertions(+), 1 deletion(-)
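The helper added in the diff below tags each newly dirtied page in the
mapping's i_pages xarray (PAGECACHE_TAG_DIRTY), which is what lets
writeback find dirty pages without scanning the whole mapping.  Here is
a hedged sketch of that consumer side, loosely modelled on the tag
iteration done by write_cache_pages(); example_walk_dirty_pages and its
include list are illustrative assumptions only.

/*
 * Illustrative sketch only.  Walks the pages that __set_page_dirty()
 * tagged with PAGECACHE_TAG_DIRTY, the same tag write_cache_pages()
 * iterates when starting writeback.
 */
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>

static void example_walk_dirty_pages(struct address_space *mapping,
                                     pgoff_t start, pgoff_t end)
{
        struct pagevec pvec;
        unsigned int i, nr;

        pagevec_init(&pvec);
        while ((nr = pagevec_lookup_range_tag(&pvec, mapping, &start, end,
                                              PAGECACHE_TAG_DIRTY))) {
                for (i = 0; i < nr; i++) {
                        /*
                         * A real writeback path would lock the page,
                         * clear PageDirty and start I/O here.
                         */
                        struct page *page = pvec.pages[i];

                        (void)page;
                }
                pagevec_release(&pvec);
                cond_resched();
        }
}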
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 21f4b5972311..1345882c428b 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2425,7 +2425,8 @@ int __set_page_dirty_no_writeback(struct page *page)
  *
  * NOTE: This relies on being atomic wrt interrupts.
  */
-void account_page_dirtied(struct page *page, struct address_space *mapping)
+static void account_page_dirtied(struct page *page,
+		struct address_space *mapping)
 {
 	struct inode *inode = mapping->host;
 
@@ -2467,6 +2468,30 @@ void account_page_cleaned(struct page *page, struct address_space *mapping,
 }
 
 /*
+ * Mark the page dirty, and set it dirty in the page cache, and mark the inode
+ * dirty.
+ *
+ * If warn is true, then emit a warning if the page is not uptodate and has
+ * not been truncated.
+ *
+ * The caller must hold lock_page_memcg().
+ */
+void __set_page_dirty(struct page *page, struct address_space *mapping,
+			int warn)
+{
+	unsigned long flags;
+
+	xa_lock_irqsave(&mapping->i_pages, flags);
+	if (page->mapping) {	/* Race with truncate? */
+		WARN_ON_ONCE(warn && !PageUptodate(page));
+		account_page_dirtied(page, mapping);
+		__xa_set_mark(&mapping->i_pages, page_index(page),
+				PAGECACHE_TAG_DIRTY);
+	}
+	xa_unlock_irqrestore(&mapping->i_pages, flags);
+}
+
+/*
  * For address_spaces which do not use buffers.  Just tag the page as dirty in
  * the xarray.
  *