author:    Steven Whitehouse <swhiteho@redhat.com>  2009-04-20 08:16:26 +0100
committer: Steven Whitehouse <swhiteho@redhat.com>  2009-05-11 12:36:43 +0100
commit:    4a0f9a321a113392b448e477018311d14fba2b34 (patch)
tree:      7092fa29a94ef5aaada6d56a8d761615280c5f71 /fs/gfs2/meta_io.c
parent:    c969f58ca43fc403c75f5d3da4cf1e21de7afaa0 (diff)
download:  linux-4a0f9a321a113392b448e477018311d14fba2b34.tar.bz2
GFS2: Optimise writepage for metadata
This adds a GFS2-specific writepage for metadata, rather than continuing
to use the VFS function. As a result we now tag all our metadata I/O with
the correct flag, so blktraces will be less confusing. Also, the generic
function was checking for a number of corner cases which cannot occur on
the metadata address spaces, so this should be faster too.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
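[Editor's note] The heart of the blktrace improvement is the request flag
passed to submit_bh(): every metadata buffer is now submitted with
BIO_RW_META set. A minimal sketch of the flag selection, using the
2.6.30-era block-layer constants this patch targets; the helper name
gfs2_meta_write_op is hypothetical (the patch computes this inline):

	#include <linux/bio.h>
	#include <linux/fs.h>
	#include <linux/writeback.h>

	/* Hypothetical helper (the patch computes this inline): tag
	 * metadata writeback with BIO_RW_META so blktrace can tell it
	 * apart from data I/O, and use the synchronous write op for
	 * data-integrity (WB_SYNC_ALL) writeback, plain WRITE otherwise.
	 */
	static int gfs2_meta_write_op(const struct writeback_control *wbc)
	{
		return (1 << BIO_RW_META) |
		       (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC_PLUG : WRITE);
	}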
Diffstat (limited to 'fs/gfs2/meta_io.c')
-rw-r--r--  fs/gfs2/meta_io.c | 66
1 file changed, 57 insertions(+), 9 deletions(-)
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 75b2aec06f85..78a5f4312667 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -33,17 +33,65 @@
 #include "util.h"
 #include "ops_address.h"
 
-static int aspace_get_block(struct inode *inode, sector_t lblock,
-			    struct buffer_head *bh_result, int create)
+static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
 {
-	gfs2_assert_warn(inode->i_sb->s_fs_info, 0);
-	return -EOPNOTSUPP;
-}
+	int err;
+	struct buffer_head *bh, *head;
+	int nr_underway = 0;
+	int write_op = (1 << BIO_RW_META) | ((wbc->sync_mode == WB_SYNC_ALL ?
+			WRITE_SYNC_PLUG : WRITE));
+
+	BUG_ON(!PageLocked(page));
+	BUG_ON(!page_has_buffers(page));
+
+	head = page_buffers(page);
+	bh = head;
+
+	do {
+		if (!buffer_mapped(bh))
+			continue;
+		/*
+		 * If it's a fully non-blocking write attempt and we cannot
+		 * lock the buffer then redirty the page. Note that this can
+		 * potentially cause a busy-wait loop from pdflush and kswapd
+		 * activity, but those code paths have their own higher-level
+		 * throttling.
+		 */
+		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
+			lock_buffer(bh);
+		} else if (!trylock_buffer(bh)) {
+			redirty_page_for_writepage(wbc, page);
+			continue;
+		}
+		if (test_clear_buffer_dirty(bh)) {
+			mark_buffer_async_write(bh);
+		} else {
+			unlock_buffer(bh);
+		}
+	} while ((bh = bh->b_this_page) != head);
+
+	/*
+	 * The page and its buffers are protected by PageWriteback(), so we can
+	 * drop the bh refcounts early.
+	 */
+	BUG_ON(PageWriteback(page));
+	set_page_writeback(page);
+
+	do {
+		struct buffer_head *next = bh->b_this_page;
+		if (buffer_async_write(bh)) {
+			submit_bh(write_op, bh);
+			nr_underway++;
+		}
+		bh = next;
+	} while (bh != head);
+	unlock_page(page);
 
-static int gfs2_aspace_writepage(struct page *page,
-				 struct writeback_control *wbc)
-{
-	return block_write_full_page(page, aspace_get_block, wbc);
+	err = 0;
+	if (nr_underway == 0)
+		end_page_writeback(page);
+
+	return err;
 }
 
 static const struct address_space_operations aspace_aops = {
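[Editor's note] The hunk's trailing context shows where the function is
wired up: aspace_aops is the operations table of the metadata address
space, and since the function kept the name gfs2_aspace_writepage, the
initializer itself needed no change and appears here only as context. A
sketch of what the table plausibly looks like in this era; only
.writepage is implied by the hunk, the other fields are assumptions:

	static const struct address_space_operations aspace_aops = {
		.writepage	= gfs2_aspace_writepage,
		.releasepage	= gfs2_releasepage,	/* assumption: not touched by this patch */
		.sync_page	= block_sync_page,	/* assumption: standard helper of the era */
	};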