author    Jan Kara <jack@suse.cz>    2015-06-08 12:46:37 -0400
committer Theodore Ts'o <tytso@mit.edu>    2015-06-08 12:46:37 -0400
commit    de92c8caf16ca84926fa31b7a5590c0fb9c0d5ca (patch)
tree      3fda30d72edf93965320abb2d3a689b04fbb9f46 /fs/jbd2
parent    8b00f400eedf91d074f831077003c0d4d9147377 (diff)
download  linux-de92c8caf16ca84926fa31b7a5590c0fb9c0d5ca.tar.bz2
jbd2: speedup jbd2_journal_get_[write|undo]_access()
jbd2_journal_get_write_access() and jbd2_journal_get_create_access() are frequently called for buffers that are already part of the running transaction - most frequently this is the case for bitmaps, inode table blocks, and the superblock. Since in such cases we have nothing to do, it is unfortunate that we still grab a reference to the journal head, lock the bh, and lock bh_state only to find out there's nothing to do.

Improving this is a bit subtle, though, since until we find out the journal head is attached to the running transaction, it can disappear from under us because checkpointing / commit decided it is no longer needed. We deal with this by protecting the journal_head slab with RCU. We still have to be careful about the journal head being freed & reallocated within the slab, and about exposing the journal head in a consistent state (in particular, b_modified and b_frozen_data must be in the correct state before we allow the user to touch the buffer).

Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
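[Editor's note] The fast path relies on the classic SLAB_DESTROY_BY_RCU lookup discipline: dereference the object under rcu_read_lock(), check the fields of interest, then re-verify the back-pointer in case the slab object was freed and reused while we looked at it. A minimal sketch of that pattern follows, with hypothetical names (struct obj, struct owner, o_owner) standing in for journal_head, buffer_head and jh->b_bh; the real code is jbd2_write_access_granted() in the diff below.

#include <linux/rcupdate.h>
#include <linux/compiler.h>

/* Hypothetical types for illustration only. */
struct owner;

struct obj {
	struct owner *o_owner;		/* back-pointer, like jh->b_bh */
	void *o_transaction;		/* like jh->b_transaction */
};

struct owner {
	struct obj *private;		/* like bh->b_private */
};

static bool obj_attached_fast(struct owner *owner, void *transaction)
{
	struct obj *obj;
	bool ret = false;

	rcu_read_lock();
	/* The slab object may be freed and reused at any time... */
	obj = READ_ONCE(owner->private);
	if (!obj)
		goto out;
	if (obj->o_transaction != transaction)
		goto out;
	/*
	 * ...so after the check, re-verify the back-pointer: if the
	 * object was recycled for a different owner, bail out.  The
	 * barrier also keeps the caller's later accesses from being
	 * reordered before these checks.
	 */
	smp_mb();
	if (unlikely(obj->o_owner != owner))
		goto out;
	ret = true;
out:
	rcu_read_unlock();
	return ret;
}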
Diffstat (limited to 'fs/jbd2')
-rw-r--r--  fs/jbd2/journal.c       2
-rw-r--r--  fs/jbd2/transaction.c  76
2 files changed, 73 insertions, 5 deletions
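[Editor's note] Before the hunks themselves, the barrier pairing the patch adds can be read in isolation: the slow path gets jh->b_modified and jh->b_frozen_data into their final state, issues smp_wmb(), and only then attaches the journal head to the running transaction; the fast path checks the transaction pointers first and only then trusts the other fields. A condensed, order-only view (not the literal kernel code):

/* Writer side - do_get_write_access(), condensed: */
/* ... jh->b_modified and jh->b_frozen_data set up as needed ... */
smp_wmb();	/* publish: order those stores before the attach below */
jh->b_next_transaction = transaction;

/* Reader side - jbd2_write_access_granted(), condensed: */
if (jh->b_transaction != handle->h_transaction &&
    jh->b_next_transaction != handle->h_transaction)
	return false;
smp_mb();	/* pairs with the writer's smp_wmb(); also orders the
		 * jh->b_bh recheck and the caller's later accesses */
/* jh->b_modified and jh->b_frozen_data are now seen in a consistent state */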
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 0bc333b4a594..303ccd953e95 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -2321,7 +2321,7 @@ static int jbd2_journal_init_journal_head_cache(void)
jbd2_journal_head_cache = kmem_cache_create("jbd2_journal_head",
sizeof(struct journal_head),
0, /* offset */
- SLAB_TEMPORARY, /* flags */
+ SLAB_TEMPORARY | SLAB_DESTROY_BY_RCU,
NULL); /* ctor */
retval = 0;
if (!jbd2_journal_head_cache) {
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 1bbcf86499c9..f3d06174b051 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -892,6 +892,12 @@ repeat:
JBUFFER_TRACE(jh, "no transaction");
J_ASSERT_JH(jh, !jh->b_next_transaction);
JBUFFER_TRACE(jh, "file as BJ_Reserved");
+ /*
+ * Make sure all stores to jh (b_modified, b_frozen_data) are
+ * visible before attaching it to the running transaction.
+ * Paired with barrier in jbd2_write_access_granted()
+ */
+ smp_wmb();
spin_lock(&journal->j_list_lock);
__jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
spin_unlock(&journal->j_list_lock);
@@ -904,8 +910,7 @@ repeat:
if (jh->b_frozen_data) {
JBUFFER_TRACE(jh, "has frozen data");
J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
- jh->b_next_transaction = transaction;
- goto done;
+ goto attach_next;
}
JBUFFER_TRACE(jh, "owned by older transaction");
@@ -959,6 +964,13 @@ repeat:
frozen_buffer = NULL;
jbd2_freeze_jh_data(jh);
}
+attach_next:
+ /*
+ * Make sure all stores to jh (b_modified, b_frozen_data) are visible
+ * before attaching it to the running transaction. Paired with barrier
+ * in jbd2_write_access_granted()
+ */
+ smp_wmb();
jh->b_next_transaction = transaction;
done:
@@ -978,6 +990,55 @@ out:
return error;
}
+/* Fast check whether buffer is already attached to the required transaction */
+static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh)
+{
+ struct journal_head *jh;
+ bool ret = false;
+
+ /* Dirty buffers require special handling... */
+ if (buffer_dirty(bh))
+ return false;
+
+ /*
+ * RCU protects us from dereferencing freed pages. So the checks we do
+ * are guaranteed not to oops. However the jh slab object can get freed
+ * & reallocated while we work with it. So we have to be careful. When
+ * we see jh attached to the running transaction, we know it must stay
+ * so until the transaction is committed. Thus jh won't be freed and
+ * will be attached to the same bh while we run. However it can
+ * happen jh gets freed, reallocated, and attached to the transaction
+ * just after we get pointer to it from bh. So we have to be careful
+ * and recheck jh still belongs to our bh before we return success.
+ */
+ rcu_read_lock();
+ if (!buffer_jbd(bh))
+ goto out;
+ /* This should be bh2jh() but that doesn't work with inline functions */
+ jh = READ_ONCE(bh->b_private);
+ if (!jh)
+ goto out;
+ if (jh->b_transaction != handle->h_transaction &&
+ jh->b_next_transaction != handle->h_transaction)
+ goto out;
+ /*
+ * There are two reasons for the barrier here:
+ * 1) Make sure to fetch b_bh after we did previous checks so that we
+ * detect when jh went through free, realloc, attach to transaction
+ * while we were checking. Paired with implicit barrier in that path.
+ * 2) So that access to bh done after jbd2_write_access_granted()
+ * doesn't get reordered and see inconsistent state of concurrent
+ * do_get_write_access().
+ */
+ smp_mb();
+ if (unlikely(jh->b_bh != bh))
+ goto out;
+ ret = true;
+out:
+ rcu_read_unlock();
+ return ret;
+}
+
/**
* int jbd2_journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
* @handle: transaction to add buffer modifications to
@@ -991,9 +1052,13 @@ out:
int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
{
- struct journal_head *jh = jbd2_journal_add_journal_head(bh);
+ struct journal_head *jh;
int rc;
+ if (jbd2_write_access_granted(handle, bh))
+ return 0;
+
+ jh = jbd2_journal_add_journal_head(bh);
/* We do not want to get caught playing with fields which the
* log thread also manipulates. Make sure that the buffer
* completes any outstanding IO before proceeding. */
@@ -1123,11 +1188,14 @@ out:
int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
{
int err;
- struct journal_head *jh = jbd2_journal_add_journal_head(bh);
+ struct journal_head *jh;
char *committed_data = NULL;
JBUFFER_TRACE(jh, "entry");
+ if (jbd2_write_access_granted(handle, bh))
+ return 0;
+ jh = jbd2_journal_add_journal_head(bh);
/*
* Do this first --- it can drop the journal lock, so we want to
* make sure that obtaining the committed_data is done