Diffstat (limited to 'fs/xfs/xfs_buf_item.c')
-rw-r--r--  fs/xfs/xfs_buf_item.c  |  141
1 file changed, 111 insertions(+), 30 deletions(-)
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index dc0be2a639cc..fb69879e4b2b 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -55,6 +55,24 @@ xfs_buf_log_format_size(
 			(blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
 }
 
+static inline bool
+xfs_buf_item_straddle(
+	struct xfs_buf		*bp,
+	uint			offset,
+	int			first_bit,
+	int			nbits)
+{
+	void			*first, *last;
+
+	first = xfs_buf_offset(bp, offset + (first_bit << XFS_BLF_SHIFT));
+	last = xfs_buf_offset(bp,
+			offset + ((first_bit + nbits) << XFS_BLF_SHIFT));
+
+	if (last - first != nbits * XFS_BLF_CHUNK)
+		return true;
+	return false;
+}
+
 /*
  * This returns the number of log iovecs needed to log the
  * given buf log item.
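
(Aside: the helper added above flags a run of dirty chunks whose logical bytes do not resolve to one contiguous kernel mapping, as can happen when an unmapped buffer is backed by scattered pages. A minimal userspace sketch of the same check follows; struct fake_buf, fake_buf_offset() and the BLF_* constants are illustrative stand-ins, not the kernel's xfs_buf API.)

    /* Illustrative model only: not the kernel's xfs_buf. */
    #include <stdbool.h>
    #include <stddef.h>

    #define BLF_CHUNK 128   /* bytes covered by one dirty-bitmap bit */
    #define BLF_SHIFT 7     /* log2(BLF_CHUNK) */
    #define PAGE_SZ   4096

    struct fake_buf {
        char **pages;       /* backing pages, not virtually contiguous */
    };

    /* Map a logical buffer offset to an address, page by page. */
    static char *fake_buf_offset(struct fake_buf *bp, unsigned int offset)
    {
        return bp->pages[offset / PAGE_SZ] + offset % PAGE_SZ;
    }

    /* True if the run [first_bit, first_bit + nbits) crosses a page break. */
    static bool fake_straddle(struct fake_buf *bp, unsigned int offset,
                              int first_bit, int nbits)
    {
        char *first = fake_buf_offset(bp, offset + (first_bit << BLF_SHIFT));
        char *last = fake_buf_offset(bp,
                        offset + ((first_bit + nbits) << BLF_SHIFT));

        /* In one contiguous mapping the two differ by exactly nbits chunks. */
        return last - first != (ptrdiff_t)nbits * BLF_CHUNK;
    }
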
@@ -69,24 +87,56 @@ STATIC void
 xfs_buf_item_size_segment(
 	struct xfs_buf_log_item	*bip,
 	struct xfs_buf_log_format *blfp,
+	uint			offset,
 	int			*nvecs,
 	int			*nbytes)
 {
 	struct xfs_buf		*bp = bip->bli_buf;
+	int			first_bit;
+	int			nbits;
 	int			next_bit;
 	int			last_bit;
 
-	last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
-	if (last_bit == -1)
+	first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
+	if (first_bit == -1)
 		return;
 
-	/*
-	 * initial count for a dirty buffer is 2 vectors - the format structure
-	 * and the first dirty region.
-	 */
-	*nvecs += 2;
-	*nbytes += xfs_buf_log_format_size(blfp) + XFS_BLF_CHUNK;
+	(*nvecs)++;
+	*nbytes += xfs_buf_log_format_size(blfp);
+
+	do {
+		nbits = xfs_contig_bits(blfp->blf_data_map,
+					blfp->blf_map_size, first_bit);
+		ASSERT(nbits > 0);
+
+		/*
+		 * Straddling a page is rare because we don't log contiguous
+		 * chunks of unmapped buffers anywhere.
+		 */
+		if (nbits > 1 &&
+		    xfs_buf_item_straddle(bp, offset, first_bit, nbits))
+			goto slow_scan;
+
+		(*nvecs)++;
+		*nbytes += nbits * XFS_BLF_CHUNK;
+
+		/*
+		 * This takes the bit number to start looking from and
+		 * returns the next set bit from there.  It returns -1
+		 * if there are no more bits set or the start bit is
+		 * beyond the end of the bitmap.
+		 */
+		first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
+					 (uint)first_bit + nbits + 1);
+	} while (first_bit != -1);
+
+	return;
+
+slow_scan:
+	/* Count the first bit we jumped out of the above loop from */
+	(*nvecs)++;
+	*nbytes += XFS_BLF_CHUNK;
+	last_bit = first_bit;
 	while (last_bit != -1) {
 		/*
 		 * This takes the bit number to start looking from and
@@ -103,16 +153,15 @@ xfs_buf_item_size_segment(
 		 */
 		if (next_bit == -1) {
 			break;
-		} else if (next_bit != last_bit + 1) {
-			last_bit = next_bit;
-			(*nvecs)++;
-		} else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
-			   (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
-			    XFS_BLF_CHUNK)) {
+		} else if (next_bit != last_bit + 1 ||
+			   xfs_buf_item_straddle(bp, offset, first_bit, nbits)) {
 			last_bit = next_bit;
+			first_bit = next_bit;
 			(*nvecs)++;
+			nbits = 1;
 		} else {
 			last_bit++;
+			nbits++;
 		}
 		*nbytes += XFS_BLF_CHUNK;
 	}
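
(Aside: with the change above, the common case walks the dirty bitmap one contiguous run at a time, counting one iovec per run, and falls back to the per-chunk slow scan only when a run straddles discontiguous pages. A self-contained sketch of that run-at-a-time accounting follows; find_next_set() and contig_bits() are simplified stand-ins for xfs_next_bit() and xfs_contig_bits(), using a plain byte-array bitmap and BLF_CHUNK from the first sketch.)

    /* Return the first set bit at or after start, or -1 if none. */
    static int find_next_set(const unsigned char *map, int map_bits, int start)
    {
        for (int i = start; i < map_bits; i++)
            if (map[i / 8] & (1u << (i % 8)))
                return i;
        return -1;
    }

    /* Return the length of the run of set bits beginning at start. */
    static int contig_bits(const unsigned char *map, int map_bits, int start)
    {
        int n = 0;

        while (start + n < map_bits &&
               (map[(start + n) / 8] & (1u << ((start + n) % 8))))
            n++;
        return n;
    }

    /* Fast-path vector/byte accounting over a dirty bitmap. */
    static void count_dirty_runs(const unsigned char *map, int map_bits,
                                 int *nvecs, int *nbytes)
    {
        int first_bit = find_next_set(map, map_bits, 0);

        while (first_bit != -1) {
            int run = contig_bits(map, map_bits, first_bit);

            (*nvecs)++;                   /* one iovec per contiguous run */
            *nbytes += run * BLF_CHUNK;   /* run length in bytes */

            /* The bit at first_bit + run is known clear, so skip past it. */
            first_bit = find_next_set(map, map_bits, first_bit + run + 1);
        }
    }
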
@@ -142,7 +191,10 @@ xfs_buf_item_size(
 	int			*nbytes)
 {
 	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
+	struct xfs_buf		*bp = bip->bli_buf;
 	int			i;
+	int			bytes;
+	uint			offset = 0;
 
 	ASSERT(atomic_read(&bip->bli_refcount) > 0);
 	if (bip->bli_flags & XFS_BLI_STALE) {
@@ -174,7 +226,7 @@ xfs_buf_item_size(
 	}
 
 	/*
-	 * the vector count is based on the number of buffer vectors we have
+	 * The vector count is based on the number of buffer vectors we have
 	 * dirty bits in. This will only be greater than one when we have a
 	 * compound buffer with more than one segment dirty. Hence for compound
 	 * buffers we need to track which segment the dirty bits correspond to,
@@ -182,10 +234,19 @@ xfs_buf_item_size(
 	 * and when we move from one segment to the next increment the vector
 	 * count for the extra buf log format structure that will need to be
 	 * written.
 	 */
+	bytes = 0;
 	for (i = 0; i < bip->bli_format_count; i++) {
-		xfs_buf_item_size_segment(bip, &bip->bli_formats[i],
-					  nvecs, nbytes);
+		xfs_buf_item_size_segment(bip, &bip->bli_formats[i], offset,
+					  nvecs, &bytes);
+		offset += BBTOB(bp->b_maps[i].bm_len);
 	}
+
+	/*
+	 * Round up the buffer size required to minimise the number of memory
+	 * allocations that need to be done as this item grows when relogged by
+	 * repeated modifications.
+	 */
+	*nbytes = round_up(bytes, 512);
 	trace_xfs_buf_item_size(bip);
 }
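
(Aside: the 512-byte round-up feeds the log vector buffer sizing, so a log item that is relogged with slightly different amounts of dirty data keeps requesting the same quantised size instead of triggering a fresh allocation on every modification. A small userspace illustration of the arithmetic; ROUND_UP mirrors the kernel's power-of-two round_up() macro and the values are only examples.)

    #include <assert.h>

    /* Power-of-two rounding, in the spirit of the kernel's round_up(). */
    #define ROUND_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        assert(ROUND_UP(700, 512) == 1024);  /* 700 dirty bytes -> 1024 */
        assert(ROUND_UP(800, 512) == 1024);  /* growing to 800 -> same size */
        assert(ROUND_UP(1025, 512) == 1536); /* only a real jump resizes */
        return 0;
    }
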
@@ -204,18 +265,6 @@ xfs_buf_item_copy_iovec(
 			nbits * XFS_BLF_CHUNK);
 }
 
-static inline bool
-xfs_buf_item_straddle(
-	struct xfs_buf		*bp,
-	uint			offset,
-	int			next_bit,
-	int			last_bit)
-{
-	return xfs_buf_offset(bp, offset + (next_bit << XFS_BLF_SHIFT)) !=
-		(xfs_buf_offset(bp, offset + (last_bit << XFS_BLF_SHIFT)) +
-		 XFS_BLF_CHUNK);
-}
-
 static void
 xfs_buf_item_format_segment(
 	struct xfs_buf_log_item	*bip,
@@ -268,6 +317,38 @@ xfs_buf_item_format_segment(
 	/*
 	 * Fill in an iovec for each set of contiguous chunks.
 	 */
+	do {
+		ASSERT(first_bit >= 0);
+		nbits = xfs_contig_bits(blfp->blf_data_map,
+					blfp->blf_map_size, first_bit);
+		ASSERT(nbits > 0);
+
+		/*
+		 * Straddling a page is rare because we don't log contiguous
+		 * chunks of unmapped buffers anywhere.
+		 */
+		if (nbits > 1 &&
+		    xfs_buf_item_straddle(bp, offset, first_bit, nbits))
+			goto slow_scan;
+
+		xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
+					first_bit, nbits);
+		blfp->blf_size++;
+
+		/*
+		 * This takes the bit number to start looking from and
+		 * returns the next set bit from there.  It returns -1
+		 * if there are no more bits set or the start bit is
+		 * beyond the end of the bitmap.
+		 */
+		first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
+					 (uint)first_bit + nbits + 1);
+	} while (first_bit != -1);
+
+	return;
+
+slow_scan:
+	ASSERT(bp->b_addr == NULL);
 	last_bit = first_bit;
 	nbits = 1;
 	for (;;) {
@@ -292,7 +373,7 @@ xfs_buf_item_format_segment(
 			blfp->blf_size++;
 			break;
 		} else if (next_bit != last_bit + 1 ||
-			   xfs_buf_item_straddle(bp, offset, next_bit, last_bit)) {
+			   xfs_buf_item_straddle(bp, offset, first_bit, nbits)) {
 			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
 						first_bit, nbits);
 			blfp->blf_size++;
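
(Aside: the format side mirrors the sizing side, consuming whole contiguous runs and copying each with a single xfs_buf_item_copy_iovec() call. A sketch of what copying one run amounts to, reusing the illustrative fake_buf helpers and BLF_* constants from the first sketch; the real kernel code writes into a log vector via iovecs, not a flat byte array.)

    #include <string.h>

    /* Copy one contiguous dirty run into a flat output buffer. */
    static void copy_run(char *out, size_t *used, struct fake_buf *bp,
                         unsigned int offset, int first_bit, int nbits)
    {
        char *src = fake_buf_offset(bp, offset + (first_bit << BLF_SHIFT));

        /* Only valid on the fast path: the run is known not to straddle. */
        memcpy(out + *used, src, (size_t)nbits * BLF_CHUNK);
        *used += (size_t)nbits * BLF_CHUNK;
    }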