Diffstat (limited to 'fs')
-rw-r--r--  fs/xfs/xfs_iomap.c  83
1 file changed, 35 insertions, 48 deletions
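
The hunks below make two related moves: the check for whether a buffered write actually extends the file past EOF, and the handling of an explicit allocsize= mount option, leave xfs_iomap_prealloc_size() and land in its caller, xfs_buffered_write_iomap_begin(). As a rough illustration only, here is a minimal userspace sketch of that caller-side decision; the mount_sketch struct, the MOUNT_ALLOCSIZE flag value, and the dynamic_prealloc_size() stub are simplified stand-ins for the kernel's definitions, not the real ones.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MOUNT_ALLOCSIZE	(1u << 0)	/* placeholder for XFS_MOUNT_ALLOCSIZE */

	struct mount_sketch {
		unsigned int	flags;
		uint64_t	allocsize_blocks;	/* stands in for mp->m_allocsize_blocks */
	};

	/* Stub for the dynamic sizing heuristic; see the sketch after the diff. */
	static uint64_t dynamic_prealloc_size(uint64_t offset, uint64_t count)
	{
		(void)offset;
		(void)count;
		return 1024;	/* pretend the heuristic picked 1024 blocks */
	}

	static uint64_t prealloc_decision(const struct mount_sketch *mp, bool eof,
					  uint64_t offset, uint64_t count,
					  uint64_t isize)
	{
		/* Only writes that extend the file past EOF get preallocation. */
		if (!eof || offset + count <= isize)
			return 0;

		/* An explicit allocsize= mount option is used verbatim. */
		if (mp->flags & MOUNT_ALLOCSIZE)
			return mp->allocsize_blocks;

		return dynamic_prealloc_size(offset, count);
	}

	int main(void)
	{
		struct mount_sketch mp = { .flags = 0, .allocsize_blocks = 16 };

		/* Extending write just past a 1 MiB EOF: dynamic sizing applies. */
		printf("%llu blocks\n", (unsigned long long)
		       prealloc_decision(&mp, true, 1 << 20, 4096, 1 << 20));
		return 0;
	}

The effect of the move is that the sizing helper now only answers how large the preallocation should be, while the caller decides whether to preallocate at all.
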
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index e74a8c2c94ce..b9a8c3798e08 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -352,22 +352,10 @@ xfs_quota_calc_throttle(
}
/*
- * If we are doing a write at the end of the file and there are no allocations
- * past this one, then extend the allocation out to the file system's write
- * iosize.
- *
* If we don't have a user specified preallocation size, dynamically increase
* the preallocation size as the size of the file grows. Cap the maximum size
* at a single extent or less if the filesystem is near full. The closer the
- * filesystem is to full, the smaller the maximum prealocation.
- *
- * As an exception we don't do any preallocation at all if the file is smaller
- * than the minimum preallocation and we are using the default dynamic
- * preallocation scheme, as it is likely this is the only write to the file that
- * is going to be done.
- *
- * We clean up any extra space left over when the file is closed in
- * xfs_inactive().
+ * filesystem is to being full, the smaller the maximum preallocation.
*/
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
@@ -389,41 +377,28 @@ xfs_iomap_prealloc_size(
int shift = 0;
int qshift = 0;
- if (offset + count <= XFS_ISIZE(ip))
- return 0;
-
- if (!(mp->m_flags & XFS_MOUNT_ALLOCSIZE) &&
- (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_allocsize_blocks)))
+ /*
+ * As an exception we don't do any preallocation at all if the file is
+ * smaller than the minimum preallocation and we are using the default
+ * dynamic preallocation scheme, as it is likely this is the only write
+ * to the file that is going to be done.
+ */
+ if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_allocsize_blocks))
return 0;
/*
- * If an explicit allocsize is set, the file is small, or we
- * are writing behind a hole, then use the minimum prealloc:
+ * Use the minimum preallocation size for small files or if we are
+ * writing right after a hole.
*/
- if ((mp->m_flags & XFS_MOUNT_ALLOCSIZE) ||
- XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
+ if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
!xfs_iext_prev_extent(ifp, &ncur, &prev) ||
prev.br_startoff + prev.br_blockcount < offset_fsb)
return mp->m_allocsize_blocks;
/*
- * Determine the initial size of the preallocation. We are beyond the
- * current EOF here, but we need to take into account whether this is
- * a sparse write or an extending write when determining the
- * preallocation size. Hence we need to look up the extent that ends
- * at the current write offset and use the result to determine the
- * preallocation size.
- *
- * If the extent is a hole, then preallocation is essentially disabled.
- * Otherwise we take the size of the preceding data extents as the basis
- * for the preallocation size. Note that we don't care if the previous
- * extents are written or not.
- *
- * If the size of the extents is greater than half the maximum extent
- * length, then use the current offset as the basis. This ensures that
- * for large files the preallocation size always extends to MAXEXTLEN
- * rather than falling short due to things like stripe unit/width
- * alignment of real extents.
+ * Take the size of the preceding data extents as the basis for the
+ * preallocation size. Note that we don't care if the previous extents
+ * are written or not.
*/
plen = prev.br_blockcount;
while (xfs_iext_prev_extent(ifp, &ncur, &got)) {
@@ -435,19 +410,25 @@ xfs_iomap_prealloc_size(
plen += got.br_blockcount;
prev = got;
}
+
+ /*
+ * If the size of the extents is greater than half the maximum extent
+ * length, then use the current offset as the basis. This ensures that
+ * for large files the preallocation size always extends to MAXEXTLEN
+ * rather than falling short due to things like stripe unit/width
+ * alignment of real extents.
+ */
alloc_blocks = plen * 2;
if (alloc_blocks > MAXEXTLEN)
alloc_blocks = XFS_B_TO_FSB(mp, offset);
- if (!alloc_blocks)
- goto check_writeio;
qblocks = alloc_blocks;
/*
* MAXEXTLEN is not a power of two value but we round the prealloc down
* to the nearest power of two value after throttling. To prevent the
- * round down from unconditionally reducing the maximum supported prealloc
- * size, we round up first, apply appropriate throttling, round down and
- * cap the value to MAXEXTLEN.
+ * round down from unconditionally reducing the maximum supported
+ * prealloc size, we round up first, apply appropriate throttling,
+ * round down and cap the value to MAXEXTLEN.
*/
alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(MAXEXTLEN),
alloc_blocks);
@@ -508,7 +489,6 @@ xfs_iomap_prealloc_size(
*/
while (alloc_blocks && alloc_blocks >= freesp)
alloc_blocks >>= 4;
-check_writeio:
if (alloc_blocks < mp->m_allocsize_blocks)
alloc_blocks = mp->m_allocsize_blocks;
trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
@@ -975,9 +955,16 @@ xfs_buffered_write_iomap_begin(
if (error)
goto out_unlock;
- if (eof) {
- prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork, offset,
- count, &icur);
+ if (eof && offset + count > XFS_ISIZE(ip)) {
+ /*
+ * Determine the initial size of the preallocation.
+ * We clean up any extra preallocation when the file is closed.
+ */
+ if (mp->m_flags & XFS_MOUNT_ALLOCSIZE)
+ prealloc_blocks = mp->m_allocsize_blocks;
+ else
+ prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork,
+ offset, count, &icur);
if (prealloc_blocks) {
xfs_extlen_t align;
xfs_off_t end_offset;
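
For the sizing heuristic itself, the arithmetic spread across the comments above is easier to follow in isolation: double the length of the preceding contiguous data extents, fall back to the write offset for very large files, throttle by a free-space/quota shift, and round to a power of two without letting the rounding shrink the MAXEXTLEN cap. The sketch below models only that arithmetic under assumed values (4096-byte blocks, the usual XFS MAXEXTLEN of 0x001fffff blocks, a precomputed throttle shift); the power-of-two helpers are simplified reimplementations rather than the kernel's, and the free-space squashing loop and quota handling are left out.

	#include <stdint.h>
	#include <stdio.h>

	#define MAXEXTLEN	((uint64_t)0x001fffff)	/* assumed XFS max extent length, in blocks */
	#define BLOCKSIZE	4096ULL			/* assumed filesystem block size */

	static uint64_t roundup_pow_of_two(uint64_t x)
	{
		uint64_t p = 1;

		while (p < x)
			p <<= 1;
		return p;
	}

	static uint64_t rounddown_pow_of_two(uint64_t x)
	{
		uint64_t p = 1;

		while ((p << 1) <= x)
			p <<= 1;
		return p;
	}

	/*
	 * plen:       blocks covered by the preceding contiguous data extents
	 * offset:     byte offset of the write
	 * shift:      throttle shift derived from free space / quota (0 = none)
	 * min_blocks: stand-in for mp->m_allocsize_blocks
	 */
	static uint64_t prealloc_blocks(uint64_t plen, uint64_t offset, int shift,
					uint64_t min_blocks)
	{
		uint64_t alloc_blocks = plen * 2;

		/*
		 * For very large files, base the size on the current offset so
		 * the preallocation can still reach MAXEXTLEN despite stripe
		 * alignment of the real extents.
		 */
		if (alloc_blocks > MAXEXTLEN)
			alloc_blocks = (offset + BLOCKSIZE - 1) / BLOCKSIZE;

		/*
		 * Round up before throttling and back down afterwards, so the
		 * power-of-two rounding cannot unconditionally shrink the
		 * maximum supported preallocation; then cap at MAXEXTLEN and
		 * enforce the minimum preallocation size.
		 */
		if (alloc_blocks > roundup_pow_of_two(MAXEXTLEN))
			alloc_blocks = roundup_pow_of_two(MAXEXTLEN);
		if (shift)
			alloc_blocks >>= shift;
		if (alloc_blocks)
			alloc_blocks = rounddown_pow_of_two(alloc_blocks);
		if (alloc_blocks > MAXEXTLEN)
			alloc_blocks = MAXEXTLEN;
		if (alloc_blocks < min_blocks)
			alloc_blocks = min_blocks;
		return alloc_blocks;
	}

	int main(void)
	{
		/*
		 * 1 GiB of preceding extents (262144 x 4k blocks), writing at
		 * 2 GiB, throttled by a shift of 2: prints 131072 blocks,
		 * i.e. a 512 MiB preallocation.
		 */
		printf("%llu blocks\n", (unsigned long long)
		       prealloc_blocks(262144, 2ULL << 30, 2, 16));
		return 0;
	}

Rounding up to a power of two before throttling and back down afterwards, as the comment in the patch explains, keeps the eventual round-down from always dropping a near-MAXEXTLEN request below the cap.
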