-rw-r--r--   fs/xfs/libxfs/xfs_bmap.c    32
-rw-r--r--   fs/xfs/libxfs/xfs_sb.c       1
-rw-r--r--   fs/xfs/xfs_fsops.c           9
-rw-r--r--   fs/xfs/xfs_iomap.c           2
-rw-r--r--   fs/xfs/xfs_mount.c         192
-rw-r--r--   fs/xfs/xfs_mount.h           3
-rw-r--r--   fs/xfs/xfs_super.c          10
-rw-r--r--   fs/xfs/xfs_trans.c          16
8 files changed, 134 insertions, 131 deletions
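
This commit converts the XFS free block counter from the custom in-core superblock (icsb) per-cpu machinery to the kernel's generic struct percpu_counter, following the earlier m_icount/m_ifree conversions visible in the hunks below. For callers, the visible change is a move from a field-selector interface to a dedicated helper; a minimal before/after sketch, where mp, blocks and rsvd stand in for a hypothetical call site:

	/* before: field selector plus an int reserve-pool flag */
	error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
					 -((int64_t)blocks), rsvd);

	/* after: dedicated helper taking a bool reserve-pool flag */
	error = xfs_mod_fdblocks(mp, -((int64_t)blocks), rsvd);
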
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 61ec015dca16..e39c9e83670e 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -2212,9 +2212,8 @@ xfs_bmap_add_extent_delay_real(
diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
(bma->cur ? bma->cur->bc_private.b.allocated : 0));
if (diff > 0) {
- error = xfs_icsb_modify_counters(bma->ip->i_mount,
- XFS_SBS_FDBLOCKS,
- -((int64_t)diff), 0);
+ error = xfs_mod_fdblocks(bma->ip->i_mount,
+ -((int64_t)diff), false);
ASSERT(!error);
if (error)
goto done;
@@ -2265,9 +2264,8 @@ xfs_bmap_add_extent_delay_real(
temp += bma->cur->bc_private.b.allocated;
ASSERT(temp <= da_old);
if (temp < da_old)
- xfs_icsb_modify_counters(bma->ip->i_mount,
- XFS_SBS_FDBLOCKS,
- (int64_t)(da_old - temp), 0);
+ xfs_mod_fdblocks(bma->ip->i_mount,
+ (int64_t)(da_old - temp), false);
}
/* clear out the allocated field, done with it now in any case. */
@@ -2944,8 +2942,8 @@ xfs_bmap_add_extent_hole_delay(
}
if (oldlen != newlen) {
ASSERT(oldlen > newlen);
- xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS,
- (int64_t)(oldlen - newlen), 0);
+ xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
+ false);
/*
* Nothing to do for disk quota accounting here.
*/
@@ -4163,15 +4161,13 @@ xfs_bmapi_reserve_delalloc(
error = xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
-((int64_t)extsz), 0);
} else {
- error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
- -((int64_t)alen), 0);
+ error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
}
if (error)
goto out_unreserve_quota;
- error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
- -((int64_t)indlen), 0);
+ error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
if (error)
goto out_unreserve_blocks;
@@ -4200,7 +4196,7 @@ out_unreserve_blocks:
if (rt)
xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, extsz, 0);
else
- xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, alen, 0);
+ xfs_mod_fdblocks(mp, alen, false);
out_unreserve_quota:
if (XFS_IS_QUOTA_ON(mp))
xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, rt ?
@@ -5012,10 +5008,8 @@ xfs_bmap_del_extent(
* Nothing to do for disk quota accounting here.
*/
ASSERT(da_old >= da_new);
- if (da_old > da_new) {
- xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
- (int64_t)(da_old - da_new), 0);
- }
+ if (da_old > da_new)
+ xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false);
done:
*logflagsp = flags;
return error;
@@ -5290,8 +5284,8 @@ xfs_bunmapi(
ip, -((long)del.br_blockcount), 0,
XFS_QMOPT_RES_RTBLKS);
} else {
- xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
- (int64_t)del.br_blockcount, 0);
+ xfs_mod_fdblocks(mp, (int64_t)del.br_blockcount,
+ false);
(void)xfs_trans_reserve_quota_nblks(NULL,
ip, -((long)del.br_blockcount), 0,
XFS_QMOPT_RES_REGBLKS);
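
The xfs_bmap.c call sites above all follow the same delayed allocation pattern: blocks are reserved up front with a negative delta, and any excess is returned with a positive delta once the real allocation is known. A condensed sketch of that pattern, using names from the hunks above with error paths abbreviated:

	/* reserve data blocks and worst-case indirect blocks */
	error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
	if (error)
		goto out_unreserve_quota;
	error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
	if (error)
		goto out_unreserve_blocks;

	/* later: the extent needed fewer blocks than were reserved */
	if (da_old > da_new)
		xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false);
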
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index b66aeab99cfb..31a3e972f86f 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -773,6 +773,7 @@ xfs_log_sb(
mp->m_sb.sb_icount = percpu_counter_sum(&mp->m_icount);
mp->m_sb.sb_ifree = percpu_counter_sum(&mp->m_ifree);
+ mp->m_sb.sb_fdblocks = percpu_counter_sum(&mp->m_fdblocks);
xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb);
xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index a1ca9c2b8c00..7ef25588062f 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -640,9 +640,10 @@ xfs_fs_counts(
xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
cnt->allocino = percpu_counter_read_positive(&mp->m_icount);
cnt->freeino = percpu_counter_read_positive(&mp->m_ifree);
+ cnt->freedata = percpu_counter_read_positive(&mp->m_fdblocks) -
+ XFS_ALLOC_SET_ASIDE(mp);
spin_lock(&mp->m_sb_lock);
- cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
cnt->freertx = mp->m_sb.sb_frextents;
spin_unlock(&mp->m_sb_lock);
return 0;
@@ -717,7 +718,8 @@ retry:
} else {
__int64_t free;
- free = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
+ free = percpu_counter_sum(&mp->m_fdblocks) -
+ XFS_ALLOC_SET_ASIDE(mp);
if (!free)
goto out; /* ENOSPC and fdblks_delta = 0 */
@@ -756,8 +758,7 @@ out:
* the extra reserve blocks from the reserve.....
*/
int error;
- error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
- fdblks_delta, 0);
+ error = xfs_mod_fdblocks(mp, fdblks_delta, false);
if (error == -ENOSPC)
goto retry;
}
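
Note the two read primitives in play in xfs_fsops.c: xfs_fs_counts() only needs a cheap, approximately current snapshot, so it uses the lockless percpu_counter_read_positive(), while the reserve-pool sizing in the retry loop needs the precise value and pays for percpu_counter_sum(). A sketch of the distinction (generic percpu_counter API, not specific to this patch):

	/* cheap: central count only, may lag per-cpu deltas; clamped at 0 */
	s64 approx = percpu_counter_read_positive(&mp->m_fdblocks);

	/* exact: folds in every cpu's local delta under the counter lock */
	s64 exact = percpu_counter_sum(&mp->m_fdblocks);
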
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index ccb1dd0d509e..205b948c9d20 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -461,7 +461,7 @@ xfs_iomap_prealloc_size(
alloc_blocks);
xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
- freesp = mp->m_sb.sb_fdblocks;
+ freesp = percpu_counter_read_positive(&mp->m_fdblocks);
if (freesp < mp->m_low_space[XFS_LOWSP_5_PCNT]) {
shift = 2;
if (freesp < mp->m_low_space[XFS_LOWSP_4_PCNT])
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 650e8f18cd2a..767c09a5d3ff 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1114,7 +1114,6 @@ xfs_mod_icount(
return 0;
}
-
int
xfs_mod_ifree(
struct xfs_mount *mp,
@@ -1128,6 +1127,92 @@ xfs_mod_ifree(
}
return 0;
}
+
+int
+xfs_mod_fdblocks(
+ struct xfs_mount *mp,
+ int64_t delta,
+ bool rsvd)
+{
+ int64_t lcounter;
+ long long res_used;
+ s32 batch;
+
+ if (delta > 0) {
+ /*
+ * If the reserve pool is depleted, put blocks back into it
+ * first. Most of the time the pool is full.
+ */
+ if (likely(mp->m_resblks == mp->m_resblks_avail)) {
+ percpu_counter_add(&mp->m_fdblocks, delta);
+ return 0;
+ }
+
+ spin_lock(&mp->m_sb_lock);
+ res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);
+
+ if (res_used > delta) {
+ mp->m_resblks_avail += delta;
+ } else {
+ delta -= res_used;
+ mp->m_resblks_avail = mp->m_resblks;
+ percpu_counter_add(&mp->m_fdblocks, delta);
+ }
+ spin_unlock(&mp->m_sb_lock);
+ return 0;
+ }
+
+ /*
+ * Taking blocks away, need to be more accurate the closer we
+ * are to zero.
+ *
+ * batch size is set to a maximum of 1024 blocks - if we are
+ * allocating or freeing extents larger than this then we aren't
+ * going to be hammering the counter lock so a lock per update
+ * is not a problem.
+ *
+ * If the counter has a value of less than 2 * max batch size,
+ * then make everything serialise as we are really close to
+ * ENOSPC.
+ */
+#define __BATCH 1024
+ if (percpu_counter_compare(&mp->m_fdblocks, 2 * __BATCH) < 0)
+ batch = 1;
+ else
+ batch = __BATCH;
+#undef __BATCH
+
+ __percpu_counter_add(&mp->m_fdblocks, delta, batch);
+ if (percpu_counter_compare(&mp->m_fdblocks,
+ XFS_ALLOC_SET_ASIDE(mp)) >= 0) {
+ /* we had space! */
+ return 0;
+ }
+
+ /*
+ * lock up the sb for dipping into reserves before releasing the space
+ * that took us to ENOSPC.
+ */
+ spin_lock(&mp->m_sb_lock);
+ percpu_counter_add(&mp->m_fdblocks, -delta);
+ if (!rsvd)
+ goto fdblocks_enospc;
+
+ lcounter = (long long)mp->m_resblks_avail + delta;
+ if (lcounter >= 0) {
+ mp->m_resblks_avail = lcounter;
+ spin_unlock(&mp->m_sb_lock);
+ return 0;
+ }
+ printk_once(KERN_WARNING
+ "Filesystem \"%s\": reserve blocks depleted! "
+ "Consider increasing reserve pool size.",
+ mp->m_fsname);
+fdblocks_enospc:
+ spin_unlock(&mp->m_sb_lock);
+ return -ENOSPC;
+}
+
/*
* xfs_mod_incore_sb_unlocked() is a utility routine commonly used to apply
* a delta to a specified field in the in-core superblock. Simply
@@ -1146,7 +1231,6 @@ xfs_mod_incore_sb_unlocked(
{
int scounter; /* short counter for 32 bit fields */
long long lcounter; /* long counter for 64 bit fields */
- long long res_used, rem;
/*
* With the in-core superblock spin lock held, switch
@@ -1157,50 +1241,9 @@ xfs_mod_incore_sb_unlocked(
switch (field) {
case XFS_SBS_ICOUNT:
case XFS_SBS_IFREE:
+ case XFS_SBS_FDBLOCKS:
ASSERT(0);
return -EINVAL;
- case XFS_SBS_FDBLOCKS:
- lcounter = (long long)
- mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
- res_used = (long long)(mp->m_resblks - mp->m_resblks_avail);
-
- if (delta > 0) { /* Putting blocks back */
- if (res_used > delta) {
- mp->m_resblks_avail += delta;
- } else {
- rem = delta - res_used;
- mp->m_resblks_avail = mp->m_resblks;
- lcounter += rem;
- }
- } else { /* Taking blocks away */
- lcounter += delta;
- if (lcounter >= 0) {
- mp->m_sb.sb_fdblocks = lcounter +
- XFS_ALLOC_SET_ASIDE(mp);
- return 0;
- }
-
- /*
- * We are out of blocks, use any available reserved
- * blocks if were allowed to.
- */
- if (!rsvd)
- return -ENOSPC;
-
- lcounter = (long long)mp->m_resblks_avail + delta;
- if (lcounter >= 0) {
- mp->m_resblks_avail = lcounter;
- return 0;
- }
- printk_once(KERN_WARNING
- "Filesystem \"%s\": reserve blocks depleted! "
- "Consider increasing reserve pool size.",
- mp->m_fsname);
- return -ENOSPC;
- }
-
- mp->m_sb.sb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
- return 0;
case XFS_SBS_FREXTENTS:
lcounter = (long long)mp->m_sb.sb_frextents;
lcounter += delta;
@@ -1323,7 +1366,7 @@ xfs_mod_incore_sb(
*
* Note that this function may not be used for the superblock values that
* are tracked with the in-memory per-cpu counters - a direct call to
- * xfs_icsb_modify_counters is required for these.
+ * xfs_mod_icount/xfs_mod_ifree/xfs_mod_fdblocks is required for these.
*/
int
xfs_mod_incore_sb_batch(
@@ -1508,7 +1551,6 @@ xfs_icsb_cpu_notify(
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
xfs_icsb_lock(mp);
- xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
xfs_icsb_unlock(mp);
break;
case CPU_DEAD:
@@ -1518,13 +1560,9 @@ xfs_icsb_cpu_notify(
* re-enable the counters. */
xfs_icsb_lock(mp);
spin_lock(&mp->m_sb_lock);
- xfs_icsb_disable_counter(mp, XFS_SBS_FDBLOCKS);
-
- mp->m_sb.sb_fdblocks += cntp->icsb_fdblocks;
memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
- xfs_icsb_balance_counter_locked(mp, XFS_SBS_FDBLOCKS, 0);
spin_unlock(&mp->m_sb_lock);
xfs_icsb_unlock(mp);
break;
@@ -1550,10 +1588,14 @@ xfs_icsb_init_counters(
if (error)
goto free_icount;
+ error = percpu_counter_init(&mp->m_fdblocks, 0, GFP_KERNEL);
+ if (error)
+ goto free_ifree;
+
mp->m_sb_cnts = alloc_percpu(xfs_icsb_cnts_t);
if (!mp->m_sb_cnts) {
error = -ENOMEM;
- goto free_ifree;
+ goto free_fdblocks;
}
for_each_online_cpu(i) {
@@ -1577,6 +1619,8 @@ xfs_icsb_init_counters(
return 0;
+free_fdblocks:
+ percpu_counter_destroy(&mp->m_fdblocks);
free_ifree:
percpu_counter_destroy(&mp->m_ifree);
free_icount:
@@ -1590,6 +1634,7 @@ xfs_icsb_reinit_counters(
{
percpu_counter_set(&mp->m_icount, mp->m_sb.sb_icount);
percpu_counter_set(&mp->m_ifree, mp->m_sb.sb_ifree);
+ percpu_counter_set(&mp->m_fdblocks, mp->m_sb.sb_fdblocks);
xfs_icsb_lock(mp);
/*
@@ -1597,7 +1642,6 @@ xfs_icsb_reinit_counters(
* initial balance kicks us off correctly
*/
mp->m_icsb_counters = -1;
- xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
xfs_icsb_unlock(mp);
}
@@ -1612,6 +1656,7 @@ xfs_icsb_destroy_counters(
percpu_counter_destroy(&mp->m_icount);
percpu_counter_destroy(&mp->m_ifree);
+ percpu_counter_destroy(&mp->m_fdblocks);
mutex_destroy(&mp->m_icsb_mutex);
}
@@ -1665,18 +1710,11 @@ xfs_icsb_count(
xfs_icsb_cnts_t *cnt,
int flags)
{
- xfs_icsb_cnts_t *cntp;
- int i;
-
memset(cnt, 0, sizeof(xfs_icsb_cnts_t));
if (!(flags & XFS_ICSB_LAZY_COUNT))
xfs_icsb_lock_all_counters(mp);
- for_each_online_cpu(i) {
- cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
- cnt->icsb_fdblocks += cntp->icsb_fdblocks;
- }
if (!(flags & XFS_ICSB_LAZY_COUNT))
xfs_icsb_unlock_all_counters(mp);
@@ -1687,7 +1725,6 @@ xfs_icsb_counter_disabled(
xfs_mount_t *mp,
xfs_sb_field_t field)
{
- ASSERT(field == XFS_SBS_FDBLOCKS);
return test_bit(field, &mp->m_icsb_counters);
}
@@ -1698,8 +1735,6 @@ xfs_icsb_disable_counter(
{
xfs_icsb_cnts_t cnt;
- ASSERT(field == XFS_SBS_FDBLOCKS);
-
/*
* If we are already disabled, then there is nothing to do
* here. We check before locking all the counters to avoid
@@ -1717,9 +1752,6 @@ xfs_icsb_disable_counter(
xfs_icsb_count(mp, &cnt, XFS_ICSB_LAZY_COUNT);
switch(field) {
- case XFS_SBS_FDBLOCKS:
- mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
- break;
default:
BUG();
}
@@ -1735,18 +1767,11 @@ xfs_icsb_enable_counter(
uint64_t count,
uint64_t resid)
{
- xfs_icsb_cnts_t *cntp;
int i;
- ASSERT(field == XFS_SBS_FDBLOCKS);
-
xfs_icsb_lock_all_counters(mp);
for_each_online_cpu(i) {
- cntp = per_cpu_ptr(mp->m_sb_cnts, i);
switch (field) {
- case XFS_SBS_FDBLOCKS:
- cntp->icsb_fdblocks = count + resid;
- break;
default:
BUG();
break;
@@ -1765,9 +1790,6 @@ xfs_icsb_sync_counters_locked(
xfs_icsb_cnts_t cnt;
xfs_icsb_count(mp, &cnt, flags);
-
- if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS))
- mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
}
/*
@@ -1809,20 +1831,12 @@ xfs_icsb_balance_counter_locked(
int min_per_cpu)
{
uint64_t count, resid;
- int weight = num_online_cpus();
- uint64_t min = (uint64_t)min_per_cpu;
/* disable counter and sync counter */
xfs_icsb_disable_counter(mp, field);
/* update counters - first CPU gets residual*/
switch (field) {
- case XFS_SBS_FDBLOCKS:
- count = mp->m_sb.sb_fdblocks;
- resid = do_div(count, weight);
- if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp)))
- return;
- break;
default:
BUG();
count = resid = 0; /* quiet, gcc */
@@ -1851,7 +1865,6 @@ xfs_icsb_modify_counters(
int rsvd)
{
xfs_icsb_cnts_t *icsbp;
- long long lcounter; /* long counter for 64 bit fields */
int ret = 0;
might_sleep();
@@ -1871,18 +1884,9 @@ again:
}
switch (field) {
- case XFS_SBS_FDBLOCKS:
- BUG_ON((mp->m_resblks - mp->m_resblks_avail) != 0);
-
- lcounter = icsbp->icsb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
- lcounter += delta;
- if (unlikely(lcounter < 0))
- goto balance_counter;
- icsbp->icsb_fdblocks = lcounter + XFS_ALLOC_SET_ASIDE(mp);
- break;
default:
BUG();
- break;
+ goto balance_counter; /* be still, gcc */
}
xfs_icsb_unlock_cntr(icsbp);
preempt_enable();
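
The core of the conversion is xfs_mod_fdblocks() above: far from ENOSPC it batches updates per cpu for scalability, and within two batches of empty it drops to a batch of one so every modification folds into the central count and the comparison against the allocator set-aside stays accurate. A condensed sketch of the pattern (XFS_FDBLOCKS_BATCH is a hypothetical name for the patch's local __BATCH; later kernels spell __percpu_counter_add() as percpu_counter_add_batch()):

	#define XFS_FDBLOCKS_BATCH	1024

	s32 batch = XFS_FDBLOCKS_BATCH;

	/* near zero, serialise so the comparison below is exact */
	if (percpu_counter_compare(&mp->m_fdblocks,
				   2 * XFS_FDBLOCKS_BATCH) < 0)
		batch = 1;

	__percpu_counter_add(&mp->m_fdblocks, delta, batch);
	if (percpu_counter_compare(&mp->m_fdblocks,
				   XFS_ALLOC_SET_ASIDE(mp)) >= 0)
		return 0;		/* still above the set-aside */

	/* undo, then try the reserve pool under m_sb_lock */
	percpu_counter_add(&mp->m_fdblocks, -delta);
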
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 7ce997d43d81..84b745fbc787 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -85,6 +85,7 @@ typedef struct xfs_mount {
spinlock_t m_sb_lock; /* sb counter lock */
struct percpu_counter m_icount; /* allocated inodes counter */
struct percpu_counter m_ifree; /* free inodes counter */
+ struct percpu_counter m_fdblocks; /* free block counter */
struct xfs_buf *m_sb_bp; /* buffer for superblock */
char *m_fsname; /* filesystem name */
@@ -393,6 +394,8 @@ extern int xfs_mod_incore_sb_batch(xfs_mount_t *, xfs_mod_sb_t *,
uint, int);
extern int xfs_mod_icount(struct xfs_mount *mp, int64_t delta);
extern int xfs_mod_ifree(struct xfs_mount *mp, int64_t delta);
+extern int xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta,
+ bool reserved);
extern int xfs_mount_log_sb(xfs_mount_t *);
extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int);
extern int xfs_readsb(xfs_mount_t *, int);
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 049147776ee1..9ec75074026d 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1087,6 +1087,7 @@ xfs_fs_statfs(
__uint64_t fakeinos, id;
__uint64_t icount;
__uint64_t ifree;
+ __uint64_t fdblocks;
xfs_extlen_t lsize;
__int64_t ffree;
@@ -1100,13 +1101,17 @@ xfs_fs_statfs(
xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
icount = percpu_counter_sum(&mp->m_icount);
ifree = percpu_counter_sum(&mp->m_ifree);
+ fdblocks = percpu_counter_sum(&mp->m_fdblocks);
spin_lock(&mp->m_sb_lock);
statp->f_bsize = sbp->sb_blocksize;
lsize = sbp->sb_logstart ? sbp->sb_logblocks : 0;
statp->f_blocks = sbp->sb_dblocks - lsize;
- statp->f_bfree = statp->f_bavail =
- sbp->sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
+ spin_unlock(&mp->m_sb_lock);
+
+ statp->f_bfree = fdblocks - XFS_ALLOC_SET_ASIDE(mp);
+ statp->f_bavail = statp->f_bfree;
+
fakeinos = statp->f_bfree << sbp->sb_inopblog;
statp->f_files = MIN(icount + fakeinos, (__uint64_t)XFS_MAXINUMBER);
if (mp->m_maxicount)
@@ -1123,7 +1128,6 @@ xfs_fs_statfs(
ffree = statp->f_files - (icount - ifree);
statp->f_ffree = max_t(__int64_t, ffree, 0);
- spin_unlock(&mp->m_sb_lock);
if ((ip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
((mp->m_qflags & (XFS_PQUOTA_ACCT|XFS_PQUOTA_ENFD))) ==
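
xfs_fs_statfs() now sums all three per-cpu counters before taking m_sb_lock, since none of them live in the in-core superblock any more; the lock only covers the remaining sb fields. The resulting ordering, condensed from the hunk above:

	fdblocks = percpu_counter_sum(&mp->m_fdblocks);

	spin_lock(&mp->m_sb_lock);
	statp->f_blocks = sbp->sb_dblocks - lsize;	/* still sb-resident */
	spin_unlock(&mp->m_sb_lock);

	statp->f_bfree = fdblocks - XFS_ALLOC_SET_ASIDE(mp);
	statp->f_bavail = statp->f_bfree;
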
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 68680ce67547..e99f5e552c64 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -173,7 +173,7 @@ xfs_trans_reserve(
uint rtextents)
{
int error = 0;
- int rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
+ bool rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
/* Mark this thread as being in a transaction */
current_set_flags_nested(&tp->t_pflags, PF_FSTRANS);
@@ -184,8 +184,7 @@ xfs_trans_reserve(
* fail if the count would go below zero.
*/
if (blocks > 0) {
- error = xfs_icsb_modify_counters(tp->t_mountp, XFS_SBS_FDBLOCKS,
- -((int64_t)blocks), rsvd);
+ error = xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
if (error != 0) {
current_restore_flags_nested(&tp->t_pflags, PF_FSTRANS);
return -ENOSPC;
@@ -268,8 +267,7 @@ undo_log:
undo_blocks:
if (blocks > 0) {
- xfs_icsb_modify_counters(tp->t_mountp, XFS_SBS_FDBLOCKS,
- (int64_t)blocks, rsvd);
+ xfs_mod_fdblocks(tp->t_mountp, (int64_t)blocks, rsvd);
tp->t_blk_res = 0;
}
@@ -516,14 +514,13 @@ xfs_trans_unreserve_and_mod_sb(
xfs_mount_t *mp = tp->t_mountp;
/* REFERENCED */
int error;
- int rsvd;
+ bool rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
int64_t blkdelta = 0;
int64_t rtxdelta = 0;
int64_t idelta = 0;
int64_t ifreedelta = 0;
msbp = msb;
- rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
/* calculate deltas */
if (tp->t_blk_res > 0)
@@ -547,8 +544,7 @@ xfs_trans_unreserve_and_mod_sb(
/* apply the per-cpu counters */
if (blkdelta) {
- error = xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS,
- blkdelta, rsvd);
+ error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
if (error)
goto out;
}
@@ -635,7 +631,7 @@ out_undo_icount:
xfs_mod_icount(mp, -idelta);
out_undo_fdblocks:
if (blkdelta)
- xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, -blkdelta, rsvd);
+ xfs_mod_fdblocks(mp, -blkdelta, rsvd);
out:
ASSERT(error == 0);
return;
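
In xfs_trans.c the XFS_TRANS_RESERVE flag now travels as a bool, and the reserve/undo pair must stay sign-symmetric: a reservation takes blocks with a negative delta, and every undo path returns the same magnitude positive, as in the undo_blocks hunk above. A sketch of the pair:

	/* take the reservation; may dip into the reserve pool if rsvd */
	error = xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
	if (error)
		return -ENOSPC;

	/* undo on a later failure: give back exactly what was taken */
	xfs_mod_fdblocks(tp->t_mountp, (int64_t)blocks, rsvd);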