Diffstat (limited to 'fs/xfs/scrub')
-rw-r--r--  fs/xfs/scrub/bmap.c     | 22
-rw-r--r--  fs/xfs/scrub/dabtree.c  |  4
-rw-r--r--  fs/xfs/scrub/quota.c    | 83
-rw-r--r--  fs/xfs/scrub/repair.c   | 10
-rw-r--r--  fs/xfs/scrub/repair.h   |  4
-rw-r--r--  fs/xfs/scrub/rtbitmap.c | 47
6 files changed, 107 insertions, 63 deletions
diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c
index 7badd6dfe544..955302e7cdde 100644
--- a/fs/xfs/scrub/bmap.c
+++ b/fs/xfs/scrub/bmap.c
@@ -45,9 +45,27 @@ xchk_setup_inode_bmap(
*/
if (S_ISREG(VFS_I(sc->ip)->i_mode) &&
sc->sm->sm_type == XFS_SCRUB_TYPE_BMBTD) {
+ struct address_space *mapping = VFS_I(sc->ip)->i_mapping;
+
inode_dio_wait(VFS_I(sc->ip));
- error = filemap_write_and_wait(VFS_I(sc->ip)->i_mapping);
- if (error)
+
+ /*
+ * Try to flush all incore state to disk before we examine the
+ * space mappings for the data fork. Leave accumulated errors
+ * in the mapping for the writer threads to consume.
+ *
+ * On ENOSPC or EIO writeback errors, we continue into the
+ * extent mapping checks because write failures do not
+ * necessarily imply anything about the correctness of the file
+ * metadata. The metadata and the file data could be on
+ * completely separate devices; a media failure might only
+ * affect a subset of the disk, etc. We can handle delalloc
+ * extents in the scrubber, so leaving them in memory is fine.
+ */
+ error = filemap_fdatawrite(mapping);
+ if (!error)
+ error = filemap_fdatawait_keep_errors(mapping);
+ if (error && (error != -ENOSPC && error != -EIO))
goto out;
}
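
The new flush logic in this hunk boils down to the following pattern (a minimal sketch with a hypothetical helper name, not an upstream function): start writeback, wait for it without clearing the mapping's error state, and treat ENOSPC/EIO as non-fatal so the extent-map checks still run.

/*
 * Sketch of the flush-before-scrub pattern above.  flush_for_bmap_scrub()
 * is a hypothetical name; filemap_fdatawrite() and
 * filemap_fdatawait_keep_errors() are the pagecache APIs used in the hunk.
 * Unlike filemap_write_and_wait(), the _keep_errors variant leaves the
 * accumulated writeback errors in the mapping for fsync callers to consume.
 */
static int flush_for_bmap_scrub(struct address_space *mapping)
{
	int error;

	error = filemap_fdatawrite(mapping);
	if (!error)
		error = filemap_fdatawait_keep_errors(mapping);
	/* Data write failures say nothing about metadata correctness. */
	if (error == -ENOSPC || error == -EIO)
		error = 0;
	return error;
}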
diff --git a/fs/xfs/scrub/dabtree.c b/fs/xfs/scrub/dabtree.c
index 44b15015021f..e56786f0a13c 100644
--- a/fs/xfs/scrub/dabtree.c
+++ b/fs/xfs/scrub/dabtree.c
@@ -476,9 +476,7 @@ xchk_da_btree(
ds.dargs.whichfork = whichfork;
ds.dargs.trans = sc->tp;
ds.dargs.op_flags = XFS_DA_OP_OKNOENT;
- ds.state = xfs_da_state_alloc();
- ds.state->args = &ds.dargs;
- ds.state->mp = mp;
+ ds.state = xfs_da_state_alloc(&ds.dargs);
ds.sc = sc;
ds.private = private;
if (whichfork == XFS_ATTR_FORK) {
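
This hunk depends on the reworked xfs_da_state_alloc() taking the xfs_da_args pointer and wiring up the back-pointers itself. A rough sketch of the assumed constructor follows; the exact allocation call upstream may differ.

/*
 * Assumed shape of the updated constructor: allocate a zeroed dabtree
 * cursor state and derive args/mount from the caller's xfs_da_args.
 * The allocator call here is illustrative, not verbatim upstream code.
 */
struct xfs_da_state *
xfs_da_state_alloc(
	struct xfs_da_args	*args)
{
	struct xfs_da_state	*state;

	state = kmem_cache_zalloc(xfs_da_state_zone, GFP_NOFS | __GFP_NOFAIL);
	state->args = args;
	state->mp = args->dp->i_mount;
	return state;
}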
diff --git a/fs/xfs/scrub/quota.c b/fs/xfs/scrub/quota.c
index 905a34558361..e34ca20ae8e4 100644
--- a/fs/xfs/scrub/quota.c
+++ b/fs/xfs/scrub/quota.c
@@ -18,17 +18,17 @@
#include "scrub/common.h"
/* Convert a scrub type code to a DQ flag, or return 0 if error. */
-static inline uint
+static inline xfs_dqtype_t
xchk_quota_to_dqtype(
struct xfs_scrub *sc)
{
switch (sc->sm->sm_type) {
case XFS_SCRUB_TYPE_UQUOTA:
- return XFS_DQ_USER;
+ return XFS_DQTYPE_USER;
case XFS_SCRUB_TYPE_GQUOTA:
- return XFS_DQ_GROUP;
+ return XFS_DQTYPE_GROUP;
case XFS_SCRUB_TYPE_PQUOTA:
- return XFS_DQ_PROJ;
+ return XFS_DQTYPE_PROJ;
default:
return 0;
}
@@ -40,7 +40,7 @@ xchk_setup_quota(
struct xfs_scrub *sc,
struct xfs_inode *ip)
{
- uint dqtype;
+ xfs_dqtype_t dqtype;
int error;
if (!XFS_IS_QUOTA_RUNNING(sc->mp) || !XFS_IS_QUOTA_ON(sc->mp))
@@ -73,26 +73,15 @@ struct xchk_quota_info {
STATIC int
xchk_quota_item(
struct xfs_dquot *dq,
- uint dqtype,
+ xfs_dqtype_t dqtype,
void *priv)
{
struct xchk_quota_info *sqi = priv;
struct xfs_scrub *sc = sqi->sc;
struct xfs_mount *mp = sc->mp;
- struct xfs_disk_dquot *d = &dq->q_core;
struct xfs_quotainfo *qi = mp->m_quotainfo;
xfs_fileoff_t offset;
- unsigned long long bsoft;
- unsigned long long isoft;
- unsigned long long rsoft;
- unsigned long long bhard;
- unsigned long long ihard;
- unsigned long long rhard;
- unsigned long long bcount;
- unsigned long long icount;
- unsigned long long rcount;
xfs_ino_t fs_icount;
- xfs_dqid_t id = be32_to_cpu(d->d_id);
int error = 0;
if (xchk_should_terminate(sc, &error))
@@ -102,27 +91,11 @@ xchk_quota_item(
* Except for the root dquot, the actual dquot we got must either have
* the same or higher id as we saw before.
*/
- offset = id / qi->qi_dqperchunk;
- if (id && id <= sqi->last_id)
+ offset = dq->q_id / qi->qi_dqperchunk;
+ if (dq->q_id && dq->q_id <= sqi->last_id)
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
- sqi->last_id = id;
-
- /* Did we get the dquot type we wanted? */
- if (dqtype != (d->d_flags & XFS_DQ_ALLTYPES))
- xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
-
- if (d->d_pad0 != cpu_to_be32(0) || d->d_pad != cpu_to_be16(0))
- xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
-
- /* Check the limits. */
- bhard = be64_to_cpu(d->d_blk_hardlimit);
- ihard = be64_to_cpu(d->d_ino_hardlimit);
- rhard = be64_to_cpu(d->d_rtb_hardlimit);
-
- bsoft = be64_to_cpu(d->d_blk_softlimit);
- isoft = be64_to_cpu(d->d_ino_softlimit);
- rsoft = be64_to_cpu(d->d_rtb_softlimit);
+ sqi->last_id = dq->q_id;
/*
* Warn if the hard limits are larger than the fs.
@@ -132,25 +105,22 @@ xchk_quota_item(
* Complain about corruption if the soft limit is greater than
* the hard limit.
*/
- if (bhard > mp->m_sb.sb_dblocks)
+ if (dq->q_blk.hardlimit > mp->m_sb.sb_dblocks)
xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
- if (bsoft > bhard)
+ if (dq->q_blk.softlimit > dq->q_blk.hardlimit)
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
- if (ihard > M_IGEO(mp)->maxicount)
+ if (dq->q_ino.hardlimit > M_IGEO(mp)->maxicount)
xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
- if (isoft > ihard)
+ if (dq->q_ino.softlimit > dq->q_ino.hardlimit)
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
- if (rhard > mp->m_sb.sb_rblocks)
+ if (dq->q_rtb.hardlimit > mp->m_sb.sb_rblocks)
xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
- if (rsoft > rhard)
+ if (dq->q_rtb.softlimit > dq->q_rtb.hardlimit)
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
/* Check the resource counts. */
- bcount = be64_to_cpu(d->d_bcount);
- icount = be64_to_cpu(d->d_icount);
- rcount = be64_to_cpu(d->d_rtbcount);
fs_icount = percpu_counter_sum(&mp->m_icount);
/*
@@ -159,15 +129,15 @@ xchk_quota_item(
* if there are no quota limits.
*/
if (xfs_sb_version_hasreflink(&mp->m_sb)) {
- if (mp->m_sb.sb_dblocks < bcount)
+ if (mp->m_sb.sb_dblocks < dq->q_blk.count)
xchk_fblock_set_warning(sc, XFS_DATA_FORK,
offset);
} else {
- if (mp->m_sb.sb_dblocks < bcount)
+ if (mp->m_sb.sb_dblocks < dq->q_blk.count)
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
offset);
}
- if (icount > fs_icount || rcount > mp->m_sb.sb_rblocks)
+ if (dq->q_ino.count > fs_icount || dq->q_rtb.count > mp->m_sb.sb_rblocks)
xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
/*
@@ -175,13 +145,22 @@ xchk_quota_item(
* lower limit than the actual usage. However, we flag it for
* admin review.
*/
- if (id != 0 && bhard != 0 && bcount > bhard)
+ if (dq->q_id == 0)
+ goto out;
+
+ if (dq->q_blk.hardlimit != 0 &&
+ dq->q_blk.count > dq->q_blk.hardlimit)
xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
- if (id != 0 && ihard != 0 && icount > ihard)
+
+ if (dq->q_ino.hardlimit != 0 &&
+ dq->q_ino.count > dq->q_ino.hardlimit)
xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
- if (id != 0 && rhard != 0 && rcount > rhard)
+
+ if (dq->q_rtb.hardlimit != 0 &&
+ dq->q_rtb.count > dq->q_rtb.hardlimit)
xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
+out:
if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
return -EFSCORRUPTED;
@@ -235,7 +214,7 @@ xchk_quota(
struct xchk_quota_info sqi;
struct xfs_mount *mp = sc->mp;
struct xfs_quotainfo *qi = mp->m_quotainfo;
- uint dqtype;
+ xfs_dqtype_t dqtype;
int error = 0;
dqtype = xchk_quota_to_dqtype(sc);
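
The quota hunks above replace open-coded be64_to_cpu() conversions of q_core with direct reads of the incore resource counters (dq->q_blk, dq->q_ino, dq->q_rtb). The per-resource rule is the same for all three and could be factored as below; xchk_quota_res_check() is a hypothetical helper, assuming the incore struct xfs_dquot_res exposes count/softlimit/hardlimit.

/*
 * Sketch of the limit rule applied per resource: a hard limit larger than
 * the filesystem is only a warning, a soft limit above the hard limit is
 * corruption, and (for nonzero quota IDs) usage above a nonzero hard limit
 * is flagged for admin review.  Hypothetical helper, not upstream code.
 */
STATIC void
xchk_quota_res_check(
	struct xfs_scrub		*sc,
	const struct xfs_dquot_res	*res,
	unsigned long long		fs_max,
	xfs_fileoff_t			offset,
	bool				check_usage)
{
	if (res->hardlimit > fs_max)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (res->softlimit > res->hardlimit)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
	if (check_usage && res->hardlimit != 0 && res->count > res->hardlimit)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
}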
diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c
index db3cfd12803d..25e86c71e7b9 100644
--- a/fs/xfs/scrub/repair.c
+++ b/fs/xfs/scrub/repair.c
@@ -899,11 +899,11 @@ xrep_find_ag_btree_roots(
void
xrep_force_quotacheck(
struct xfs_scrub *sc,
- uint dqtype)
+ xfs_dqtype_t type)
{
uint flag;
- flag = xfs_quota_chkd_flag(dqtype);
+ flag = xfs_quota_chkd_flag(type);
if (!(flag & sc->mp->m_qflags))
return;
@@ -939,11 +939,11 @@ xrep_ino_dqattach(
"inode %llu repair encountered quota error %d, quotacheck forced.",
(unsigned long long)sc->ip->i_ino, error);
if (XFS_IS_UQUOTA_ON(sc->mp) && !sc->ip->i_udquot)
- xrep_force_quotacheck(sc, XFS_DQ_USER);
+ xrep_force_quotacheck(sc, XFS_DQTYPE_USER);
if (XFS_IS_GQUOTA_ON(sc->mp) && !sc->ip->i_gdquot)
- xrep_force_quotacheck(sc, XFS_DQ_GROUP);
+ xrep_force_quotacheck(sc, XFS_DQTYPE_GROUP);
if (XFS_IS_PQUOTA_ON(sc->mp) && !sc->ip->i_pdquot)
- xrep_force_quotacheck(sc, XFS_DQ_PROJ);
+ xrep_force_quotacheck(sc, XFS_DQTYPE_PROJ);
/* fall through */
case -ESRCH:
error = 0;
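
The repair hunk keys the forced quotacheck off the new xfs_dqtype_t by converting it to the matching superblock "quotacheck done" flag. The mapping behind xfs_quota_chkd_flag() is assumed to be roughly the following sketch.

/*
 * Assumed dqtype-to-CHKD-flag mapping used by xrep_force_quotacheck();
 * sketch only, the upstream helper may be spelled differently.
 */
static inline uint
xfs_quota_chkd_flag(
	xfs_dqtype_t		type)
{
	switch (type) {
	case XFS_DQTYPE_USER:
		return XFS_UQUOTA_CHKD;
	case XFS_DQTYPE_GROUP:
		return XFS_GQUOTA_CHKD;
	case XFS_DQTYPE_PROJ:
		return XFS_PQUOTA_CHKD;
	default:
		return 0;
	}
}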
diff --git a/fs/xfs/scrub/repair.h b/fs/xfs/scrub/repair.h
index 04a47d45605b..fe77de01abe0 100644
--- a/fs/xfs/scrub/repair.h
+++ b/fs/xfs/scrub/repair.h
@@ -6,6 +6,8 @@
#ifndef __XFS_SCRUB_REPAIR_H__
#define __XFS_SCRUB_REPAIR_H__
+#include "xfs_quota_defs.h"
+
static inline int xrep_notsupported(struct xfs_scrub *sc)
{
return -EOPNOTSUPP;
@@ -49,7 +51,7 @@ struct xrep_find_ag_btree {
int xrep_find_ag_btree_roots(struct xfs_scrub *sc, struct xfs_buf *agf_bp,
struct xrep_find_ag_btree *btree_info, struct xfs_buf *agfl_bp);
-void xrep_force_quotacheck(struct xfs_scrub *sc, uint dqtype);
+void xrep_force_quotacheck(struct xfs_scrub *sc, xfs_dqtype_t type);
int xrep_ino_dqattach(struct xfs_scrub *sc);
/* Metadata repairers */
diff --git a/fs/xfs/scrub/rtbitmap.c b/fs/xfs/scrub/rtbitmap.c
index c642bc206c41..76e4ffe0315b 100644
--- a/fs/xfs/scrub/rtbitmap.c
+++ b/fs/xfs/scrub/rtbitmap.c
@@ -13,6 +13,7 @@
#include "xfs_trans.h"
#include "xfs_rtalloc.h"
#include "xfs_inode.h"
+#include "xfs_bmap.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
@@ -58,6 +59,41 @@ xchk_rtbitmap_rec(
return 0;
}
+/* Make sure the entire rtbitmap file is mapped with written extents. */
+STATIC int
+xchk_rtbitmap_check_extents(
+ struct xfs_scrub *sc)
+{
+ struct xfs_mount *mp = sc->mp;
+ struct xfs_bmbt_irec map;
+ xfs_rtblock_t off;
+ int nmap;
+ int error = 0;
+
+ for (off = 0; off < mp->m_sb.sb_rbmblocks;) {
+ if (xchk_should_terminate(sc, &error) ||
+ (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
+ break;
+
+ /* Make sure we have a written extent. */
+ nmap = 1;
+ error = xfs_bmapi_read(mp->m_rbmip, off,
+ mp->m_sb.sb_rbmblocks - off, &map, &nmap,
+ XFS_DATA_FORK);
+ if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, off, &error))
+ break;
+
+ if (nmap != 1 || !xfs_bmap_is_written_extent(&map)) {
+ xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, off);
+ break;
+ }
+
+ off += map.br_blockcount;
+ }
+
+ return error;
+}
+
/* Scrub the realtime bitmap. */
int
xchk_rtbitmap(
@@ -65,11 +101,22 @@ xchk_rtbitmap(
{
int error;
+ /* Is the size of the rtbitmap correct? */
+ if (sc->mp->m_rbmip->i_d.di_size !=
+ XFS_FSB_TO_B(sc->mp, sc->mp->m_sb.sb_rbmblocks)) {
+ xchk_ino_set_corrupt(sc, sc->mp->m_rbmip->i_ino);
+ return 0;
+ }
+
/* Invoke the fork scrubber. */
error = xchk_metadata_inode_forks(sc);
if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
return error;
+ error = xchk_rtbitmap_check_extents(sc);
+ if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
+ return error;
+
error = xfs_rtalloc_query_all(sc->tp, xchk_rtbitmap_rec, sc);
if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error))
goto out;
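
The new xchk_rtbitmap_check_extents() loop rejects any rtbitmap mapping that is a hole, a delalloc reservation, or an unwritten (preallocated) extent. The predicate it leans on, xfs_bmap_is_written_extent(), is assumed to reduce to roughly the check below.

/*
 * Assumed semantics of the written-extent predicate used above: the
 * mapping must point at real allocated blocks (not a hole or a delalloc
 * reservation) and must already be in the written state.  Sketch only;
 * the upstream helper may be composed from xfs_bmap_is_real_extent().
 */
static inline bool
xfs_bmap_is_written_extent(struct xfs_bmbt_irec *irec)
{
	return irec->br_startblock != HOLESTARTBLOCK &&
	       irec->br_startblock != DELAYSTARTBLOCK &&
	       !isnullstartblock(irec->br_startblock) &&
	       irec->br_state != XFS_EXT_UNWRITTEN;
}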