summary refs log tree commit diff
path: root/fs/xfs/scrub/scrub.c
diff options
context:
space:
mode:
Diffstat (limited to 'fs/xfs/scrub/scrub.c')
-rw-r--r--  fs/xfs/scrub/scrub.c  148
1 files changed, 148 insertions, 0 deletions
diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c
index 342a50248650..fc2cfef68366 100644
--- a/fs/xfs/scrub/scrub.c
+++ b/fs/xfs/scrub/scrub.c
@@ -20,6 +20,7 @@
#include "xfs_rmap.h"
#include "xfs_xchgrange.h"
#include "xfs_swapext.h"
+#include "xfs_icache.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
@@ -726,3 +727,150 @@ try_harder:
sc->flags |= XCHK_TRY_HARDER;
goto retry_op;
}
+
+/* Decide if there have been any scrub failures up to this point. */
+static inline bool
+xfs_scrubv_previous_failures(
+ struct xfs_mount *mp,
+ struct xfs_scrub_vec_head *vhead,
+ struct xfs_scrub_vec *barrier_vec)
+{
+ struct xfs_scrub_vec *v;
+ __u32 failmask;
+
+ failmask = barrier_vec->sv_flags & XFS_SCRUB_FLAGS_OUT;
+
+ for (v = vhead->svh_vecs; v < barrier_vec; v++) {
+ if (v->sv_type == XFS_SCRUB_TYPE_BARRIER)
+ continue;
+
+ /*
+ * Runtime errors count as a previous failure, except the ones
+ * used to ask userspace to retry.
+ */
+ if (v->sv_ret && v->sv_ret != -EBUSY && v->sv_ret != -ENOENT &&
+ v->sv_ret != -EUSERS)
+ return true;
+
+ /*
+ * If any of the out-flags on the scrub vector match the mask
+ * that was set on the barrier vector, that's a previous fail.
+ */
+ if (v->sv_flags & failmask)
+ return true;
+ }
+
+ return false;
+}
+
+/* Vectored scrub implementation to reduce ioctl calls. */
+int
+xfs_scrubv_metadata(
+ struct file *file,
+ struct xfs_scrub_vec_head *vhead)
+{
+ struct xfs_inode *ip_in = XFS_I(file_inode(file));
+ struct xfs_mount *mp = ip_in->i_mount;
+ struct xfs_inode *ip = NULL;
+ struct xfs_scrub_vec *v;
+ bool set_dontcache = false;
+ unsigned int i;
+ int error = 0;
+
+ BUILD_BUG_ON(sizeof(struct xfs_scrub_vec_head) ==
+ sizeof(struct xfs_scrub_metadata));
+ BUILD_BUG_ON(XFS_IOC_SCRUB_METADATA == XFS_IOC_SCRUBV_METADATA);
+
+ trace_xchk_scrubv_start(ip_in, vhead);
+
+ if (vhead->svh_flags & ~XFS_SCRUB_VEC_FLAGS_ALL)
+ return -EINVAL;
+ for (i = 0, v = vhead->svh_vecs; i < vhead->svh_nr; i++, v++) {
+ if (v->sv_reserved)
+ return -EINVAL;
+ if (v->sv_type == XFS_SCRUB_TYPE_BARRIER &&
+ (v->sv_flags & ~XFS_SCRUB_FLAGS_OUT))
+ return -EINVAL;
+
+ /*
+ * If we detect at least one inode-type scrub, we might
+ * consider setting dontcache at the end.
+ */
+ if (v->sv_type < XFS_SCRUB_TYPE_NR &&
+ meta_scrub_ops[v->sv_type].type == ST_INODE)
+ set_dontcache = true;
+
+ trace_xchk_scrubv_item(mp, vhead, v);
+ }
+
+ /*
+ * If the caller provided us with a nonzero inode number that isn't the
+ * ioctl file, try to grab a reference to it to eliminate all further
+ * untrusted inode lookups. If we can't get the inode, let each scrub
+ * function try again.
+ */
+ if (vhead->svh_ino != ip_in->i_ino) {
+ xfs_iget(mp, NULL, vhead->svh_ino, XFS_IGET_UNTRUSTED, 0, &ip);
+ if (ip && (VFS_I(ip)->i_generation != vhead->svh_gen ||
+ (xfs_is_metadata_inode(ip) &&
+ !S_ISDIR(VFS_I(ip)->i_mode)))) {
+ xfs_irele(ip);
+ ip = NULL;
+ }
+ }
+ if (!ip) {
+ if (!igrab(VFS_I(ip_in)))
+ return -EFSCORRUPTED;
+ ip = ip_in;
+ }
+
+ /* Run all the scrubbers. */
+ for (i = 0, v = vhead->svh_vecs; i < vhead->svh_nr; i++, v++) {
+ struct xfs_scrub_metadata sm = {
+ .sm_type = v->sv_type,
+ .sm_flags = v->sv_flags,
+ .sm_ino = vhead->svh_ino,
+ .sm_gen = vhead->svh_gen,
+ .sm_agno = vhead->svh_agno,
+ };
+
+ if (v->sv_type == XFS_SCRUB_TYPE_BARRIER) {
+ if (xfs_scrubv_previous_failures(mp, vhead, v)) {
+ v->sv_ret = -ECANCELED;
+ trace_xchk_scrubv_barrier_fail(mp, vhead, v);
+ break;
+ }
+
+ continue;
+ }
+
+ v->sv_ret = xfs_scrub_metadata(file, &sm);
+ v->sv_flags = sm.sm_flags;
+
+ /* Leave the inode in memory if something's wrong with it. */
+ if (xchk_needs_repair(&sm))
+ set_dontcache = false;
+
+ if (vhead->svh_rest_us) {
+ ktime_t expires;
+
+ expires = ktime_add_ns(ktime_get(),
+ vhead->svh_rest_us * 1000);
+ set_current_state(TASK_KILLABLE);
+ schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
+ }
+ if (fatal_signal_pending(current)) {
+ error = -EINTR;
+ break;
+ }
+ }
+
+ /*
+ * If we're holding the only reference to this inode and the scan was
+ * clean, mark it dontcache so that we don't pollute the cache.
+ */
+ if (set_dontcache && atomic_read(&VFS_I(ip)->i_count) == 1)
+ d_mark_dontcache(VFS_I(ip));
+ xfs_irele(ip);
+ return error;
+}