author     Darrick J. Wong <darrick.wong@oracle.com>   2019-05-21 17:08:14 -0700
committer  Darrick J. Wong <darrick.wong@oracle.com>   2019-08-16 19:06:29 -0700
commit     97c97e52c9dabfc28d197832d84027331faf1bba (patch)
tree       73f70af7fc4be7da8b10239199e612dc5d4cf9e1 /fs/xfs/xfs_icache.c
parent     dafecdb140f6b8d45e213f9e0363d6b1a8f551d9 (diff)
xfs: create a polled function to force inode inactivation (deferred-inactivation_2019-08-16)
Create a polled version of xfs_inactive_force so that we can force inactivation while holding a lock (usually the umount lock) without tripping over the softlockup timer. This is for callers that hold vfs locks while calling inactivation, which currently means unmount, iunlink processing during mount, and rw->ro remount.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
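As an illustration only, and not part of this patch, a caller that must flush inactivation while holding a VFS lock would reach for the polled variant rather than a plain blocking flush. The helper below is hypothetical; it merely stands in for the unmount, remount, and iunlink-processing paths named above:

        /*
         * Hypothetical caller sketch (not part of this series): flush all
         * pending inactivation while a VFS lock is held.  The polled variant
         * wakes every HZ/10 ticks to touch the softlockup watchdog, so a
         * long flush does not trip softlockup warnings.
         */
        static void
        xfs_example_flush_under_vfs_lock(
                struct xfs_mount        *mp)
        {
                xfs_inactive_force_poll(mp);
        }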
Diffstat (limited to 'fs/xfs/xfs_icache.c')
-rw-r--r--  fs/xfs/xfs_icache.c | 42
1 file changed, 42 insertions(+), 0 deletions(-)
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 13e7b806d9a1..168c3c71887c 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -24,6 +24,7 @@
 #include "xfs_reflink.h"
 
 #include <linux/iversion.h>
+#include <linux/nmi.h>
 
 static void xfs_perag_set_inactive_tag(struct xfs_perag *pag);
 static void xfs_perag_clear_inactive_tag(struct xfs_perag *pag);
@@ -2139,6 +2140,8 @@ xfs_inactive_inodes_pag(
 	if (inctx.kick_reclaim)
 		xfs_reclaim_work_queue(pag->pag_mount);
 
+	wake_up(&pag->pag_mount->m_inactive_wait);
+
 	return error;
 }
 
@@ -2162,6 +2165,8 @@ xfs_inactive_inodes(
 	if (inctx.kick_reclaim)
 		xfs_reclaim_work_queue(mp);
 
+	wake_up(&mp->m_inactive_wait);
+
 	return error;
 }
 
@@ -2273,3 +2278,40 @@ xfs_inactive_schedule_work(
 		xfs_perag_put(pag);
 	}
 }
+
+/* Return true if there are inodes still being inactivated. */
+static bool
+xfs_inactive_pending(
+	struct xfs_mount	*mp)
+{
+	struct xfs_perag	*pag;
+	xfs_agnumber_t		ag = 0;
+	bool			ret = false;
+
+	while (!ret && (pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
+		ag = pag->pag_agno + 1;
+		spin_lock(&pag->pag_ici_lock);
+		if (pag->pag_ici_inactive)
+			ret = true;
+		spin_unlock(&pag->pag_ici_lock);
+		xfs_perag_put(pag);
+	}
+
+	return ret;
+}
+
+/*
+ * Flush all pending inactivation work and poll until finished. This function
+ * is for callers that must flush with vfs locks held, such as unmount,
+ * remount, and iunlinks processing during mount.
+ */
+void
+xfs_inactive_force_poll(
+	struct xfs_mount	*mp)
+{
+	xfs_inactive_schedule_work(mp, 0);
+	while (wait_event_timeout(mp->m_inactive_wait,
+			xfs_inactive_pending(mp) == false, HZ / 10) == 0) {
+		touch_softlockup_watchdog();
+	}
+}
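The wake_up() calls added to xfs_inactive_inodes_pag() and xfs_inactive_inodes() pair with the wait_event_timeout() above: each completed inactivation pass wakes m_inactive_wait, the poller re-checks xfs_inactive_pending(), and because wait_event_timeout() returns 0 when the timeout elapses with the condition still false, the loop wakes at least every HZ/10 (roughly 100ms) to touch the softlockup watchdog. The m_inactive_wait waitqueue itself is not set up in this file; presumably another patch in the series initializes it during mount setup, along the lines of this assumed sketch:

        /* Assumed initialization (not in this diff) of the waitqueue polled above. */
        init_waitqueue_head(&mp->m_inactive_wait);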