diff options
author | Darrick J. Wong <darrick.wong@oracle.com> | 2019-08-30 15:45:12 -0700 |
---|---|---|
committer | Darrick J. Wong <darrick.wong@oracle.com> | 2019-10-19 10:39:13 -0700 |
commit | daab1182ad8f5e1e479a904f2a4ceea3d7d20c65 (patch) | |
tree | 754a47f6e43c3420d7f9c48589d22836fac3d01c /fs/xfs/xfs_icache.c | |
parent | 1034eb85aa650bfc1bf7034837e8dbc3f1f671f7 (diff) |
xfs: create a polled function to force inode inactivation (tag: deferred-inactivation_2019-10-19)
Create a polled version of xfs_inactive_force so that we can force
inactivation while holding a lock (usually the umount lock) without
tripping over the softlockup timer. This is for callers that hold vfs
locks while calling inactivation, which is currently unmount, iunlink
processing during mount, and rw->ro remount.
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Diffstat (limited to 'fs/xfs/xfs_icache.c')
-rw-r--r-- | fs/xfs/xfs_icache.c | 42 |
1 file changed, 42 insertions(+), 0 deletions(-)
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 4cce17eb3d6c..1c06e2c5f10f 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -24,6 +24,7 @@
 #include "xfs_reflink.h"
 #include <linux/iversion.h>
+#include <linux/nmi.h>	/* touch_softlockup_watchdog() */
 
 static void xfs_perag_set_inactive_tag(struct xfs_perag *pag);
 static void xfs_perag_clear_inactive_tag(struct xfs_perag *pag);
@@ -2139,6 +2140,8 @@ xfs_inactive_inodes_pag(
 	if (inctx.kick_reclaim)
 		xfs_reclaim_work_queue(pag->pag_mount);
 
+	/* Wake anyone polling m_inactive_wait (e.g. xfs_inactive_force_poll). */
+	wake_up(&pag->pag_mount->m_inactive_wait);
+
 	return error;
 }
 
@@ -2162,6 +2165,8 @@ xfs_inactive_inodes(
 	if (inctx.kick_reclaim)
 		xfs_reclaim_work_queue(mp);
 
+	/* Wake anyone polling m_inactive_wait (e.g. xfs_inactive_force_poll). */
+	wake_up(&mp->m_inactive_wait);
+
 	return error;
 }
 
@@ -2273,3 +2278,40 @@ xfs_inactive_schedule_work(
 		xfs_perag_put(pag);
 	}
 }
+
+/* Return true if there are inodes still being inactivated. */
+static bool
+xfs_inactive_pending(
+	struct xfs_mount	*mp)
+{
+	struct xfs_perag	*pag;
+	xfs_agnumber_t		ag = 0;
+	bool			ret = false;
+
+	/*
+	 * Walk every AG carrying the reclaim tag, stopping at the first one
+	 * with pending inactive inodes.  pag_ici_inactive is sampled under
+	 * pag_ici_lock; the count can change again as soon as we drop the
+	 * lock, so this is a point-in-time answer -- callers re-poll.
+	 */
+	while (!ret && (pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
+		/* Resume the tag lookup after this AG on the next pass. */
+		ag = pag->pag_agno + 1;
+		spin_lock(&pag->pag_ici_lock);
+		if (pag->pag_ici_inactive)
+			ret = true;
+		spin_unlock(&pag->pag_ici_lock);
+		xfs_perag_put(pag);
+	}
+
+	return ret;
+}
+
+/*
+ * Flush all pending inactivation work and poll until finished.  This function
+ * is for callers that must flush with vfs locks held, such as unmount,
+ * remount, and iunlinks processing during mount.
+ */
+void
+xfs_inactive_force_poll(
+	struct xfs_mount	*mp)
+{
+	xfs_inactive_schedule_work(mp, 0);
+	/*
+	 * Wait in short (HZ / 10) slices and pet the softlockup watchdog
+	 * between slices so that a long flush performed while holding vfs
+	 * locks does not trip the softlockup timer.
+	 */
+	while (wait_event_timeout(mp->m_inactive_wait,
+			xfs_inactive_pending(mp) == false, HZ / 10) == 0) {
+		touch_softlockup_watchdog();
+	}
+}