author		Darrick J. Wong <darrick.wong@oracle.com>	2020-02-19 17:02:12 -0800
committer	Darrick J. Wong <darrick.wong@oracle.com>	2020-03-03 18:47:44 -0800
commit		bc7b833dccadaeb8f9ae370ee238f9b8d1baf133 (patch)
tree		4bcf96947ead5c9ea3badd211f664f36e6600534 /fs/xfs/xfs_icache.c
parent		a7c1a9f4310b7de99e86d4f80265c26263ab4ec0 (diff)
xfs: parallelize inode inactivation
Split the inode inactivation work into per-AG work items so that we can take advantage of parallelization.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
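For context, a minimal sketch of the per-AG deferred-work pattern this patch moves to: each AG carries its own delayed_work, and all of the per-AG items are queued on a single mount-wide workqueue so several AGs can be inactivated concurrently. The names below (demo_perag, demo_inactive_worker, demo_start_inactive) are hypothetical placeholders, not the real XFS symbols.

	#include <linux/workqueue.h>
	#include <linux/kernel.h>

	/* Hypothetical stand-in for the per-AG structure; not the real xfs_perag. */
	struct demo_perag {
		struct delayed_work	inactive_work;	/* one work item per AG */
	};

	/* Per-AG worker: recovers its own AG from the work item it was queued on. */
	static void demo_inactive_worker(struct work_struct *work)
	{
		struct demo_perag *pag = container_of(to_delayed_work(work),
				struct demo_perag, inactive_work);

		/* ...walk and inactivate only this AG's tagged inodes here... */
		(void)pag;
	}

	/* Initialize one AG's work item and queue it on a shared mount-wide workqueue. */
	static void demo_start_inactive(struct workqueue_struct *wq,
			struct demo_perag *pag, unsigned long delay_jiffies)
	{
		INIT_DELAYED_WORK(&pag->inactive_work, demo_inactive_worker);
		queue_delayed_work(wq, &pag->inactive_work, delay_jiffies);
	}

Because each AG owns a distinct work item, independent AGs no longer serialize behind one mount-wide work item; the workqueue can run several demo_inactive_worker() instances at once.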
Diffstat (limited to 'fs/xfs/xfs_icache.c')
-rw-r--r--	fs/xfs/xfs_icache.c	90
1 file changed, 77 insertions(+), 13 deletions(-)
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 13b318dc2e89..5240e9e517d7 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -2130,12 +2130,12 @@ xfs_inode_clear_cowblocks_tag(
/* Queue a new inode inactivation pass if there are reclaimable inodes. */
static void
xfs_inactive_work_queue(
- struct xfs_mount *mp)
+ struct xfs_perag *pag)
{
rcu_read_lock();
- if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_INACTIVE_TAG))
- queue_delayed_work(mp->m_inactive_workqueue,
- &mp->m_inactive_work,
+ if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_INACTIVE_TAG))
+ queue_delayed_work(pag->pag_mount->m_inactive_workqueue,
+ &pag->pag_inactive_work,
msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
rcu_read_unlock();
}
@@ -2158,7 +2158,7 @@ xfs_perag_set_inactive_tag(
spin_unlock(&mp->m_perag_lock);
/* schedule periodic background inode inactivation */
- xfs_inactive_work_queue(mp);
+ xfs_inactive_work_queue(pag);
trace_xfs_perag_set_inactive(mp, pag->pag_agno, -1, _RET_IP_);
}
@@ -2276,6 +2276,19 @@ static const struct xfs_ici_walk_ops xfs_inactive_iwalk_ops = {
};
/*
+ * Inactivate the inodes in an AG. Even if the filesystem is corrupted, we
+ * still need to clear the INACTIVE iflag so that we can move on to reclaiming
+ * the inode.
+ */
+static int
+xfs_inactive_inodes_pag(
+ struct xfs_perag *pag)
+{
+ return xfs_ici_walk_ag(pag, &xfs_inactive_iwalk_ops, 0, NULL,
+ XFS_ICI_INACTIVE_TAG);
+}
+
+/*
* Walk the AGs and reclaim the inodes in them. Even if the filesystem is
* corrupted, we still need to clear the INACTIVE iflag so that we can move
* on to reclaiming the inode.
@@ -2294,8 +2307,9 @@ void
xfs_inactive_worker(
struct work_struct *work)
{
- struct xfs_mount *mp = container_of(to_delayed_work(work),
- struct xfs_mount, m_inactive_work);
+ struct xfs_perag *pag = container_of(to_delayed_work(work),
+ struct xfs_perag, pag_inactive_work);
+ struct xfs_mount *mp = pag->pag_mount;
int error;
/*
@@ -2310,12 +2324,31 @@ xfs_inactive_worker(
if (!sb_start_write_trylock(mp->m_super))
return;
- error = xfs_inactive_inodes(mp, NULL);
+ error = xfs_inactive_inodes_pag(pag);
if (error && error != -EAGAIN)
xfs_err(mp, "inode inactivation failed, error %d", error);
sb_end_write(mp->m_super);
- xfs_inactive_work_queue(mp);
+ xfs_inactive_work_queue(pag);
+}
+
+/* Wait for all background inactivation work to finish. */
+static void
+xfs_inactive_flush(
+ struct xfs_mount *mp)
+{
+ struct xfs_perag *pag;
+ xfs_agnumber_t agno;
+
+ for_each_perag_tag(mp, agno, pag, XFS_ICI_INACTIVE_TAG) {
+ bool flush;
+
+ spin_lock(&pag->pag_ici_lock);
+ flush = pag->pag_ici_inactive > 0;
+ spin_unlock(&pag->pag_ici_lock);
+ if (flush)
+ flush_delayed_work(&pag->pag_inactive_work);
+ }
}
/* Flush all inode inactivation work that might be queued. */
@@ -2323,8 +2356,8 @@ void
xfs_inactive_force(
struct xfs_mount *mp)
{
- queue_delayed_work(mp->m_inactive_workqueue, &mp->m_inactive_work, 0);
- flush_delayed_work(&mp->m_inactive_work);
+ xfs_inactive_schedule_now(mp);
+ xfs_inactive_flush(mp);
}
/*
@@ -2336,9 +2369,40 @@ void
xfs_inactive_shutdown(
struct xfs_mount *mp)
{
- cancel_delayed_work_sync(&mp->m_inactive_work);
- flush_workqueue(mp->m_inactive_workqueue);
+ xfs_inactive_cancel_work(mp);
xfs_inactive_inodes(mp, NULL);
cancel_delayed_work_sync(&mp->m_reclaim_work);
xfs_reclaim_inodes(mp, SYNC_WAIT);
}
+
+/* Cancel all queued inactivation work. */
+void
+xfs_inactive_cancel_work(
+ struct xfs_mount *mp)
+{
+ struct xfs_perag *pag;
+ xfs_agnumber_t agno;
+
+ for_each_perag_tag(mp, agno, pag, XFS_ICI_INACTIVE_TAG)
+ cancel_delayed_work_sync(&pag->pag_inactive_work);
+ flush_workqueue(mp->m_inactive_workqueue);
+}
+
+/* Cancel all pending deferred inactivation work and reschedule it now. */
+void
+xfs_inactive_schedule_now(
+ struct xfs_mount *mp)
+{
+ struct xfs_perag *pag;
+ xfs_agnumber_t agno;
+
+ for_each_perag_tag(mp, agno, pag, XFS_ICI_INACTIVE_TAG) {
+ spin_lock(&pag->pag_ici_lock);
+ if (pag->pag_ici_inactive) {
+ cancel_delayed_work(&pag->pag_inactive_work);
+ queue_delayed_work(mp->m_inactive_workqueue,
+ &pag->pag_inactive_work, 0);
+ }
+ spin_unlock(&pag->pag_ici_lock);
+ }
+}
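The xfs_inactive_force() hunk above composes two steps: requeue every tagged AG's work with no delay, then flush each item so the caller only returns after the workers have run. A standalone sketch of that force-and-drain pattern, again with hypothetical names rather than the real XFS helpers:

	#include <linux/workqueue.h>

	/* Hypothetical per-AG container; only the work item matters here. */
	struct demo_ag {
		struct delayed_work	work;
	};

	static void demo_force_and_drain(struct workqueue_struct *wq,
			struct demo_ag *ags, unsigned int nr_ags)
	{
		unsigned int i;

		for (i = 0; i < nr_ags; i++) {
			/* Drop any pending delay and requeue to run as soon as possible. */
			cancel_delayed_work(&ags[i].work);
			queue_delayed_work(wq, &ags[i].work, 0);
		}
		/* Wait for every requeued item to finish before returning. */
		for (i = 0; i < nr_ags; i++)
			flush_delayed_work(&ags[i].work);
	}

Kicking all AGs first and only then flushing lets the per-AG workers overlap (given a workqueue that allows more than one active item), rather than scheduling and waiting for each AG one at a time.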