summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDarrick J. Wong <darrick.wong@oracle.com>2020-10-25 17:14:52 -0700
committerDarrick J. Wong <darrick.wong@oracle.com>2020-10-26 18:32:20 -0700
commitd483a176342d5a1c0babd6332c120ca106a5894c (patch)
treec9c46df2ced626119e6b59ab82f5adb7478192f5
parent15b5ccb2fdcb0eb7388fe87089c935a78138dbd9 (diff)
xfs: parallelize inode inactivation
Split the inode inactivation work into per-AG work items so that we can take advantage of parallelization.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
-rw-r--r--fs/xfs/scrub/common.c2
-rw-r--r--fs/xfs/scrub/quotacheck.c2
-rw-r--r--fs/xfs/xfs_icache.c92
-rw-r--r--fs/xfs/xfs_icache.h3
-rw-r--r--fs/xfs/xfs_mount.c3
-rw-r--r--fs/xfs/xfs_mount.h4
-rw-r--r--fs/xfs/xfs_super.c5
7 files changed, 94 insertions(+), 17 deletions(-)
diff --git a/fs/xfs/scrub/common.c b/fs/xfs/scrub/common.c
index a0b4ec115263..b26e792bb459 100644
--- a/fs/xfs/scrub/common.c
+++ b/fs/xfs/scrub/common.c
@@ -922,6 +922,7 @@ xchk_stop_reaping(
{
sc->flags |= XCHK_REAPING_DISABLED;
xfs_blockgc_stop(sc->mp);
+ xfs_inactive_cancel_work(sc->mp);
}
/* Restart background reaping of resources. */
@@ -929,6 +930,7 @@ void
xchk_start_reaping(
struct xfs_scrub *sc)
{
+ xfs_inactive_schedule_now(sc->mp);
xfs_blockgc_start(sc->mp);
sc->flags &= ~XCHK_REAPING_DISABLED;
}
diff --git a/fs/xfs/scrub/quotacheck.c b/fs/xfs/scrub/quotacheck.c
index c18663b005a1..b6baf3db342a 100644
--- a/fs/xfs/scrub/quotacheck.c
+++ b/fs/xfs/scrub/quotacheck.c
@@ -572,7 +572,7 @@ xqcheck_iwalk(
* links but hasn't yet been inactivated. Try to push
* it towards inactivation.
*/
- xfs_inactive_inodes(xqc->sc->mp, NULL);
+ xfs_inactive_force_ino(xqc->sc->mp, ino);
/* fall through */
case -EINVAL:
/*
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 8939fc6bf1fe..426e63c7718a 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -1962,12 +1962,12 @@ xfs_inode_clear_cowblocks_tag(
/* Queue a new inode inactivation pass if there are reclaimable inodes. */
static void
xfs_inactive_work_queue(
- struct xfs_mount *mp)
+ struct xfs_perag *pag)
{
rcu_read_lock();
- if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_INACTIVE_TAG))
- queue_delayed_work(mp->m_inactive_workqueue,
- &mp->m_inactive_work,
+ if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_INACTIVE_TAG))
+ queue_delayed_work(pag->pag_mount->m_inactive_workqueue,
+ &pag->pag_inactive_work,
msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
rcu_read_unlock();
}
@@ -1990,7 +1990,7 @@ xfs_perag_set_inactive_tag(
spin_unlock(&mp->m_perag_lock);
/* schedule periodic background inode inactivation */
- xfs_inactive_work_queue(mp);
+ xfs_inactive_work_queue(pag);
trace_xfs_perag_set_inactive(mp, pag->pag_agno, -1, _RET_IP_);
}
@@ -2123,8 +2123,9 @@ void
xfs_inactive_worker(
struct work_struct *work)
{
- struct xfs_mount *mp = container_of(to_delayed_work(work),
- struct xfs_mount, m_inactive_work);
+ struct xfs_perag *pag = container_of(to_delayed_work(work),
+ struct xfs_perag, pag_inactive_work);
+ struct xfs_mount *mp = pag->pag_mount;
int error;
/*
@@ -2139,12 +2140,32 @@ xfs_inactive_worker(
if (!sb_start_write_trylock(mp->m_super))
return;
- error = xfs_inactive_inodes(mp, NULL);
+ error = xfs_inode_walk_ag(pag, XFS_INODE_WALK_INACTIVE,
+ xfs_inactive_inode, NULL, XFS_ICI_INACTIVE_TAG);
if (error && error != -EAGAIN)
xfs_err(mp, "inode inactivation failed, error %d", error);
sb_end_write(mp->m_super);
- xfs_inactive_work_queue(mp);
+ xfs_inactive_work_queue(pag);
+}
+
+/* Wait for all background inactivation work to finish. */
+static void
+xfs_inactive_flush(
+ struct xfs_mount *mp)
+{
+ struct xfs_perag *pag;
+ xfs_agnumber_t agno;
+
+ for_each_perag_tag(mp, agno, pag, XFS_ICI_INACTIVE_TAG) {
+ bool flush;
+
+ spin_lock(&pag->pag_ici_lock);
+ flush = pag->pag_ici_inactive > 0;
+ spin_unlock(&pag->pag_ici_lock);
+ if (flush)
+ flush_delayed_work(&pag->pag_inactive_work);
+ }
}
/* Flush all inode inactivation work that might be queued. */
@@ -2152,8 +2173,8 @@ void
xfs_inactive_force(
struct xfs_mount *mp)
{
- queue_delayed_work(mp->m_inactive_workqueue, &mp->m_inactive_work, 0);
- flush_delayed_work(&mp->m_inactive_work);
+ xfs_inactive_schedule_now(mp);
+ xfs_inactive_flush(mp);
}
/*
@@ -2165,9 +2186,54 @@ void
xfs_inactive_shutdown(
struct xfs_mount *mp)
{
- cancel_delayed_work_sync(&mp->m_inactive_work);
- flush_workqueue(mp->m_inactive_workqueue);
+ xfs_inactive_cancel_work(mp);
xfs_inactive_inodes(mp, NULL);
cancel_delayed_work_sync(&mp->m_reclaim_work);
xfs_reclaim_inodes(mp);
}
+
+/* Force the inactivation of this inode. */
+void
+xfs_inactive_force_ino(
+ struct xfs_mount *mp,
+ xfs_ino_t ino)
+{
+ struct xfs_perag *pag;
+
+ pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
+ xfs_inode_walk_ag(pag, XFS_INODE_WALK_INACTIVE, xfs_inactive_inode,
+ NULL, XFS_ICI_INACTIVE_TAG);
+ xfs_perag_put(pag);
+}
+
+/* Cancel all queued inactivation work. */
+void
+xfs_inactive_cancel_work(
+ struct xfs_mount *mp)
+{
+ struct xfs_perag *pag;
+ xfs_agnumber_t agno;
+
+ for_each_perag_tag(mp, agno, pag, XFS_ICI_INACTIVE_TAG)
+ cancel_delayed_work_sync(&pag->pag_inactive_work);
+ flush_workqueue(mp->m_inactive_workqueue);
+}
+
+/* Cancel all pending deferred inactivation work and reschedule it now. */
+void
+xfs_inactive_schedule_now(
+ struct xfs_mount *mp)
+{
+ struct xfs_perag *pag;
+ xfs_agnumber_t agno;
+
+ for_each_perag_tag(mp, agno, pag, XFS_ICI_INACTIVE_TAG) {
+ spin_lock(&pag->pag_ici_lock);
+ if (pag->pag_ici_inactive) {
+ cancel_delayed_work(&pag->pag_inactive_work);
+ queue_delayed_work(mp->m_inactive_workqueue,
+ &pag->pag_inactive_work, 0);
+ }
+ spin_unlock(&pag->pag_ici_lock);
+ }
+}
diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h
index 90425c7f2be4..b633b358685a 100644
--- a/fs/xfs/xfs_icache.h
+++ b/fs/xfs/xfs_icache.h
@@ -77,6 +77,9 @@ void xfs_blockgc_start(struct xfs_mount *mp);
void xfs_inactive_worker(struct work_struct *work);
int xfs_inactive_inodes(struct xfs_mount *mp, struct xfs_eofblocks *eofb);
void xfs_inactive_force(struct xfs_mount *mp);
+void xfs_inactive_force_ino(struct xfs_mount *mp, xfs_ino_t ino);
void xfs_inactive_shutdown(struct xfs_mount *mp);
+void xfs_inactive_cancel_work(struct xfs_mount *mp);
+void xfs_inactive_schedule_now(struct xfs_mount *mp);
#endif
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 573990c0f30e..01a05ec822a5 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -127,6 +127,7 @@ __xfs_free_perag(
struct xfs_perag *pag = container_of(head, struct xfs_perag, rcu_head);
ASSERT(!delayed_work_pending(&pag->pag_blockgc_work));
+ ASSERT(!delayed_work_pending(&pag->pag_inactive_work));
ASSERT(atomic_read(&pag->pag_ref) == 0);
kmem_free(pag);
}
@@ -148,6 +149,7 @@ xfs_free_perag(
ASSERT(pag);
ASSERT(atomic_read(&pag->pag_ref) == 0);
cancel_delayed_work_sync(&pag->pag_blockgc_work);
+ cancel_delayed_work_sync(&pag->pag_inactive_work);
xfs_iunlink_destroy(pag);
xfs_buf_hash_destroy(pag);
call_rcu(&pag->rcu_head, __xfs_free_perag);
@@ -202,6 +204,7 @@ xfs_initialize_perag(
pag->pag_mount = mp;
spin_lock_init(&pag->pag_ici_lock);
INIT_DELAYED_WORK(&pag->pag_blockgc_work, xfs_blockgc_worker);
+ INIT_DELAYED_WORK(&pag->pag_inactive_work, xfs_inactive_worker);
INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC);
if (xfs_buf_hash_init(pag))
goto out_free_pag;
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 7c4d07d5995c..409d4d438244 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -185,7 +185,6 @@ typedef struct xfs_mount {
uint64_t m_resblks_avail;/* available reserved blocks */
uint64_t m_resblks_save; /* reserved blks @ remount,ro */
struct delayed_work m_reclaim_work; /* background inode reclaim */
- struct delayed_work m_inactive_work; /* background inode inactive */
struct xfs_kobj m_kobj;
struct xfs_kobj m_error_kobj;
struct xfs_kobj m_error_meta_kobj;
@@ -378,6 +377,9 @@ typedef struct xfs_perag {
/* background prealloc block trimming */
struct delayed_work pag_blockgc_work;
+ /* background inode inactivation */
+ struct delayed_work pag_inactive_work;
+
/* reference count */
uint8_t pagf_refcount_level;
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 7308763eee78..42fc4e8b0a33 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -525,7 +525,8 @@ xfs_init_mount_workqueues(
goto out_destroy_eofb;
mp->m_inactive_workqueue = alloc_workqueue("xfs-inactive/%s",
- WQ_MEM_RECLAIM | WQ_FREEZABLE, 0, mp->m_super->s_id);
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_FREEZABLE, 0,
+ mp->m_super->s_id);
if (!mp->m_inactive_workqueue)
goto out_destroy_sync;
@@ -1463,6 +1464,7 @@ xfs_configure_background_workqueues(
max_active = min_t(unsigned int, threads, WQ_UNBOUND_MAX_ACTIVE);
workqueue_set_max_active(mp->m_blockgc_workqueue, max_active);
+ workqueue_set_max_active(mp->m_inactive_workqueue, max_active);
}
static int
@@ -1920,7 +1922,6 @@ static int xfs_init_fs_context(
mutex_init(&mp->m_growlock);
INIT_WORK(&mp->m_flush_inodes_work, xfs_flush_inodes_worker);
INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);
- INIT_DELAYED_WORK(&mp->m_inactive_work, xfs_inactive_worker);
mp->m_kobj.kobject.kset = xfs_kset;
/*
* We don't create the finobt per-ag space reservation until after log