From 32dd4f9c506b1bf147c24cf05423cd893bc06e38 Mon Sep 17 00:00:00 2001
From: Dave Chinner
Date: Thu, 14 Jul 2022 12:04:43 +1000
Subject: xfs: remove a superfluous hash lookup when inserting new buffers

Currently on the slow path insert we repeat the initial hash table
lookup before we attempt the insert, resulting in two traversals of
the hash table to ensure the insert is valid. The rhashtable API
provides a method for an atomic lookup and insert operation, so we
can avoid one of the hash table traversals by using this method.

Adapted from a large patch containing this optimisation by Christoph
Hellwig.

Signed-off-by: Dave Chinner
Reviewed-by: Christoph Hellwig
Reviewed-by: Darrick J. Wong
---
 fs/xfs/xfs_buf.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 374c4e508b12..1a6542e01bec 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -623,8 +623,15 @@ xfs_buf_find_insert(
 	}
 
 	spin_lock(&pag->pag_buf_lock);
-	bp = rhashtable_lookup(&pag->pag_buf_hash, cmap, xfs_buf_hash_params);
+	bp = rhashtable_lookup_get_insert_fast(&pag->pag_buf_hash,
+			&new_bp->b_rhash_head, xfs_buf_hash_params);
+	if (IS_ERR(bp)) {
+		error = PTR_ERR(bp);
+		spin_unlock(&pag->pag_buf_lock);
+		goto out_free_buf;
+	}
 	if (bp) {
+		/* found an existing buffer */
 		atomic_inc(&bp->b_hold);
 		spin_unlock(&pag->pag_buf_lock);
 		error = xfs_buf_find_lock(bp, flags);
@@ -635,10 +642,8 @@ xfs_buf_find_insert(
 		goto out_free_buf;
 	}
 
-	/* The buffer keeps the perag reference until it is freed. */
+	/* The new buffer keeps the perag reference until it is freed. */
 	new_bp->b_pag = pag;
-	rhashtable_insert_fast(&pag->pag_buf_hash, &new_bp->b_rhash_head,
-			xfs_buf_hash_params);
 	spin_unlock(&pag->pag_buf_lock);
 	*bpp = new_bp;
 	return 0;
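
For context, below is a minimal sketch of the rhashtable atomic
lookup-and-insert pattern the patch switches to. The struct, field,
and function names (my_obj, my_params, my_obj_add) are hypothetical
illustrations, not taken from the XFS code; only
rhashtable_lookup_get_insert_fast(), IS_ERR()/PTR_ERR() and the
rhashtable_params fields are the real kernel API.

#include <linux/rhashtable.h>
#include <linux/err.h>
#include <linux/errno.h>

/* Hypothetical hashed object: a u32 key plus the rhashtable linkage. */
struct my_obj {
	u32			key;
	struct rhash_head	rhead;
};

static const struct rhashtable_params my_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct my_obj, key),
	.head_offset	= offsetof(struct my_obj, rhead),
};

/*
 * Insert @new unless an object with the same key is already hashed.
 * A single rhashtable_lookup_get_insert_fast() call replaces the
 * separate lookup + insert, so the table is traversed only once.
 * The three outcomes mirror what xfs_buf_find_insert() now handles:
 * an error pointer, an existing object, or NULL meaning @new went in.
 */
static int my_obj_add(struct rhashtable *ht, struct my_obj *new,
		      struct my_obj **out)
{
	struct my_obj *old;

	old = rhashtable_lookup_get_insert_fast(ht, &new->rhead, my_params);
	if (IS_ERR(old))
		return PTR_ERR(old);
	if (old) {
		/* Duplicate key: hand back the existing object. */
		*out = old;
		return -EEXIST;
	}
	*out = new;	/* @new is now in the hash table */
	return 0;
}

Note that in the patch the call is still made under pag->pag_buf_lock;
the rhashtable call itself is what collapses the lookup and the insert
into a single hash table traversal.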