author	Chuck Lever <chuck.lever@oracle.com>	2022-01-25 17:57:23 -0500
committer	Chuck Lever <chuck.lever@oracle.com>	2022-02-28 10:26:39 -0500
commit	c0219c499799c1e92bd570c15a47e6257a27bb15 (patch)
tree	6c07c77a5615aea5a04a947001810e152838e12d /net/sunrpc/svc_xprt.c
parent	a9ff2e99e9fa501ec965da03c18a5422b37a2f44 (diff)
SUNRPC: Merge svc_xprt_do_enqueue() into svc_xprt_enqueue()
Neil says: "These functions were separated in commit 0971374e2818 ("SUNRPC:
Reduce contention in svc_xprt_enqueue()") so that the XPT_BUSY check happened
before taking any spinlocks. We have since moved or removed the spinlocks so
the extra test is fairly pointless."

I've made this a separate patch in case the XPT_BUSY change has unexpected
consequences and needs to be reverted.

Suggested-by: Neil Brown <neilb@suse.de>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
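To make the shape of the change concrete, here is a condensed before-and-after
sketch of the enqueue path, assembled from the diff below (helper bodies are
elided; this is a simplification, not the complete kernel source):

/* Before this patch: the exported entry point tested XPT_BUSY up front
 * so that the spinlocks historically taken by the worker were never
 * touched for a busy transport.
 */
void svc_xprt_enqueue(struct svc_xprt *xprt)
{
	if (test_bit(XPT_BUSY, &xprt->xpt_flags))
		return;
	svc_xprt_do_enqueue(xprt);
}

/* After this patch: wrapper and worker are a single function, and the
 * XPT_BUSY test lives in svc_xprt_ready(), the readiness check the
 * enqueue path already performs before waking a thread.
 */
void svc_xprt_enqueue(struct svc_xprt *xprt)
{
	if (!svc_xprt_ready(xprt))	/* now covers XPT_BUSY too */
		return;
	/* ... pick a pool and wake an idle nfsd thread ... */
}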
Diffstat (limited to 'net/sunrpc/svc_xprt.c')
-rw-r--r--	net/sunrpc/svc_xprt.c	26
1 file changed, 10 insertions(+), 16 deletions(-)
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 9fce4f7774bb..1c2295209d08 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -32,7 +32,6 @@ static int svc_deferred_recv(struct svc_rqst *rqstp);
 static struct cache_deferred_req *svc_defer(struct cache_req *req);
 static void svc_age_temp_xprts(struct timer_list *t);
 static void svc_delete_xprt(struct svc_xprt *xprt);
-static void svc_xprt_do_enqueue(struct svc_xprt *xprt);
 
 /* apparently the "standard" is that clients close
  * idle connections after 5 minutes, servers after
@@ -267,12 +266,12 @@ void svc_xprt_received(struct svc_xprt *xprt)
 	}
 
 	/* As soon as we clear busy, the xprt could be closed and
-	 * 'put', so we need a reference to call svc_xprt_do_enqueue with:
+	 * 'put', so we need a reference to call svc_xprt_enqueue with:
 	 */
 	svc_xprt_get(xprt);
 	smp_mb__before_atomic();
 	clear_bit(XPT_BUSY, &xprt->xpt_flags);
-	svc_xprt_do_enqueue(xprt);
+	svc_xprt_enqueue(xprt);
 	svc_xprt_put(xprt);
 }
 EXPORT_SYMBOL_GPL(svc_xprt_received);
@@ -412,6 +411,8 @@ static bool svc_xprt_ready(struct svc_xprt *xprt)
 	smp_rmb();
 	xpt_flags = READ_ONCE(xprt->xpt_flags);
 
+	if (xpt_flags & BIT(XPT_BUSY))
+		return false;
 	if (xpt_flags & (BIT(XPT_CONN) | BIT(XPT_CLOSE)))
 		return true;
 	if (xpt_flags & (BIT(XPT_DATA) | BIT(XPT_DEFERRED))) {
@@ -424,7 +425,12 @@ static bool svc_xprt_ready(struct svc_xprt *xprt)
 	return false;
 }
 
-static void svc_xprt_do_enqueue(struct svc_xprt *xprt)
+/**
+ * svc_xprt_enqueue - Queue a transport on an idle nfsd thread
+ * @xprt: transport with data pending
+ *
+ */
+void svc_xprt_enqueue(struct svc_xprt *xprt)
 {
 	struct svc_pool *pool;
 	struct svc_rqst *rqstp = NULL;
@@ -468,18 +474,6 @@ out_unlock:
 	put_cpu();
 	trace_svc_xprt_enqueue(xprt, rqstp);
 }
-
-/*
- * Queue up a transport with data pending. If there are idle nfsd
- * processes, wake 'em up.
- *
- */
-void svc_xprt_enqueue(struct svc_xprt *xprt)
-{
-	if (test_bit(XPT_BUSY, &xprt->xpt_flags))
-		return;
-	svc_xprt_do_enqueue(xprt);
-}
 EXPORT_SYMBOL_GPL(svc_xprt_enqueue);
 
 /*
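For reference, after both hunks touching svc_xprt_ready() are applied, the
function reads roughly as follows. The write-space handling between the two
hunks is elided here because it is unchanged and not shown in this diff:

static bool svc_xprt_ready(struct svc_xprt *xprt)
{
	unsigned long xpt_flags;

	/* Ensure xpt_flags updates made on other CPUs (for example the
	 * clear_bit() in svc_xprt_received()) are visible here.
	 */
	smp_rmb();
	xpt_flags = READ_ONCE(xprt->xpt_flags);

	if (xpt_flags & BIT(XPT_BUSY))
		return false;	/* new: a busy transport is never ready */
	if (xpt_flags & (BIT(XPT_CONN) | BIT(XPT_CLOSE)))
		return true;
	if (xpt_flags & (BIT(XPT_DATA) | BIT(XPT_DEFERRED))) {
		/* ... write-space checks elided (unchanged by this patch) ... */
	}
	return false;
}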