author     Chuck Lever <chuck.lever@oracle.com>                 2020-06-11 12:44:56 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>      2020-08-19 08:16:21 +0200
commit     789be9705ed1f8a5e045891825bee0ead71a0dfc
tree       b277cab5a9a08c359db82a8510bff78dfd2f1776
parent     fa6bd08869c5c2c8af315cb1c19b9d791810af0e
svcrdma: Fix page leak in svc_rdma_recv_read_chunk()
[ Upstream commit e814eecbe3bbeaa8b004d25a4b8974d232b765a9 ]

Commit 07d0ff3b0cd2 ("svcrdma: Clean up Read chunk path") moved the
page saver logic so that it gets executed even when an error occurs.
In that case, the I/O is never posted, and those pages are then
leaked. Errors in this path, however, are quite rare.

Fixes: 07d0ff3b0cd2 ("svcrdma: Clean up Read chunk path")
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
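To see the ownership handoff in isolation, here is a minimal userspace C sketch of the pattern the patch restores; it is an illustration only, not kernel code. post_io(), save_io_pages() and release_pages() are hypothetical stand-ins for svc_rdma_post_chunk_ctxt(), svc_rdma_save_io_pages() and svc_xprt_release(). The point is that the caller's page array is cleared only after a successful post, so the release path can still free the pages when posting fails.

/* Userspace model of the fix: pages are detached from the caller's
 * array only after a successful post; on error they remain owned by
 * the caller and are freed by the release path, so nothing leaks.
 */
#include <stdio.h>
#include <stdlib.h>

#define NUM_PAGES 4

/* Stand-in for svc_rdma_post_chunk_ctxt(): 0 on success, -1 on error. */
static int post_io(int simulate_error)
{
        return simulate_error ? -1 : 0;
}

/* Stand-in for svc_rdma_save_io_pages(): detach pages now owned by
 * the posted I/O so the release path will not free them early.
 */
static void save_io_pages(void **pages, unsigned int start,
                          unsigned int num_pages)
{
        unsigned int i;

        for (i = start; i < start + num_pages; i++)
                pages[i] = NULL;
}

/* Stand-in for svc_xprt_release(): frees whatever the caller still owns. */
static void release_pages(void **pages, unsigned int num_pages)
{
        unsigned int i;

        for (i = 0; i < num_pages; i++)
                free(pages[i]);        /* free(NULL) is a no-op */
}

int main(void)
{
        void *pages[NUM_PAGES];
        unsigned int i;
        int ret;

        for (i = 0; i < NUM_PAGES; i++)
                pages[i] = malloc(64);

        ret = post_io(1);        /* simulate the rare error path */
        if (ret == 0)
                save_io_pages(pages, 0, NUM_PAGES);

        /* The buggy ordering detached the pages even when the post
         * failed, so the release path could not free them: a leak.
         */
        release_pages(pages, NUM_PAGES);
        printf("post_io() returned %d; pages reclaimed either way\n", ret);
        return 0;
}

This mirrors the ordering in the diff below: svc_rdma_save_io_pages() is called only after svc_rdma_post_chunk_ctxt() returns success, and it runs after all Read WRs are constructed so a page shared by two RDMA segments is handled once.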
Diffstat (limited to 'net')
-rw-r--r--   net/sunrpc/xprtrdma/svc_rdma_rw.c   28
1 file changed, 21 insertions, 7 deletions
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index 066af6b2eb01..0bb3f0dca80d 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -677,7 +677,6 @@ static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
                                      struct svc_rdma_read_info *info,
                                      __be32 *p)
 {
-        unsigned int i;
         int ret;
 
         ret = -EINVAL;
@@ -700,12 +699,6 @@ static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
                 info->ri_chunklen += rs_length;
         }
 
-        /* Pages under I/O have been copied to head->rc_pages.
-         * Prevent their premature release by svc_xprt_release() .
-         */
-        for (i = 0; i < info->ri_readctxt->rc_page_count; i++)
-                rqstp->rq_pages[i] = NULL;
-
         return ret;
 }
 
@@ -800,6 +793,26 @@ out:
         return ret;
 }
 
+/* Pages under I/O have been copied to head->rc_pages. Ensure they
+ * are not released by svc_xprt_release() until the I/O is complete.
+ *
+ * This has to be done after all Read WRs are constructed to properly
+ * handle a page that is part of I/O on behalf of two different RDMA
+ * segments.
+ *
+ * Do this only if I/O has been posted. Otherwise, we do indeed want
+ * svc_xprt_release() to clean things up properly.
+ */
+static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
+                                   const unsigned int start,
+                                   const unsigned int num_pages)
+{
+        unsigned int i;
+
+        for (i = start; i < num_pages + start; i++)
+                rqstp->rq_pages[i] = NULL;
+}
+
 /**
  * svc_rdma_recv_read_chunk - Pull a Read chunk from the client
  * @rdma: controlling RDMA transport
@@ -853,6 +866,7 @@ int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
         ret = svc_rdma_post_chunk_ctxt(&info->ri_cc);
         if (ret < 0)
                 goto out_err;
+        svc_rdma_save_io_pages(rqstp, 0, head->rc_page_count);
         return 0;
 
 out_err: