author    NeilBrown <neilb@suse.de>    2021-08-30 08:36:34 +1000
committer Chuck Lever <chuck.lever@oracle.com>    2021-09-01 11:05:07 -0400
commit    e38b3f20059426a0adbde014ff71071739ab5226 (patch)
tree      2df72c4a0c12be47a27ed11f22621d68eaee435b /net
parent    0bcc7ca40bd823193224e9f38bafbd8325aaf566 (diff)
SUNRPC: don't pause on incomplete allocation
alloc_pages_bulk_array() attempts to allocate at least one page based on
the provided pages, and then opportunistically allocates more if that can
be done without dropping the spinlock.  So if it returns fewer than
requested, that could just mean that it needed to drop the lock.  In that
case, try again immediately.

Only pause for a time if no progress could be made.

Reported-and-tested-by: Mike Javorski <mike.javorski@gmail.com>
Reported-and-tested-by: Lothar Paltins <lopa@mailbox.org>
Fixes: f6e70aab9dfe ("SUNRPC: refresh rq_pages using a bulk page allocator")
Signed-off-by: NeilBrown <neilb@suse.de>
Acked-by: Mel Gorman <mgorman@suse.com>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
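As a rough userspace sketch of the retry pattern described above (not the
kernel code itself, which is in the diff below): bulk_alloc() is a
hypothetical stand-in for alloc_pages_bulk_array() that fills only a few
slots per call to mimic an early return, and usleep() stands in for the
schedule_timeout() backoff in svc_alloc_arg().  The loop retries
immediately while a call makes progress and only pauses when it does not.

    /* Illustrative sketch only; names bulk_alloc, MAXPAGES and the
     * usleep() backoff are stand-ins, not kernel interfaces. */
    #include <stdio.h>
    #include <unistd.h>

    #define MAXPAGES 19

    /* Fills empty slots in pages[] and returns the total number of
     * populated slots.  Like a real bulk allocator, it may stop early,
     * so the result can be anywhere between the old count and 'want'. */
    static size_t bulk_alloc(size_t want, void *pages[])
    {
            static char backing[MAXPAGES][4096];
            size_t filled = 0, newly = 0;

            for (size_t i = 0; i < want; i++) {
                    if (pages[i]) {
                            filled++;
                    } else if (newly < 4) {  /* simulate an early return */
                            pages[i] = backing[i];
                            filled++;
                            newly++;
                    }
            }
            return filled;
    }

    int main(void)
    {
            void *pages[MAXPAGES] = { 0 };
            size_t want = MAXPAGES;
            size_t filled, ret;

            /* Retry at once while each call makes progress; only back
             * off when a call fills no new slots. */
            for (filled = 0; filled < want; filled = ret) {
                    ret = bulk_alloc(want, pages);
                    if (ret > filled)
                            continue;  /* made progress, don't sleep yet */

                    /* No progress: the kernel code sleeps here instead. */
                    usleep(1000);
            }
            printf("filled %zu of %zu slots\n", filled, want);
            return 0;
    }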
Diffstat (limited to 'net')
-rw-r--r--    net/sunrpc/svc_xprt.c    13
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 682058a5ec13..796eebf1787d 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -663,7 +663,7 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
 {
 	struct svc_serv *serv = rqstp->rq_server;
 	struct xdr_buf *arg = &rqstp->rq_arg;
-	unsigned long pages, filled;
+	unsigned long pages, filled, ret;
 
 	pagevec_init(&rqstp->rq_pvec);
 
@@ -675,11 +675,12 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
 		pages = RPCSVC_MAXPAGES;
 	}
 
-	for (;;) {
-		filled = alloc_pages_bulk_array(GFP_KERNEL, pages,
-						rqstp->rq_pages);
-		if (filled == pages)
-			break;
+	for (filled = 0; filled < pages; filled = ret) {
+		ret = alloc_pages_bulk_array(GFP_KERNEL, pages,
+					     rqstp->rq_pages);
+		if (ret > filled)
+			/* Made progress, don't sleep yet */
+			continue;
 
 		set_current_state(TASK_INTERRUPTIBLE);
 		if (signalled() || kthread_should_stop()) {