Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/addr.c                          |  40
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c          |   2
-rw-r--r--  net/sunrpc/clnt.c                          |  33
-rw-r--r--  net/sunrpc/sched.c                         |  20
-rw-r--r--  net/sunrpc/svc.c                           |  80
-rw-r--r--  net/sunrpc/svc_xprt.c                      |   1
-rw-r--r--  net/sunrpc/sysfs.c                         |  12
-rw-r--r--  net/sunrpc/xdr.c                           |  32
-rw-r--r--  net/sunrpc/xprt.c                          |  41
-rw-r--r--  net/sunrpc/xprtrdma/frwr_ops.c             |  48
-rw-r--r--  net/sunrpc/xprtrdma/rpc_rdma.c             |  23
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_recvfrom.c    |   9
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_rw.c          |  30
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_sendto.c      |  14
-rw-r--r--  net/sunrpc/xprtrdma/verbs.c                |   3
-rw-r--r--  net/sunrpc/xprtrdma/xprt_rdma.h            |   6
-rw-r--r--  net/sunrpc/xprtsock.c                      | 109
17 files changed, 257 insertions(+), 246 deletions(-)
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index 6e4dbd577a39..d435bffc6199 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -162,8 +162,10 @@ static int rpc_parse_scope_id(struct net *net, const char *buf,
const size_t buflen, const char *delim,
struct sockaddr_in6 *sin6)
{
- char *p;
+ char p[IPV6_SCOPE_ID_LEN + 1];
size_t len;
+ u32 scope_id = 0;
+ struct net_device *dev;
if ((buf + buflen) == delim)
return 1;
@@ -175,29 +177,23 @@ static int rpc_parse_scope_id(struct net *net, const char *buf,
return 0;
len = (buf + buflen) - delim - 1;
- p = kmemdup_nul(delim + 1, len, GFP_KERNEL);
- if (p) {
- u32 scope_id = 0;
- struct net_device *dev;
-
- dev = dev_get_by_name(net, p);
- if (dev != NULL) {
- scope_id = dev->ifindex;
- dev_put(dev);
- } else {
- if (kstrtou32(p, 10, &scope_id) != 0) {
- kfree(p);
- return 0;
- }
- }
-
- kfree(p);
-
- sin6->sin6_scope_id = scope_id;
- return 1;
+ if (len > IPV6_SCOPE_ID_LEN)
+ return 0;
+
+ memcpy(p, delim + 1, len);
+ p[len] = 0;
+
+ dev = dev_get_by_name(net, p);
+ if (dev != NULL) {
+ scope_id = dev->ifindex;
+ dev_put(dev);
+ } else {
+ if (kstrtou32(p, 10, &scope_id) != 0)
+ return 0;
}
- return 0;
+ sin6->sin6_scope_id = scope_id;
+ return 1;
}
static size_t rpc_pton6(struct net *net, const char *buf, const size_t buflen,
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 1f2817195549..b87565b64928 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -781,7 +781,7 @@ gss_write_verf(struct svc_rqst *rqstp, struct gss_ctx *ctx_id, u32 seq)
svc_putnl(rqstp->rq_res.head, RPC_AUTH_GSS);
xdr_seq = kmalloc(4, GFP_KERNEL);
if (!xdr_seq)
- return -1;
+ return -ENOMEM;
*xdr_seq = htonl(seq);
iov.iov_base = xdr_seq;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index f056ff931444..a312ea2bc440 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1076,24 +1076,21 @@ void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt)
static
void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
{
-
- if (clnt != NULL) {
- rpc_task_set_transport(task, clnt);
- task->tk_client = clnt;
- refcount_inc(&clnt->cl_count);
- if (clnt->cl_softrtry)
- task->tk_flags |= RPC_TASK_SOFT;
- if (clnt->cl_softerr)
- task->tk_flags |= RPC_TASK_TIMEOUT;
- if (clnt->cl_noretranstimeo)
- task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT;
- if (atomic_read(&clnt->cl_swapper))
- task->tk_flags |= RPC_TASK_SWAPPER;
- /* Add to the client's list of all tasks */
- spin_lock(&clnt->cl_lock);
- list_add_tail(&task->tk_task, &clnt->cl_tasks);
- spin_unlock(&clnt->cl_lock);
- }
+ rpc_task_set_transport(task, clnt);
+ task->tk_client = clnt;
+ refcount_inc(&clnt->cl_count);
+ if (clnt->cl_softrtry)
+ task->tk_flags |= RPC_TASK_SOFT;
+ if (clnt->cl_softerr)
+ task->tk_flags |= RPC_TASK_TIMEOUT;
+ if (clnt->cl_noretranstimeo)
+ task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT;
+ if (atomic_read(&clnt->cl_swapper))
+ task->tk_flags |= RPC_TASK_SWAPPER;
+ /* Add to the client's list of all tasks */
+ spin_lock(&clnt->cl_lock);
+ list_add_tail(&task->tk_task, &clnt->cl_tasks);
+ spin_unlock(&clnt->cl_lock);
}
static void
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index c045f63d11fa..e2c835482791 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -277,9 +277,17 @@ static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
- static atomic_t rpc_pid;
+ struct rpc_clnt *clnt = task->tk_client;
- task->tk_pid = atomic_inc_return(&rpc_pid);
+ /* Might be a task carrying a reverse-direction operation */
+ if (!clnt) {
+ static atomic_t rpc_pid;
+
+ task->tk_pid = atomic_inc_return(&rpc_pid);
+ return;
+ }
+
+ task->tk_pid = atomic_inc_return(&clnt->cl_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
@@ -829,6 +837,7 @@ void rpc_exit_task(struct rpc_task *task)
else if (task->tk_client)
rpc_count_iostats(task, task->tk_client->cl_metrics);
if (task->tk_ops->rpc_call_done != NULL) {
+ trace_rpc_task_call_done(task, task->tk_ops->rpc_call_done);
task->tk_ops->rpc_call_done(task, task->tk_calldata);
if (task->tk_action != NULL) {
/* Always release the RPC slot and buffer memory */
@@ -903,8 +912,10 @@ static void __rpc_execute(struct rpc_task *task)
/*
* Lockless check for whether task is sleeping or not.
*/
- if (!RPC_IS_QUEUED(task))
+ if (!RPC_IS_QUEUED(task)) {
+ cond_resched();
continue;
+ }
/*
* Signalled tasks should exit rather than sleep.
@@ -1230,8 +1241,7 @@ static int rpciod_start(void)
if (!wq)
goto out_failed;
rpciod_workqueue = wq;
- /* Note: highpri because network receive is latency sensitive */
- wq = alloc_workqueue("xprtiod", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_HIGHPRI, 0);
+ wq = alloc_workqueue("xprtiod", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
if (!wq)
goto free_rpciod;
xprtiod_workqueue = wq;
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index a3bbe5ce4570..4292278a9552 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1186,45 +1186,6 @@ void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
static __printf(2,3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {}
#endif
-static int
-svc_generic_dispatch(struct svc_rqst *rqstp, __be32 *statp)
-{
- struct kvec *argv = &rqstp->rq_arg.head[0];
- struct kvec *resv = &rqstp->rq_res.head[0];
- const struct svc_procedure *procp = rqstp->rq_procinfo;
-
- /*
- * Decode arguments
- * XXX: why do we ignore the return value?
- */
- if (procp->pc_decode &&
- !procp->pc_decode(rqstp, argv->iov_base)) {
- *statp = rpc_garbage_args;
- return 1;
- }
-
- *statp = procp->pc_func(rqstp);
-
- if (*statp == rpc_drop_reply ||
- test_bit(RQ_DROPME, &rqstp->rq_flags))
- return 0;
-
- if (rqstp->rq_auth_stat != rpc_auth_ok)
- return 1;
-
- if (*statp != rpc_success)
- return 1;
-
- /* Encode reply */
- if (procp->pc_encode &&
- !procp->pc_encode(rqstp, resv->iov_base + resv->iov_len)) {
- dprintk("svc: failed to encode reply\n");
- /* serv->sv_stats->rpcsystemerr++; */
- *statp = rpc_system_err;
- }
- return 1;
-}
-
__be32
svc_generic_init_request(struct svc_rqst *rqstp,
const struct svc_program *progp,
@@ -1291,7 +1252,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
__be32 *statp;
u32 prog, vers;
__be32 rpc_stat;
- int auth_res;
+ int auth_res, rc;
__be32 *reply_statp;
rpc_stat = rpc_success;
@@ -1392,28 +1353,18 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
svc_reserve_auth(rqstp, procp->pc_xdrressize<<2);
/* Call the function that processes the request. */
- if (!process.dispatch) {
- if (!svc_generic_dispatch(rqstp, statp))
- goto release_dropit;
- if (*statp == rpc_garbage_args)
- goto err_garbage;
- } else {
- dprintk("svc: calling dispatcher\n");
- if (!process.dispatch(rqstp, statp))
- goto release_dropit; /* Release reply info */
- }
-
+ rc = process.dispatch(rqstp, statp);
+ if (procp->pc_release)
+ procp->pc_release(rqstp);
+ if (!rc)
+ goto dropit;
if (rqstp->rq_auth_stat != rpc_auth_ok)
- goto err_release_bad_auth;
+ goto err_bad_auth;
/* Check RPC status result */
if (*statp != rpc_success)
resv->iov_len = ((void*)statp) - resv->iov_base + 4;
- /* Release reply info */
- if (procp->pc_release)
- procp->pc_release(rqstp);
-
if (procp->pc_encode == NULL)
goto dropit;
@@ -1422,9 +1373,6 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
goto close_xprt;
return 1; /* Caller can now send it */
-release_dropit:
- if (procp->pc_release)
- procp->pc_release(rqstp);
dropit:
svc_authorise(rqstp); /* doesn't hurt to call this twice */
dprintk("svc: svc_process dropit\n");
@@ -1451,9 +1399,6 @@ err_bad_rpc:
svc_putnl(resv, 2);
goto sendit;
-err_release_bad_auth:
- if (procp->pc_release)
- procp->pc_release(rqstp);
err_bad_auth:
dprintk("svc: authentication failed (%d)\n",
be32_to_cpu(rqstp->rq_auth_stat));
@@ -1676,16 +1621,17 @@ EXPORT_SYMBOL_GPL(svc_encode_result_payload);
/**
* svc_fill_write_vector - Construct data argument for VFS write call
* @rqstp: svc_rqst to operate on
- * @pages: list of pages containing data payload
- * @first: buffer containing first section of write payload
- * @total: total number of bytes of write payload
+ * @payload: xdr_buf containing only the write data payload
*
* Fills in rqstp::rq_vec, and returns the number of elements.
*/
-unsigned int svc_fill_write_vector(struct svc_rqst *rqstp, struct page **pages,
- struct kvec *first, size_t total)
+unsigned int svc_fill_write_vector(struct svc_rqst *rqstp,
+ struct xdr_buf *payload)
{
+ struct page **pages = payload->pages;
+ struct kvec *first = payload->head;
struct kvec *vec = rqstp->rq_vec;
+ size_t total = payload->len;
unsigned int i;
/* Some types of transport can present the write payload
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 6316bd2b8f37..1e99ba1b9d72 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -687,6 +687,7 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
set_current_state(TASK_RUNNING);
return -EINTR;
}
+ trace_svc_alloc_arg_err(pages);
schedule_timeout(msecs_to_jiffies(500));
}
rqstp->rq_page_end = &rqstp->rq_pages[pages];
diff --git a/net/sunrpc/sysfs.c b/net/sunrpc/sysfs.c
index 9a6f17e18f73..2766dd21935b 100644
--- a/net/sunrpc/sysfs.c
+++ b/net/sunrpc/sysfs.c
@@ -109,8 +109,10 @@ static ssize_t rpc_sysfs_xprt_srcaddr_show(struct kobject *kobj,
struct sock_xprt *sock;
ssize_t ret = -1;
- if (!xprt)
- return 0;
+ if (!xprt || !xprt_connected(xprt)) {
+ xprt_put(xprt);
+ return -ENOTCONN;
+ }
sock = container_of(xprt, struct sock_xprt, xprt);
if (kernel_getsockname(sock->sock, (struct sockaddr *)&saddr) < 0)
@@ -129,8 +131,10 @@ static ssize_t rpc_sysfs_xprt_info_show(struct kobject *kobj,
struct rpc_xprt *xprt = rpc_sysfs_xprt_kobj_get_xprt(kobj);
ssize_t ret;
- if (!xprt)
- return 0;
+ if (!xprt || !xprt_connected(xprt)) {
+ xprt_put(xprt);
+ return -ENOTCONN;
+ }
ret = sprintf(buf, "last_used=%lu\ncur_cong=%lu\ncong_win=%lu\n"
"max_num_slots=%u\nmin_num_slots=%u\nnum_reqs=%u\n"
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index ca10ba2626f2..df194cc07035 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -1633,7 +1633,7 @@ EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
* Sets up @subbuf to represent a portion of @xdr. The portion
* starts at the current offset in @xdr, and extends for a length
* of @nbytes. If this is successful, @xdr is advanced to the next
- * position following that portion.
+ * XDR data item following that portion.
*
* Return values:
* %true: @subbuf has been initialized, and @xdr has been advanced.
@@ -1642,29 +1642,31 @@ EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
bool xdr_stream_subsegment(struct xdr_stream *xdr, struct xdr_buf *subbuf,
unsigned int nbytes)
{
- unsigned int remaining, offset, len;
+ unsigned int start = xdr_stream_pos(xdr);
+ unsigned int remaining, len;
- if (xdr_buf_subsegment(xdr->buf, subbuf, xdr_stream_pos(xdr), nbytes))
+ /* Extract @subbuf and bounds-check the fn arguments */
+ if (xdr_buf_subsegment(xdr->buf, subbuf, start, nbytes))
return false;
- if (subbuf->head[0].iov_len)
- if (!__xdr_inline_decode(xdr, subbuf->head[0].iov_len))
- return false;
-
- remaining = subbuf->page_len;
- offset = subbuf->page_base;
- while (remaining) {
- len = min_t(unsigned int, remaining, PAGE_SIZE) - offset;
-
+ /* Advance @xdr by @nbytes */
+ for (remaining = nbytes; remaining;) {
if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
return false;
- if (!__xdr_inline_decode(xdr, len))
- return false;
+ len = (char *)xdr->end - (char *)xdr->p;
+ if (remaining <= len) {
+ xdr->p = (__be32 *)((char *)xdr->p +
+ (remaining + xdr_pad_size(nbytes)));
+ break;
+ }
+
+ xdr->p = (__be32 *)((char *)xdr->p + len);
+ xdr->end = xdr->p;
remaining -= len;
- offset = 0;
}
+ xdr_stream_set_pos(xdr, start + nbytes);
return true;
}
EXPORT_SYMBOL_GPL(xdr_stream_subsegment);
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index cfd681700d1a..a02de2bddb28 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -246,11 +246,9 @@ EXPORT_SYMBOL_GPL(xprt_find_transport_ident);
static void xprt_clear_locked(struct rpc_xprt *xprt)
{
xprt->snd_task = NULL;
- if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
- smp_mb__before_atomic();
- clear_bit(XPRT_LOCKED, &xprt->state);
- smp_mb__after_atomic();
- } else
+ if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state))
+ clear_bit_unlock(XPRT_LOCKED, &xprt->state);
+ else
queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}
@@ -737,6 +735,8 @@ static void xprt_autoclose(struct work_struct *work)
unsigned int pflags = memalloc_nofs_save();
trace_xprt_disconnect_auto(xprt);
+ xprt->connect_cookie++;
+ smp_mb__before_atomic();
clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
xprt->ops->close(xprt);
xprt_release_write(xprt, NULL);
@@ -767,7 +767,8 @@ EXPORT_SYMBOL_GPL(xprt_disconnect_done);
*/
static void xprt_schedule_autoclose_locked(struct rpc_xprt *xprt)
{
- set_bit(XPRT_CLOSE_WAIT, &xprt->state);
+ if (test_and_set_bit(XPRT_CLOSE_WAIT, &xprt->state))
+ return;
if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
queue_work(xprtiod_workqueue, &xprt->task_cleanup);
else if (xprt->snd_task && !test_bit(XPRT_SND_IS_COOKIE, &xprt->state))
@@ -1603,15 +1604,14 @@ xprt_transmit(struct rpc_task *task)
{
struct rpc_rqst *next, *req = task->tk_rqstp;
struct rpc_xprt *xprt = req->rq_xprt;
- int counter, status;
+ int status;
spin_lock(&xprt->queue_lock);
- counter = 0;
- while (!list_empty(&xprt->xmit_queue)) {
- if (++counter == 20)
+ for (;;) {
+ next = list_first_entry_or_null(&xprt->xmit_queue,
+ struct rpc_rqst, rq_xmit);
+ if (!next)
break;
- next = list_first_entry(&xprt->xmit_queue,
- struct rpc_rqst, rq_xmit);
xprt_pin_rqst(next);
spin_unlock(&xprt->queue_lock);
status = xprt_request_transmit(next, task);
@@ -1619,13 +1619,16 @@ xprt_transmit(struct rpc_task *task)
status = 0;
spin_lock(&xprt->queue_lock);
xprt_unpin_rqst(next);
- if (status == 0) {
- if (!xprt_request_data_received(task) ||
- test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
- continue;
- } else if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
- task->tk_status = status;
- break;
+ if (status < 0) {
+ if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
+ task->tk_status = status;
+ break;
+ }
+ /* Was @task transmitted, and has it received a reply? */
+ if (xprt_request_data_received(task) &&
+ !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
+ break;
+ cond_resched_lock(&xprt->queue_lock);
}
spin_unlock(&xprt->queue_lock);
}
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index f700b34a5bfd..ff699307e820 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -515,8 +515,8 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
* a single ib_post_send() call.
*/
prev = &first;
- while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {
-
+ mr = rpcrdma_mr_pop(&req->rl_registered);
+ do {
trace_xprtrdma_mr_localinv(mr);
r_xprt->rx_stats.local_inv_needed++;
@@ -533,7 +533,8 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
*prev = last;
prev = &last->next;
- }
+ } while ((mr = rpcrdma_mr_pop(&req->rl_registered)));
+
mr = container_of(last, struct rpcrdma_mr, mr_invwr);
/* Strong send queue ordering guarantees that when the
@@ -617,8 +618,8 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
* a single ib_post_send() call.
*/
prev = &first;
- while ((mr = rpcrdma_mr_pop(&req->rl_registered))) {
-
+ mr = rpcrdma_mr_pop(&req->rl_registered);
+ do {
trace_xprtrdma_mr_localinv(mr);
r_xprt->rx_stats.local_inv_needed++;
@@ -635,7 +636,7 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
*prev = last;
prev = &last->next;
- }
+ } while ((mr = rpcrdma_mr_pop(&req->rl_registered)));
/* Strong send queue ordering guarantees that when the
* last WR in the chain completes, all WRs in the chain
@@ -666,3 +667,38 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
*/
rpcrdma_force_disconnect(ep);
}
+
+/**
+ * frwr_wp_create - Create an MR for padding Write chunks
+ * @r_xprt: transport resources to use
+ *
+ * Return 0 on success, negative errno on failure.
+ */
+int frwr_wp_create(struct rpcrdma_xprt *r_xprt)
+{
+ struct rpcrdma_ep *ep = r_xprt->rx_ep;
+ struct rpcrdma_mr_seg seg;
+ struct rpcrdma_mr *mr;
+
+ mr = rpcrdma_mr_get(r_xprt);
+ if (!mr)
+ return -EAGAIN;
+ mr->mr_req = NULL;
+ ep->re_write_pad_mr = mr;
+
+ seg.mr_len = XDR_UNIT;
+ seg.mr_page = virt_to_page(ep->re_write_pad);
+ seg.mr_offset = offset_in_page(ep->re_write_pad);
+ if (IS_ERR(frwr_map(r_xprt, &seg, 1, true, xdr_zero, mr)))
+ return -EIO;
+ trace_xprtrdma_mr_fastreg(mr);
+
+ mr->mr_cqe.done = frwr_wc_fastreg;
+ mr->mr_regwr.wr.next = NULL;
+ mr->mr_regwr.wr.wr_cqe = &mr->mr_cqe;
+ mr->mr_regwr.wr.num_sge = 0;
+ mr->mr_regwr.wr.opcode = IB_WR_REG_MR;
+ mr->mr_regwr.wr.send_flags = 0;
+
+ return ib_post_send(ep->re_id->qp, &mr->mr_regwr.wr, NULL);
+}
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index c335c1361564..8035a983c8ce 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -255,15 +255,7 @@ rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
page_base = 0;
}
- if (type == rpcrdma_readch)
- goto out;
-
- /* When encoding a Write chunk, some servers need to see an
- * extra segment for non-XDR-aligned Write chunks. The upper
- * layer provides space in the tail iovec that may be used
- * for this purpose.
- */
- if (type == rpcrdma_writech && r_xprt->rx_ep->re_implicit_roundup)
+ if (type == rpcrdma_readch || type == rpcrdma_writech)
goto out;
if (xdrbuf->tail[0].iov_len)
@@ -405,6 +397,7 @@ static int rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt,
enum rpcrdma_chunktype wtype)
{
struct xdr_stream *xdr = &req->rl_stream;
+ struct rpcrdma_ep *ep = r_xprt->rx_ep;
struct rpcrdma_mr_seg *seg;
struct rpcrdma_mr *mr;
int nsegs, nchunks;
@@ -443,6 +436,18 @@ static int rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt,
nsegs -= mr->mr_nents;
} while (nsegs);
+ if (xdr_pad_size(rqst->rq_rcv_buf.page_len)) {
+ if (encode_rdma_segment(xdr, ep->re_write_pad_mr) < 0)
+ return -EMSGSIZE;
+
+ trace_xprtrdma_chunk_wp(rqst->rq_task, ep->re_write_pad_mr,
+ nsegs);
+ r_xprt->rx_stats.write_chunk_count++;
+ r_xprt->rx_stats.total_rdma_request += mr->mr_length;
+ nchunks++;
+ nsegs -= mr->mr_nents;
+ }
+
/* Update count of segments in this Write chunk */
*segcount = cpu_to_be32(nchunks);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 6be23ce7a93d..cf76a6ad127b 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -330,9 +330,9 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
/* WARNING: Only wc->wr_cqe and wc->status are reliable */
ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);
- trace_svcrdma_wc_receive(wc, &ctxt->rc_cid);
if (wc->status != IB_WC_SUCCESS)
goto flushed;
+ trace_svcrdma_wc_recv(wc, &ctxt->rc_cid);
/* If receive posting fails, the connection is about to be
* lost anyway. The server will not be able to send a reply
@@ -345,7 +345,7 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
*/
if (rdma->sc_pending_recvs < rdma->sc_max_requests)
if (!svc_rdma_refresh_recvs(rdma, rdma->sc_recv_batch, false))
- goto flushed;
+ goto dropped;
/* All wc fields are now known to be valid */
ctxt->rc_byte_len = wc->byte_len;
@@ -360,6 +360,11 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
return;
flushed:
+ if (wc->status == IB_WC_WR_FLUSH_ERR)
+ trace_svcrdma_wc_recv_flush(wc, &ctxt->rc_cid);
+ else
+ trace_svcrdma_wc_recv_err(wc, &ctxt->rc_cid);
+dropped:
svc_rdma_recv_ctxt_put(rdma, ctxt);
svc_xprt_deferred_close(&rdma->sc_xprt);
}
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index e27433f08ca7..5f0155fdefc7 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -155,6 +155,7 @@ struct svc_rdma_chunk_ctxt {
struct ib_cqe cc_cqe;
struct svcxprt_rdma *cc_rdma;
struct list_head cc_rwctxts;
+ ktime_t cc_posttime;
int cc_sqecount;
enum ib_wc_status cc_status;
struct completion cc_done;
@@ -267,7 +268,16 @@ static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
struct svc_rdma_write_info *info =
container_of(cc, struct svc_rdma_write_info, wi_cc);
- trace_svcrdma_wc_write(wc, &cc->cc_cid);
+ switch (wc->status) {
+ case IB_WC_SUCCESS:
+ trace_svcrdma_wc_write(wc, &cc->cc_cid);
+ break;
+ case IB_WC_WR_FLUSH_ERR:
+ trace_svcrdma_wc_write_flush(wc, &cc->cc_cid);
+ break;
+ default:
+ trace_svcrdma_wc_write_err(wc, &cc->cc_cid);
+ }
svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount);
@@ -320,11 +330,22 @@ static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
struct ib_cqe *cqe = wc->wr_cqe;
struct svc_rdma_chunk_ctxt *cc =
container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
- struct svcxprt_rdma *rdma = cc->cc_rdma;
+ struct svc_rdma_read_info *info;
- trace_svcrdma_wc_read(wc, &cc->cc_cid);
+ switch (wc->status) {
+ case IB_WC_SUCCESS:
+ info = container_of(cc, struct svc_rdma_read_info, ri_cc);
+ trace_svcrdma_wc_read(wc, &cc->cc_cid, info->ri_totalbytes,
+ cc->cc_posttime);
+ break;
+ case IB_WC_WR_FLUSH_ERR:
+ trace_svcrdma_wc_read_flush(wc, &cc->cc_cid);
+ break;
+ default:
+ trace_svcrdma_wc_read_err(wc, &cc->cc_cid);
+ }
- svc_rdma_wake_send_waiters(rdma, cc->cc_sqecount);
+ svc_rdma_wake_send_waiters(cc->cc_rdma, cc->cc_sqecount);
cc->cc_status = wc->status;
complete(&cc->cc_done);
return;
@@ -363,6 +384,7 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
do {
if (atomic_sub_return(cc->cc_sqecount,
&rdma->sc_sq_avail) > 0) {
+ cc->cc_posttime = ktime_get();
ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
if (ret)
break;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 599021b2391d..22a871e6fe4d 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -280,13 +280,21 @@ static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
struct svc_rdma_send_ctxt *ctxt =
container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);
- trace_svcrdma_wc_send(wc, &ctxt->sc_cid);
-
svc_rdma_wake_send_waiters(rdma, 1);
complete(&ctxt->sc_done);
if (unlikely(wc->status != IB_WC_SUCCESS))
- svc_xprt_deferred_close(&rdma->sc_xprt);
+ goto flushed;
+
+ trace_svcrdma_wc_send(wc, &ctxt->sc_cid);
+ return;
+
+flushed:
+ if (wc->status != IB_WC_WR_FLUSH_ERR)
+ trace_svcrdma_wc_send_err(wc, &ctxt->sc_cid);
+ else
+ trace_svcrdma_wc_send_flush(wc, &ctxt->sc_cid);
+ svc_xprt_deferred_close(&rdma->sc_xprt);
}
/**
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index aaec3c9be8db..3d3673ba9e1e 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -205,14 +205,12 @@ static void rpcrdma_update_cm_private(struct rpcrdma_ep *ep,
unsigned int rsize, wsize;
/* Default settings for RPC-over-RDMA Version One */
- ep->re_implicit_roundup = xprt_rdma_pad_optimize;
rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
wsize = RPCRDMA_V1_DEF_INLINE_SIZE;
if (pmsg &&
pmsg->cp_magic == rpcrdma_cmp_magic &&
pmsg->cp_version == RPCRDMA_CMP_VERSION) {
- ep->re_implicit_roundup = true;
rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
}
@@ -551,6 +549,7 @@ int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt)
goto out;
}
rpcrdma_mrs_create(r_xprt);
+ frwr_wp_create(r_xprt);
out:
trace_xprtrdma_connect(r_xprt, rc);
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index d91f54eae00b..c79f92eeda76 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -68,13 +68,14 @@
/*
* RDMA Endpoint -- connection endpoint details
*/
+struct rpcrdma_mr;
struct rpcrdma_ep {
struct kref re_kref;
struct rdma_cm_id *re_id;
struct ib_pd *re_pd;
unsigned int re_max_rdma_segs;
unsigned int re_max_fr_depth;
- bool re_implicit_roundup;
+ struct rpcrdma_mr *re_write_pad_mr;
enum ib_mr_type re_mrtype;
struct completion re_done;
unsigned int re_send_count;
@@ -97,6 +98,8 @@ struct rpcrdma_ep {
unsigned int re_inline_recv; /* negotiated */
atomic_t re_completion_ids;
+
+ char re_write_pad[XDR_UNIT];
};
/* Pre-allocate extra Work Requests for handling reverse-direction
@@ -535,6 +538,7 @@ int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs);
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
+int frwr_wp_create(struct rpcrdma_xprt *r_xprt);
/*
* RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 04f1b78bcbca..ae48c9c84ee1 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1134,6 +1134,7 @@ static void xs_run_error_worker(struct sock_xprt *transport, unsigned int nr)
static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
{
+ xprt->connect_cookie++;
smp_mb__before_atomic();
clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
clear_bit(XPRT_CLOSING, &xprt->state);
@@ -1153,14 +1154,13 @@ static void xs_error_report(struct sock *sk)
struct sock_xprt *transport;
struct rpc_xprt *xprt;
- read_lock_bh(&sk->sk_callback_lock);
if (!(xprt = xprt_from_sock(sk)))
- goto out;
+ return;
transport = container_of(xprt, struct sock_xprt, xprt);
transport->xprt_err = -sk->sk_err;
if (transport->xprt_err == 0)
- goto out;
+ return;
dprintk("RPC: xs_error_report client %p, error=%d...\n",
xprt, -transport->xprt_err);
trace_rpc_socket_error(xprt, sk->sk_socket, transport->xprt_err);
@@ -1168,8 +1168,6 @@ static void xs_error_report(struct sock *sk)
/* barrier ensures xprt_err is set before XPRT_SOCK_WAKE_ERROR */
smp_mb__before_atomic();
xs_run_error_worker(transport, XPRT_SOCK_WAKE_ERROR);
- out:
- read_unlock_bh(&sk->sk_callback_lock);
}
static void xs_reset_transport(struct sock_xprt *transport)
@@ -1188,7 +1186,7 @@ static void xs_reset_transport(struct sock_xprt *transport)
kernel_sock_shutdown(sock, SHUT_RDWR);
mutex_lock(&transport->recv_mutex);
- write_lock_bh(&sk->sk_callback_lock);
+ lock_sock(sk);
transport->inet = NULL;
transport->sock = NULL;
transport->file = NULL;
@@ -1197,10 +1195,10 @@ static void xs_reset_transport(struct sock_xprt *transport)
xs_restore_old_callbacks(transport, sk);
xprt_clear_connected(xprt);
- write_unlock_bh(&sk->sk_callback_lock);
xs_sock_reset_connection_flags(xprt);
/* Reset stream record info */
xs_stream_reset_connect(transport);
+ release_sock(sk);
mutex_unlock(&transport->recv_mutex);
trace_rpc_socket_close(xprt, sock);
@@ -1364,7 +1362,6 @@ static void xs_data_ready(struct sock *sk)
{
struct rpc_xprt *xprt;
- read_lock_bh(&sk->sk_callback_lock);
dprintk("RPC: xs_data_ready...\n");
xprt = xprt_from_sock(sk);
if (xprt != NULL) {
@@ -1379,7 +1376,6 @@ static void xs_data_ready(struct sock *sk)
if (!test_and_set_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
queue_work(xprtiod_workqueue, &transport->recv_worker);
}
- read_unlock_bh(&sk->sk_callback_lock);
}
/*
@@ -1408,9 +1404,8 @@ static void xs_tcp_state_change(struct sock *sk)
struct rpc_xprt *xprt;
struct sock_xprt *transport;
- read_lock_bh(&sk->sk_callback_lock);
if (!(xprt = xprt_from_sock(sk)))
- goto out;
+ return;
dprintk("RPC: xs_tcp_state_change client %p...\n", xprt);
dprintk("RPC: state %x conn %d dead %d zapped %d sk_shutdown %d\n",
sk->sk_state, xprt_connected(xprt),
@@ -1471,8 +1466,6 @@ static void xs_tcp_state_change(struct sock *sk)
/* Trigger the socket release */
xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT);
}
- out:
- read_unlock_bh(&sk->sk_callback_lock);
}
static void xs_write_space(struct sock *sk)
@@ -1511,13 +1504,9 @@ out:
*/
static void xs_udp_write_space(struct sock *sk)
{
- read_lock_bh(&sk->sk_callback_lock);
-
/* from net/core/sock.c:sock_def_write_space */
if (sock_writeable(sk))
xs_write_space(sk);
-
- read_unlock_bh(&sk->sk_callback_lock);
}
/**
@@ -1532,13 +1521,9 @@ static void xs_udp_write_space(struct sock *sk)
*/
static void xs_tcp_write_space(struct sock *sk)
{
- read_lock_bh(&sk->sk_callback_lock);
-
/* from net/core/stream.c:sk_stream_write_space */
if (sk_stream_is_writeable(sk))
xs_write_space(sk);
-
- read_unlock_bh(&sk->sk_callback_lock);
}
static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
@@ -1833,7 +1818,7 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt,
if (!transport->inet) {
struct sock *sk = sock->sk;
- write_lock_bh(&sk->sk_callback_lock);
+ lock_sock(sk);
xs_save_old_callbacks(transport, sk);
@@ -1849,7 +1834,7 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt,
transport->sock = sock;
transport->inet = sk;
- write_unlock_bh(&sk->sk_callback_lock);
+ release_sock(sk);
}
xs_stream_start_connect(transport);
@@ -2031,7 +2016,7 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
if (!transport->inet) {
struct sock *sk = sock->sk;
- write_lock_bh(&sk->sk_callback_lock);
+ lock_sock(sk);
xs_save_old_callbacks(transport, sk);
@@ -2048,7 +2033,7 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
xs_set_memalloc(xprt);
- write_unlock_bh(&sk->sk_callback_lock);
+ release_sock(sk);
}
xs_udp_do_set_buffer_size(xprt);
@@ -2174,7 +2159,6 @@ static void xs_tcp_set_connect_timeout(struct rpc_xprt *xprt,
static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
{
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
- int ret = -ENOTCONN;
if (!transport->inet) {
struct sock *sk = sock->sk;
@@ -2194,7 +2178,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
xs_tcp_set_socket_timeouts(xprt, sock);
tcp_sock_set_nodelay(sk);
- write_lock_bh(&sk->sk_callback_lock);
+ lock_sock(sk);
xs_save_old_callbacks(transport, sk);
@@ -2214,11 +2198,11 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
transport->sock = sock;
transport->inet = sk;
- write_unlock_bh(&sk->sk_callback_lock);
+ release_sock(sk);
}
if (!xprt_bound(xprt))
- goto out;
+ return -ENOTCONN;
xs_set_memalloc(xprt);
@@ -2226,22 +2210,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
/* Tell the socket layer to start connecting... */
set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
- ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
- switch (ret) {
- case 0:
- xs_set_srcport(transport, sock);
- fallthrough;
- case -EINPROGRESS:
- /* SYN_SENT! */
- if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
- xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
- break;
- case -EADDRNOTAVAIL:
- /* Source port number is unavailable. Try a new one! */
- transport->srcport = 0;
- }
-out:
- return ret;
+ return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
}
/**
@@ -2256,14 +2225,14 @@ static void xs_tcp_setup_socket(struct work_struct *work)
container_of(work, struct sock_xprt, connect_worker.work);
struct socket *sock = transport->sock;
struct rpc_xprt *xprt = &transport->xprt;
- int status = -EIO;
+ int status;
if (!sock) {
sock = xs_create_sock(xprt, transport,
xs_addr(xprt)->sa_family, SOCK_STREAM,
IPPROTO_TCP, true);
if (IS_ERR(sock)) {
- status = PTR_ERR(sock);
+ xprt_wake_pending_tasks(xprt, PTR_ERR(sock));
goto out;
}
}
@@ -2280,21 +2249,21 @@ static void xs_tcp_setup_socket(struct work_struct *work)
xprt, -status, xprt_connected(xprt),
sock->sk->sk_state);
switch (status) {
- default:
- printk("%s: connect returned unhandled error %d\n",
- __func__, status);
- fallthrough;
- case -EADDRNOTAVAIL:
- /* We're probably in TIME_WAIT. Get rid of existing socket,
- * and retry
- */
- xs_tcp_force_close(xprt);
- break;
case 0:
+ xs_set_srcport(transport, sock);
+ fallthrough;
case -EINPROGRESS:
+ /* SYN_SENT! */
+ if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
+ xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
+ fallthrough;
case -EALREADY:
- xprt_unlock_connect(xprt, transport);
- return;
+ goto out_unlock;
+ case -EADDRNOTAVAIL:
+ /* Source port number is unavailable. Try a new one! */
+ transport->srcport = 0;
+ status = -EAGAIN;
+ break;
case -EINVAL:
/* Happens, for instance, if the user specified a link
* local IPv6 address without a scope-id.
@@ -2306,18 +2275,22 @@ static void xs_tcp_setup_socket(struct work_struct *work)
case -EHOSTUNREACH:
case -EADDRINUSE:
case -ENOBUFS:
- /* xs_tcp_force_close() wakes tasks with a fixed error code.
- * We need to wake them first to ensure the correct error code.
- */
- xprt_wake_pending_tasks(xprt, status);
- xs_tcp_force_close(xprt);
- goto out;
+ break;
+ default:
+ printk("%s: connect returned unhandled error %d\n",
+ __func__, status);
+ status = -EAGAIN;
}
- status = -EAGAIN;
+
+ /* xs_tcp_force_close() wakes tasks with a fixed error code.
+ * We need to wake them first to ensure the correct error code.
+ */
+ xprt_wake_pending_tasks(xprt, status);
+ xs_tcp_force_close(xprt);
out:
xprt_clear_connecting(xprt);
+out_unlock:
xprt_unlock_connect(xprt, transport);
- xprt_wake_pending_tasks(xprt, status);
}
/**
@@ -2341,7 +2314,7 @@ static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));
- if (transport->sock != NULL) {
+ if (transport->sock != NULL && !xprt_connecting(xprt)) {
dprintk("RPC: xs_connect delayed xprt %p for %lu "
"seconds\n",
xprt, xprt->reestablish_timeout / HZ);