author	Bob Pearson <rpearsonhpe@gmail.com>	2022-06-06 09:38:33 -0500
committer	Jason Gunthorpe <jgg@nvidia.com>	2022-06-30 13:54:03 -0300
commit	24f0ab0102115c7569379a606f9a26826577522f (patch)
tree	5f2e60ae97bad904a6ce348896ca6f1c43fec2cc /drivers/infiniband
parent	b54c2a25ace5aed21c9944b7605b623abd2ca99c (diff)
RDMA/rxe: Move code to rxe_prepare_atomic_res()
Separate the code that prepares the atomic responder resource into a
subroutine. This is preparation for merging the normal and retry atomic
responder flows.

Link: https://lore.kernel.org/r/20220606143836.3323-2-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
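For reference, this is roughly how the extracted helper reads once the hunk below is applied. It is reconstructed from the diff itself; the rxe_qp, rxe_pkt_info and resp_res types and the rxe_advance_resp_resource()/free_rd_atomic_resource() helpers come from the surrounding rxe driver and are assumed here:

static struct resp_res *rxe_prepare_atomic_res(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
	struct resp_res *res;

	/* claim the next responder resource slot and recycle whatever it held */
	res = &qp->resp.resources[qp->resp.res_head];
	rxe_advance_resp_resource(qp);
	free_rd_atomic_resource(qp, res);

	/* mark it as an atomic resource and stamp it with the request PSN */
	res->type = RXE_ATOMIC_MASK;
	res->first_psn = pkt->psn;
	res->last_psn = pkt->psn;
	res->cur_psn = pkt->psn;

	return res;
}

send_atomic_ack() is then left with only packet-specific work: it builds the ack skb, attaches it to the returned resource via res->atomic.skb, and transmits it; per the commit message, the preparation step is what is being set up for sharing with the retry flow.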
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/sw/rxe/rxe_resp.c	43
1 file changed, 27 insertions, 16 deletions
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index c45c9d954931..bc84aad62f56 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -1017,38 +1017,49 @@ err1:
 	return err;
 }
 
-static int send_atomic_ack(struct rxe_qp *qp, u8 syndrome, u32 psn)
+static struct resp_res *rxe_prepare_atomic_res(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
 {
-	int rc = 0;
+	struct resp_res *res;
+
+	res = &qp->resp.resources[qp->resp.res_head];
+	rxe_advance_resp_resource(qp);
+	free_rd_atomic_resource(qp, res);
+
+	res->type = RXE_ATOMIC_MASK;
+	res->first_psn = pkt->psn;
+	res->last_psn = pkt->psn;
+	res->cur_psn = pkt->psn;
+
+	return res;
+}
+
+static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
+			   u8 syndrome)
+{
+	int err = 0;
 	struct rxe_pkt_info ack_pkt;
 	struct sk_buff *skb;
 	struct resp_res *res;
 
 	skb = prepare_ack_packet(qp, &ack_pkt, IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE,
-				 0, psn, syndrome);
+				 0, pkt->psn, syndrome);
 	if (!skb) {
-		rc = -ENOMEM;
+		err = -ENOMEM;
 		goto out;
 	}
 
-	res = &qp->resp.resources[qp->resp.res_head];
-	free_rd_atomic_resource(qp, res);
-	rxe_advance_resp_resource(qp);
-
 	skb_get(skb);
-	res->type = RXE_ATOMIC_MASK;
+
+	res = rxe_prepare_atomic_res(qp, pkt);
 	res->atomic.skb = skb;
-	res->first_psn = ack_pkt.psn;
-	res->last_psn = ack_pkt.psn;
-	res->cur_psn = ack_pkt.psn;
 
-	rc = rxe_xmit_packet(qp, &ack_pkt, skb);
-	if (rc) {
+	err = rxe_xmit_packet(qp, &ack_pkt, skb);
+	if (err) {
 		pr_err_ratelimited("Failed sending ack\n");
 		rxe_put(qp);
 	}
 out:
-	return rc;
+	return err;
 }
 
 static enum resp_states acknowledge(struct rxe_qp *qp,
@@ -1060,7 +1071,7 @@ static enum resp_states acknowledge(struct rxe_qp *qp,
 	if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
 		send_ack(qp, qp->resp.aeth_syndrome, pkt->psn);
 	else if (pkt->mask & RXE_ATOMIC_MASK)
-		send_atomic_ack(qp, AETH_ACK_UNLIMITED, pkt->psn);
+		send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED);
 	else if (bth_ack(pkt))
 		send_ack(qp, AETH_ACK_UNLIMITED, pkt->psn);