From a4e6a1dd57469d6ecee084db1507d3e37908d1e2 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Fri, 31 Jul 2020 09:04:20 +0300 Subject: RDMA/efa: Introduce SRD RNR retry This patch introduces the ability to configure SRD QPs with the RNR retry parameter when issuing a modify QP command. In addition, a capability bit was added to report support to the userspace library. Link: https://lore.kernel.org/r/20200731060420.17053-5-galpress@amazon.com Reviewed-by: Firas JahJah Reviewed-by: Yossi Leybovich Signed-off-by: Gal Pressman Signed-off-by: Jason Gunthorpe --- include/uapi/rdma/efa-abi.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/uapi') diff --git a/include/uapi/rdma/efa-abi.h b/include/uapi/rdma/efa-abi.h index 507a2862bedb..f89fbb5b1e8d 100644 --- a/include/uapi/rdma/efa-abi.h +++ b/include/uapi/rdma/efa-abi.h @@ -105,6 +105,7 @@ struct efa_ibv_create_ah_resp { enum { EFA_QUERY_DEVICE_CAPS_RDMA_READ = 1 << 0, + EFA_QUERY_DEVICE_CAPS_RNR_RETRY = 1 << 1, }; struct efa_ibv_ex_query_device_resp { -- cgit v1.2.3 From 5f9e2822d12fe5050da5db0e65924d5ddc86bf29 Mon Sep 17 00:00:00 2001 From: Bob Pearson Date: Thu, 20 Aug 2020 17:46:23 -0500 Subject: RDMA/rxe: Fix style warnings Fixed several minor checkpatch warnings in existing rxe source. Link: https://lore.kernel.org/r/20200820224638.3212-3-rpearson@hpe.com Signed-off-by: Bob Pearson Signed-off-by: Jason Gunthorpe --- drivers/infiniband/sw/rxe/rxe_comp.c | 3 +-- drivers/infiniband/sw/rxe/rxe_net.c | 2 +- drivers/infiniband/sw/rxe/rxe_qp.c | 3 +-- drivers/infiniband/sw/rxe/rxe_task.h | 2 +- include/uapi/rdma/rdma_user_rxe.h | 6 +++--- 5 files changed, 7 insertions(+), 9 deletions(-) (limited to 'include/uapi') diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c index 4bc88708b355..8e28ebb42fce 100644 --- a/drivers/infiniband/sw/rxe/rxe_comp.c +++ b/drivers/infiniband/sw/rxe/rxe_comp.c @@ -690,9 +690,8 @@ int rxe_completer(void *arg) */ /* there is nothing to retry in this case */ - if (!wqe || (wqe->state == wqe_state_posted)) { + if (!wqe || (wqe->state == wqe_state_posted)) goto exit; - } /* if we've started a retry, don't start another * retry sequence, unless this is a timeout. 
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index 0c3808611f95..80abd417f2b9 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c @@ -120,7 +120,7 @@ static struct dst_entry *rxe_find_route6(struct net_device *ndev, ndst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(recv_sockets.sk6->sk), recv_sockets.sk6->sk, &fl6, NULL); - if (unlikely(IS_ERR(ndst))) { + if (IS_ERR(ndst)) { pr_err_ratelimited("no route to %pI6\n", daddr); return NULL; } diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index 6c11c3aeeca6..3562b3876101 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -628,9 +628,8 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask, if (mask & IB_QP_QKEY) qp->attr.qkey = attr->qkey; - if (mask & IB_QP_AV) { + if (mask & IB_QP_AV) rxe_init_av(&attr->ah_attr, &qp->pri_av); - } if (mask & IB_QP_ALT_PATH) { rxe_init_av(&attr->alt_ah_attr, &qp->alt_av); diff --git a/drivers/infiniband/sw/rxe/rxe_task.h b/drivers/infiniband/sw/rxe/rxe_task.h index 08ff42d451c6..66af2f92358b 100644 --- a/drivers/infiniband/sw/rxe/rxe_task.h +++ b/drivers/infiniband/sw/rxe/rxe_task.h @@ -60,7 +60,7 @@ struct rxe_task { /* * init rxe_task structure * arg => parameter to pass to fcn - * fcn => function to call until it returns != 0 + * func => function to call until it returns != 0 */ int rxe_init_task(void *obj, struct rxe_task *task, void *arg, int (*func)(void *), char *name); diff --git a/include/uapi/rdma/rdma_user_rxe.h b/include/uapi/rdma/rdma_user_rxe.h index aae2e696bb38..d8f2e0e46dab 100644 --- a/include/uapi/rdma/rdma_user_rxe.h +++ b/include/uapi/rdma/rdma_user_rxe.h @@ -99,8 +99,8 @@ struct rxe_send_wr { struct ib_mr *mr; __aligned_u64 reserved; }; - __u32 key; - __u32 access; + __u32 key; + __u32 access; } reg; } wr; }; @@ -112,7 +112,7 @@ struct rxe_sge { }; struct mminfo { - __aligned_u64 offset; + __aligned_u64 offset; __u32 size; __u32 pad; }; -- cgit v1.2.3 From b60b9c0274007fe68439cd9cefdabbd7fb8a2ce6 Mon Sep 17 00:00:00 2001 From: Bob Pearson Date: Thu, 3 Sep 2020 17:40:34 -0500 Subject: RDMA/core: Added missing WR and WC opcodes Add work completion opcodes to a new ib_uverbs_wc_opcode enum in ib_user_verbs.h. This plays the same role as ib_uverbs_wr_opcode documenting the opcodes in the user space API. Assigned the IB_WC_XXX opcodes in ib_verbs.h to the IB_UVERBS_WC_XXX where they are defined. This follows the same pattern as the IB_WR_XXX opcodes. This fixes an incorrect value for LSO that had crept in but is not currently being used. Added a missing IB_WR_BIND_MW opcode in ib_verbs.h. 
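As a rough usage sketch (not taken from this patch), a userspace consumer built against the updated ib_user_verbs.h could decode the opcode field of struct ib_uverbs_wc with the new enum; the helper name below is hypothetical:

/* Illustration only: values come from the new enum ib_uverbs_wc_opcode
 * in <rdma/ib_user_verbs.h>; the helper itself is not part of the patch. */
static const char *wc_opcode_str(__u32 opcode)
{
	switch (opcode) {
	case IB_UVERBS_WC_SEND:		return "SEND";
	case IB_UVERBS_WC_RDMA_WRITE:	return "RDMA_WRITE";
	case IB_UVERBS_WC_RDMA_READ:	return "RDMA_READ";
	case IB_UVERBS_WC_COMP_SWAP:	return "COMP_SWAP";
	case IB_UVERBS_WC_FETCH_ADD:	return "FETCH_ADD";
	case IB_UVERBS_WC_BIND_MW:	return "BIND_MW";
	case IB_UVERBS_WC_LOCAL_INV:	return "LOCAL_INV";
	case IB_UVERBS_WC_TSO:		return "TSO";
	default:			return "UNKNOWN";
	}
}
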
Link: https://lore.kernel.org/r/20200903224039.437391-2-rpearson@hpe.com Signed-off-by: Bob Pearson Reviewed-by: Jason Gunthorpe Signed-off-by: Jason Gunthorpe --- include/rdma/ib_verbs.h | 16 +++++++++------- include/uapi/rdma/ib_user_verbs.h | 11 +++++++++++ 2 files changed, 20 insertions(+), 7 deletions(-) (limited to 'include/uapi') diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 97658d706aa1..5ae1d9849881 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -952,13 +952,14 @@ enum ib_wc_status { const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status); enum ib_wc_opcode { - IB_WC_SEND, - IB_WC_RDMA_WRITE, - IB_WC_RDMA_READ, - IB_WC_COMP_SWAP, - IB_WC_FETCH_ADD, - IB_WC_LSO, - IB_WC_LOCAL_INV, + IB_WC_SEND = IB_UVERBS_WC_SEND, + IB_WC_RDMA_WRITE = IB_UVERBS_WC_RDMA_WRITE, + IB_WC_RDMA_READ = IB_UVERBS_WC_RDMA_READ, + IB_WC_COMP_SWAP = IB_UVERBS_WC_COMP_SWAP, + IB_WC_FETCH_ADD = IB_UVERBS_WC_FETCH_ADD, + IB_WC_BIND_MW = IB_UVERBS_WC_BIND_MW, + IB_WC_LOCAL_INV = IB_UVERBS_WC_LOCAL_INV, + IB_WC_LSO = IB_UVERBS_WC_TSO, IB_WC_REG_MR, IB_WC_MASKED_COMP_SWAP, IB_WC_MASKED_FETCH_ADD, @@ -1291,6 +1292,7 @@ enum ib_wr_opcode { IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ, IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP, IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD, + IB_WR_BIND_MW = IB_UVERBS_WR_BIND_MW, IB_WR_LSO = IB_UVERBS_WR_TSO, IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV, IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV, diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h index 0474c7400268..456438c18c2c 100644 --- a/include/uapi/rdma/ib_user_verbs.h +++ b/include/uapi/rdma/ib_user_verbs.h @@ -457,6 +457,17 @@ struct ib_uverbs_poll_cq { __u32 ne; }; +enum ib_uverbs_wc_opcode { + IB_UVERBS_WC_SEND = 0, + IB_UVERBS_WC_RDMA_WRITE = 1, + IB_UVERBS_WC_RDMA_READ = 2, + IB_UVERBS_WC_COMP_SWAP = 3, + IB_UVERBS_WC_FETCH_ADD = 4, + IB_UVERBS_WC_BIND_MW = 5, + IB_UVERBS_WC_LOCAL_INV = 6, + IB_UVERBS_WC_TSO = 7, +}; + struct ib_uverbs_wc { __aligned_u64 wr_id; __u32 status; -- cgit v1.2.3 From 09a5f210f67eea4a2176820c3bc398747a564705 Mon Sep 17 00:00:00 2001 From: Wenpeng Liang Date: Wed, 16 Sep 2020 16:43:24 +0800 Subject: RDMA/hns: Add support for CQE in size of 64 Bytes The new version of RoCEE supports using CQE in size of 32B or 64B. The performance of bus can be improved by using larger size of CQE. 
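As an illustrative sketch only (not part of this patch), a userspace provider that learned the supported CQE size from hns_roce_ib_alloc_ucontext_resp.cqe_size could echo it back through the extended create-CQ command so kernel and userspace agree on the entry stride; the helper below is hypothetical:

/* Illustration only: field names come from the uAPI struct
 * hns_roce_ib_create_cq in include/uapi/rdma/hns-abi.h as extended by
 * this patch; the helper itself is made up. */
static void hns_fill_create_cq_cmd(struct hns_roce_ib_create_cq *cmd,
				   __aligned_u64 buf_addr,
				   __aligned_u64 db_addr,
				   __u32 ctx_cqe_size)
{
	cmd->buf_addr = buf_addr;
	cmd->db_addr = db_addr;
	/* HNS_ROCE_V2_CQE_SIZE (32) or HNS_ROCE_V3_CQE_SIZE (64),
	 * as reported by the kernel at alloc_ucontext time. */
	cmd->cqe_size = ctx_cqe_size;
	cmd->reserved = 0;
}
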
Link: https://lore.kernel.org/r/1600245806-56321-3-git-send-email-liweihang@huawei.com Signed-off-by: Wenpeng Liang Signed-off-by: Weihang Li Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hns/hns_roce_cq.c | 22 ++++++++++++++++++++-- drivers/infiniband/hw/hns/hns_roce_device.h | 6 +++++- drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 5 ++--- drivers/infiniband/hw/hns/hns_roce_hw_v1.h | 2 +- drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 20 +++++++++++++------- drivers/infiniband/hw/hns/hns_roce_hw_v2.h | 7 +++++-- drivers/infiniband/hw/hns/hns_roce_main.c | 2 ++ include/uapi/rdma/hns-abi.h | 4 +++- 8 files changed, 51 insertions(+), 17 deletions(-) (limited to 'include/uapi') diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index c5acf3332519..fff3e624e261 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c @@ -150,7 +150,7 @@ static int alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, int err; buf_attr.page_shift = hr_dev->caps.cqe_buf_pg_sz + HNS_HW_PAGE_SHIFT; - buf_attr.region[0].size = hr_cq->cq_depth * hr_dev->caps.cq_entry_sz; + buf_attr.region[0].size = hr_cq->cq_depth * hr_cq->cqe_size; buf_attr.region[0].hopnum = hr_dev->caps.cqe_hop_num; buf_attr.region_count = 1; buf_attr.fixed_page = true; @@ -224,6 +224,21 @@ static void free_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, } } +static void set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata, + struct hns_roce_ib_create_cq *ucmd) +{ + struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device); + + if (udata) { + if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size)) + hr_cq->cqe_size = ucmd->cqe_size; + else + hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE; + } else { + hr_cq->cqe_size = hr_dev->caps.cqe_sz; + } +} + int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr, struct ib_udata *udata) { @@ -258,7 +273,8 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr, INIT_LIST_HEAD(&hr_cq->rq_list); if (udata) { - ret = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)); + ret = ib_copy_from_udata(&ucmd, udata, + min(sizeof(ucmd), udata->inlen)); if (ret) { ibdev_err(ibdev, "Failed to copy CQ udata, err %d\n", ret); @@ -266,6 +282,8 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr, } } + set_cqe_size(hr_cq, udata, &ucmd); + ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr); if (ret) { ibdev_err(ibdev, "Failed to alloc CQ buf, err %d\n", ret); diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index ef92afdcee34..f935089b86ea 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -81,6 +81,9 @@ #define HNS_ROCE_V3_EQE_SIZE 0x40 +#define HNS_ROCE_V2_CQE_SIZE 32 +#define HNS_ROCE_V3_CQE_SIZE 64 + #define HNS_ROCE_SL_SHIFT 28 #define HNS_ROCE_TCLASS_SHIFT 20 #define HNS_ROCE_FLOW_LABEL_MASK 0xfffff @@ -469,6 +472,7 @@ struct hns_roce_cq { void __iomem *cq_db_l; u16 *tptr_addr; int arm_sn; + int cqe_size; unsigned long cqn; u32 vector; atomic_t refcount; @@ -796,7 +800,7 @@ struct hns_roce_caps { int num_pds; int reserved_pds; u32 mtt_entry_sz; - u32 cq_entry_sz; + u32 cqe_sz; u32 page_size_cap; u32 reserved_lkey; int mtpt_entry_sz; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index e0a171c79ad8..e66661777cb8 100644 --- 
a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -1475,7 +1475,7 @@ static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev) caps->cqc_entry_sz = HNS_ROCE_V1_CQC_ENTRY_SIZE; caps->mtpt_entry_sz = HNS_ROCE_V1_MTPT_ENTRY_SIZE; caps->mtt_entry_sz = HNS_ROCE_V1_MTT_ENTRY_SIZE; - caps->cq_entry_sz = HNS_ROCE_V1_CQE_ENTRY_SIZE; + caps->cqe_sz = HNS_ROCE_V1_CQE_SIZE; caps->page_size_cap = HNS_ROCE_V1_PAGE_SIZE_SUPPORT; caps->reserved_lkey = 0; caps->reserved_pds = 0; @@ -1896,8 +1896,7 @@ static int hns_roce_v1_write_mtpt(struct hns_roce_dev *hr_dev, void *mb_buf, static void *get_cqe(struct hns_roce_cq *hr_cq, int n) { - return hns_roce_buf_offset(hr_cq->mtr.kmem, - n * HNS_ROCE_V1_CQE_ENTRY_SIZE); + return hns_roce_buf_offset(hr_cq->mtr.kmem, n * HNS_ROCE_V1_CQE_SIZE); } static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h index 52307b2c7100..5996892a1b96 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h @@ -74,7 +74,7 @@ #define HNS_ROCE_V1_MTPT_ENTRY_SIZE 64 #define HNS_ROCE_V1_MTT_ENTRY_SIZE 64 -#define HNS_ROCE_V1_CQE_ENTRY_SIZE 32 +#define HNS_ROCE_V1_CQE_SIZE 32 #define HNS_ROCE_V1_PAGE_SIZE_SUPPORT 0xFFFFF000 #define HNS_ROCE_V1_TABLE_CHUNK_SIZE (1 << 17) diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index fe43c15a4793..835fbd74ce98 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -1690,7 +1690,7 @@ static void set_default_caps(struct hns_roce_dev *hr_dev) caps->mtpt_entry_sz = HNS_ROCE_V2_MTPT_ENTRY_SZ; caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ; caps->idx_entry_sz = HNS_ROCE_V2_IDX_ENTRY_SZ; - caps->cq_entry_sz = HNS_ROCE_V2_CQE_ENTRY_SIZE; + caps->cqe_sz = HNS_ROCE_V2_CQE_SIZE; caps->page_size_cap = HNS_ROCE_V2_PAGE_SIZE_SUPPORTED; caps->reserved_lkey = 0; caps->reserved_pds = 0; @@ -1770,6 +1770,7 @@ static void set_default_caps(struct hns_roce_dev *hr_dev) if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) { caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE; caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE; + caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE; } } @@ -1862,7 +1863,7 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev) caps->max_sq_desc_sz = resp_a->max_sq_desc_sz; caps->max_rq_desc_sz = resp_a->max_rq_desc_sz; caps->max_srq_desc_sz = resp_a->max_srq_desc_sz; - caps->cq_entry_sz = resp_a->cq_entry_sz; + caps->cqe_sz = HNS_ROCE_V2_CQE_SIZE; caps->mtpt_entry_sz = resp_b->mtpt_entry_sz; caps->irrl_entry_sz = resp_b->irrl_entry_sz; @@ -1993,6 +1994,7 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev) if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) { caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE; caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE; + caps->cqe_sz = HNS_ROCE_V3_CQE_SIZE; } calc_pg_sz(caps->num_qps, caps->qpc_entry_sz, caps->qpc_hop_num, @@ -2771,8 +2773,7 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw) static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n) { - return hns_roce_buf_offset(hr_cq->mtr.kmem, - n * HNS_ROCE_V2_CQE_ENTRY_SIZE); + return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size); } static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n) @@ -2872,6 +2873,10 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev, roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQN_M, 
V2_CQC_BYTE_8_CQN_S, hr_cq->cqn); + roce_set_field(cq_context->byte_8_cqn, V2_CQC_BYTE_8_CQE_SIZE_M, + V2_CQC_BYTE_8_CQE_SIZE_S, hr_cq->cqe_size == + HNS_ROCE_V3_CQE_SIZE ? 1 : 0); + cq_context->cqe_cur_blk_addr = cpu_to_le32(to_hr_hw_page_addr(mtts[0])); roce_set_field(cq_context->byte_16_hop_addr, @@ -3039,7 +3044,8 @@ out: } static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp, - struct hns_roce_v2_cqe *cqe, struct ib_wc *wc) + struct hns_roce_cq *cq, struct hns_roce_v2_cqe *cqe, + struct ib_wc *wc) { static const struct { u32 cqe_status; @@ -3080,7 +3086,7 @@ static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp, ibdev_err(&hr_dev->ib_dev, "error cqe status 0x%x:\n", cqe_status); print_hex_dump(KERN_ERR, "", DUMP_PREFIX_NONE, 16, 4, cqe, - sizeof(*cqe), false); + cq->cqe_size, false); /* * For hns ROCEE, GENERAL_ERR is an error type that is not defined in @@ -3177,7 +3183,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq, ++wq->tail; } - get_cqe_status(hr_dev, *cur_qp, cqe, wc); + get_cqe_status(hr_dev, *cur_qp, hr_cq, cqe, wc); if (unlikely(wc->status != IB_WC_SUCCESS)) return 0; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index f98c55a30784..ca6b0554a42c 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -86,7 +86,6 @@ #define HNS_ROCE_V2_MTPT_ENTRY_SZ 64 #define HNS_ROCE_V2_MTT_ENTRY_SZ 64 #define HNS_ROCE_V2_IDX_ENTRY_SZ 4 -#define HNS_ROCE_V2_CQE_ENTRY_SIZE 32 #define HNS_ROCE_V2_SCCC_ENTRY_SZ 32 #define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ PAGE_SIZE #define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE @@ -309,6 +308,9 @@ struct hns_roce_v2_cq_context { #define V2_CQC_BYTE_8_CQN_S 0 #define V2_CQC_BYTE_8_CQN_M GENMASK(23, 0) +#define V2_CQC_BYTE_8_CQE_SIZE_S 27 +#define V2_CQC_BYTE_8_CQE_SIZE_M GENMASK(28, 27) + #define V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_S 0 #define V2_CQC_BYTE_16_CQE_CUR_BLK_ADDR_M GENMASK(19, 0) @@ -896,6 +898,7 @@ struct hns_roce_v2_cqe { u8 smac[4]; __le32 byte_28; __le32 byte_32; + __le32 rsv[8]; }; #define V2_CQE_BYTE_4_OPCODE_S 0 @@ -1571,7 +1574,7 @@ struct hns_roce_query_pf_caps_a { u8 max_sq_desc_sz; u8 max_rq_desc_sz; u8 max_srq_desc_sz; - u8 cq_entry_sz; + u8 cqe_sz; }; struct hns_roce_query_pf_caps_b { diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index 2b4d75733e72..6f129e0f6d37 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -323,6 +323,8 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx, mutex_init(&context->page_mutex); } + resp.cqe_size = hr_dev->caps.cqe_sz; + ret = ib_copy_to_udata(udata, &resp, sizeof(resp)); if (ret) goto error_fail_copy_to_udata; diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h index eb76b38a00d4..9ec85f76e9ac 100644 --- a/include/uapi/rdma/hns-abi.h +++ b/include/uapi/rdma/hns-abi.h @@ -39,6 +39,8 @@ struct hns_roce_ib_create_cq { __aligned_u64 buf_addr; __aligned_u64 db_addr; + __u32 cqe_size; + __u32 reserved; }; struct hns_roce_ib_create_cq_resp { @@ -73,7 +75,7 @@ struct hns_roce_ib_create_qp_resp { struct hns_roce_ib_alloc_ucontext_resp { __u32 qp_tab_size; - __u32 reserved; + __u32 cqe_size; }; struct hns_roce_ib_alloc_pd_resp { -- cgit v1.2.3 From 677cf51f71c97bcf98852aa2077d7289bc73e3b3 Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Wed, 30 Sep 2020 19:38:27 +0300 Subject: RDMA/mlx5: Extend advice MR to 
support non faulting mode Extend advice MR to support non faulting mode, this can improve performance by increasing the populated page tables in the device. Link: https://lore.kernel.org/r/20200930163828.1336747-4-leon@kernel.org Signed-off-by: Yishai Hadas Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx5/mr.c | 3 ++- drivers/infiniband/hw/mlx5/odp.c | 7 ++++++- include/uapi/rdma/ib_user_ioctl_verbs.h | 1 + 3 files changed, 9 insertions(+), 2 deletions(-) (limited to 'include/uapi') diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index c69ce250769e..151b14038765 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -1312,7 +1312,8 @@ int mlx5_ib_advise_mr(struct ib_pd *pd, struct uverbs_attr_bundle *attrs) { if (advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH && - advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE) + advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE && + advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT) return -EOPNOTSUPP; return mlx5_ib_advise_mr_prefetch(pd, advice, flags, diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 4743ef78afe0..d01fdec05b89 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -665,6 +665,7 @@ void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr) } #define MLX5_PF_FLAGS_DOWNGRADE BIT(1) +#define MLX5_PF_FLAGS_SNAPSHOT BIT(2) static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp, u64 user_va, size_t bcnt, u32 *bytes_mapped, u32 flags) @@ -673,6 +674,7 @@ static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp, bool downgrade = flags & MLX5_PF_FLAGS_DOWNGRADE; u64 access_mask; u64 start_idx; + bool fault = !(flags & MLX5_PF_FLAGS_SNAPSHOT); page_shift = odp->page_shift; start_idx = (user_va - ib_umem_start(odp)) >> page_shift; @@ -681,7 +683,7 @@ static int pagefault_real_mr(struct mlx5_ib_mr *mr, struct ib_umem_odp *odp, if (odp->umem.writable && !downgrade) access_mask |= ODP_WRITE_ALLOWED_BIT; - np = ib_umem_odp_map_dma_and_lock(odp, user_va, bcnt, access_mask, true); + np = ib_umem_odp_map_dma_and_lock(odp, user_va, bcnt, access_mask, fault); if (np < 0) return np; @@ -1851,6 +1853,9 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd, if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH) pf_flags |= MLX5_PF_FLAGS_DOWNGRADE; + if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT) + pf_flags |= MLX5_PF_FLAGS_SNAPSHOT; + if (flags & IB_UVERBS_ADVISE_MR_FLAG_FLUSH) return mlx5_ib_prefetch_sg_list(pd, advice, pf_flags, sg_list, num_sge); diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h index 5debab45ebcb..fb8cdb38198b 100644 --- a/include/uapi/rdma/ib_user_ioctl_verbs.h +++ b/include/uapi/rdma/ib_user_ioctl_verbs.h @@ -208,6 +208,7 @@ enum ib_uverbs_read_counters_flags { enum ib_uverbs_advise_mr_advice { IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH, IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE, + IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT, }; enum ib_uverbs_advise_mr_flag { -- cgit v1.2.3 From c4b4d548fabc15b9c5db9f61204dd0c608414d2d Mon Sep 17 00:00:00 2001 From: Avihai Horon Date: Wed, 23 Sep 2020 19:50:14 +0300 Subject: RDMA/core: Introduce new GID table query API Introduce rdma_query_gid_table which enables querying all the GID tables of a given device and copying the attributes of all valid GID entries to a provided buffer. 
This API provides a faster way to query a GID table using single call and will be used in libibverbs to improve current approach that requires multiple calls to open, close and read multiple sysfs files for a single GID table entry. Link: https://lore.kernel.org/r/20200923165015.2491894-4-leon@kernel.org Signed-off-by: Avihai Horon Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cache.c | 66 +++++++++++++++++++++++++++++++-- include/rdma/ib_cache.h | 3 ++ include/uapi/rdma/ib_user_ioctl_verbs.h | 8 ++++ 3 files changed, 74 insertions(+), 3 deletions(-) (limited to 'include/uapi') diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index cf49ac0b0aa6..3a86a10c9572 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -1247,6 +1247,67 @@ done: } EXPORT_SYMBOL(rdma_get_gid_attr); +/** + * rdma_query_gid_table - Reads GID table entries of all the ports of a device up to max_entries. + * @device: The device to query. + * @entries: Entries where GID entries are returned. + * @max_entries: Maximum number of entries that can be returned. + * Entries array must be allocated to hold max_entries number of entries. + * @num_entries: Updated to the number of entries that were successfully read. + * + * Returns number of entries on success or appropriate error code. + */ +ssize_t rdma_query_gid_table(struct ib_device *device, + struct ib_uverbs_gid_entry *entries, + size_t max_entries) +{ + const struct ib_gid_attr *gid_attr; + ssize_t num_entries = 0, ret; + struct ib_gid_table *table; + unsigned int port_num, i; + struct net_device *ndev; + unsigned long flags; + + rdma_for_each_port(device, port_num) { + if (!rdma_ib_or_roce(device, port_num)) + continue; + + table = rdma_gid_table(device, port_num); + read_lock_irqsave(&table->rwlock, flags); + for (i = 0; i < table->sz; i++) { + if (!is_gid_entry_valid(table->data_vec[i])) + continue; + if (num_entries >= max_entries) { + ret = -EINVAL; + goto err; + } + + gid_attr = &table->data_vec[i]->attr; + + memcpy(&entries->gid, &gid_attr->gid, + sizeof(gid_attr->gid)); + entries->gid_index = gid_attr->index; + entries->port_num = gid_attr->port_num; + entries->gid_type = gid_attr->gid_type; + ndev = rcu_dereference_protected( + gid_attr->ndev, + lockdep_is_held(&table->rwlock)); + if (ndev) + entries->netdev_ifindex = ndev->ifindex; + + num_entries++; + entries++; + } + read_unlock_irqrestore(&table->rwlock, flags); + } + + return num_entries; +err: + read_unlock_irqrestore(&table->rwlock, flags); + return ret; +} +EXPORT_SYMBOL(rdma_query_gid_table); + /** * rdma_put_gid_attr - Release reference to the GID attribute * @attr: Pointer to the GID attribute whose reference @@ -1303,7 +1364,7 @@ struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr) struct ib_gid_table_entry *entry = container_of(attr, struct ib_gid_table_entry, attr); struct ib_device *device = entry->attr.device; - struct net_device *ndev = ERR_PTR(-ENODEV); + struct net_device *ndev = ERR_PTR(-EINVAL); u8 port_num = entry->attr.port_num; struct ib_gid_table *table; unsigned long flags; @@ -1315,8 +1376,7 @@ struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr) valid = is_gid_entry_valid(table->data_vec[attr->index]); if (valid) { ndev = rcu_dereference(attr->ndev); - if (!ndev || - (ndev && ((READ_ONCE(ndev->flags) & IFF_UP) == 0))) + if (!ndev) ndev = ERR_PTR(-ENODEV); } read_unlock_irqrestore(&table->rwlock, flags); diff --git 
a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h index 66a8f369a2fa..bae29f50adff 100644 --- a/include/rdma/ib_cache.h +++ b/include/rdma/ib_cache.h @@ -110,5 +110,8 @@ const struct ib_gid_attr *rdma_get_gid_attr(struct ib_device *device, u8 port_num, int index); void rdma_put_gid_attr(const struct ib_gid_attr *attr); void rdma_hold_gid_attr(const struct ib_gid_attr *attr); +ssize_t rdma_query_gid_table(struct ib_device *device, + struct ib_uverbs_gid_entry *entries, + size_t max_entries); #endif /* _IB_CACHE_H */ diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h index fb8cdb38198b..14820082de5e 100644 --- a/include/uapi/rdma/ib_user_ioctl_verbs.h +++ b/include/uapi/rdma/ib_user_ioctl_verbs.h @@ -251,4 +251,12 @@ enum rdma_driver_id { RDMA_DRIVER_SIW, }; +struct ib_uverbs_gid_entry { + __aligned_u64 gid[2]; + __u32 gid_index; + __u32 port_num; + __u32 gid_type; + __u32 netdev_ifindex; /* It is 0 if there is no netdev associated with it */ +}; + #endif -- cgit v1.2.3 From 9f85cbe50aa044a46f0a22fda323fa27b80c82da Mon Sep 17 00:00:00 2001 From: Avihai Horon Date: Wed, 23 Sep 2020 19:50:15 +0300 Subject: RDMA/uverbs: Expose the new GID query API to user space Expose the query GID table and entry API to user space by adding two new methods and method handlers to the device object. This API provides a faster way to query a GID table using single call and will be used in libibverbs to improve current approach that requires multiple calls to open, close and read multiple sysfs files for a single GID table entry. Link: https://lore.kernel.org/r/20200923165015.2491894-5-leon@kernel.org Signed-off-by: Avihai Horon Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/uverbs_std_types_device.c | 196 +++++++++++++++++++++- include/rdma/ib_verbs.h | 6 +- include/uapi/rdma/ib_user_ioctl_cmds.h | 16 ++ include/uapi/rdma/ib_user_ioctl_verbs.h | 6 + 4 files changed, 220 insertions(+), 4 deletions(-) (limited to 'include/uapi') diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c index 9f43c0161a8e..f367d523a46b 100644 --- a/drivers/infiniband/core/uverbs_std_types_device.c +++ b/drivers/infiniband/core/uverbs_std_types_device.c @@ -3,11 +3,13 @@ * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved. 
*/ +#include #include #include "rdma_core.h" #include "uverbs.h" #include #include +#include /* * This ioctl method allows calling any defined write or write_ex @@ -266,6 +268,172 @@ static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_CONTEXT)( return ucontext->device->ops.query_ucontext(ucontext, attrs); } +static int copy_gid_entries_to_user(struct uverbs_attr_bundle *attrs, + struct ib_uverbs_gid_entry *entries, + size_t num_entries, size_t user_entry_size) +{ + const struct uverbs_attr *attr; + void __user *user_entries; + size_t copy_len; + int ret; + int i; + + if (user_entry_size == sizeof(*entries)) { + ret = uverbs_copy_to(attrs, + UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES, + entries, sizeof(*entries) * num_entries); + return ret; + } + + copy_len = min_t(size_t, user_entry_size, sizeof(*entries)); + attr = uverbs_attr_get(attrs, UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES); + if (IS_ERR(attr)) + return PTR_ERR(attr); + + user_entries = u64_to_user_ptr(attr->ptr_attr.data); + for (i = 0; i < num_entries; i++) { + if (copy_to_user(user_entries, entries, copy_len)) + return -EFAULT; + + if (user_entry_size > sizeof(*entries)) { + if (clear_user(user_entries + sizeof(*entries), + user_entry_size - sizeof(*entries))) + return -EFAULT; + } + + entries++; + user_entries += user_entry_size; + } + + return uverbs_output_written(attrs, + UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES); +} + +static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_TABLE)( + struct uverbs_attr_bundle *attrs) +{ + struct ib_uverbs_gid_entry *entries; + struct ib_ucontext *ucontext; + struct ib_device *ib_dev; + size_t user_entry_size; + ssize_t num_entries; + size_t max_entries; + size_t num_bytes; + u32 flags; + int ret; + + ret = uverbs_get_flags32(&flags, attrs, + UVERBS_ATTR_QUERY_GID_TABLE_FLAGS, 0); + if (ret) + return ret; + + ret = uverbs_get_const(&user_entry_size, attrs, + UVERBS_ATTR_QUERY_GID_TABLE_ENTRY_SIZE); + if (ret) + return ret; + + max_entries = uverbs_attr_ptr_get_array_size( + attrs, UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES, + user_entry_size); + if (max_entries <= 0) + return -EINVAL; + + ucontext = ib_uverbs_get_ucontext(attrs); + if (IS_ERR(ucontext)) + return PTR_ERR(ucontext); + ib_dev = ucontext->device; + + if (check_mul_overflow(max_entries, sizeof(*entries), &num_bytes)) + return -EINVAL; + + entries = uverbs_zalloc(attrs, num_bytes); + if (!entries) + return -ENOMEM; + + num_entries = rdma_query_gid_table(ib_dev, entries, max_entries); + if (num_entries < 0) + return -EINVAL; + + ret = copy_gid_entries_to_user(attrs, entries, num_entries, + user_entry_size); + if (ret) + return ret; + + ret = uverbs_copy_to(attrs, + UVERBS_ATTR_QUERY_GID_TABLE_RESP_NUM_ENTRIES, + &num_entries, sizeof(num_entries)); + return ret; +} + +static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_GID_ENTRY)( + struct uverbs_attr_bundle *attrs) +{ + struct ib_uverbs_gid_entry entry = {}; + const struct ib_gid_attr *gid_attr; + struct ib_ucontext *ucontext; + struct ib_device *ib_dev; + struct net_device *ndev; + u32 gid_index; + u32 port_num; + u32 flags; + int ret; + + ret = uverbs_get_flags32(&flags, attrs, + UVERBS_ATTR_QUERY_GID_ENTRY_FLAGS, 0); + if (ret) + return ret; + + ret = uverbs_get_const(&port_num, attrs, + UVERBS_ATTR_QUERY_GID_ENTRY_PORT); + if (ret) + return ret; + + ret = uverbs_get_const(&gid_index, attrs, + UVERBS_ATTR_QUERY_GID_ENTRY_GID_INDEX); + if (ret) + return ret; + + ucontext = ib_uverbs_get_ucontext(attrs); + if (IS_ERR(ucontext)) + return PTR_ERR(ucontext); + ib_dev = ucontext->device; + + if 
(!rdma_is_port_valid(ib_dev, port_num)) + return -EINVAL; + + if (!rdma_ib_or_roce(ib_dev, port_num)) + return -EOPNOTSUPP; + + gid_attr = rdma_get_gid_attr(ib_dev, port_num, gid_index); + if (IS_ERR(gid_attr)) + return PTR_ERR(gid_attr); + + memcpy(&entry.gid, &gid_attr->gid, sizeof(gid_attr->gid)); + entry.gid_index = gid_attr->index; + entry.port_num = gid_attr->port_num; + entry.gid_type = gid_attr->gid_type; + + rcu_read_lock(); + ndev = rdma_read_gid_attr_ndev_rcu(gid_attr); + if (IS_ERR(ndev)) { + if (PTR_ERR(ndev) != -ENODEV) { + ret = PTR_ERR(ndev); + rcu_read_unlock(); + goto out; + } + } else { + entry.netdev_ifindex = ndev->ifindex; + } + rcu_read_unlock(); + + ret = uverbs_copy_to_struct_or_zero( + attrs, UVERBS_ATTR_QUERY_GID_ENTRY_RESP_ENTRY, &entry, + sizeof(entry)); +out: + rdma_put_gid_attr(gid_attr); + return ret; +} + DECLARE_UVERBS_NAMED_METHOD( UVERBS_METHOD_GET_CONTEXT, UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_GET_CONTEXT_NUM_COMP_VECTORS, @@ -300,12 +468,38 @@ DECLARE_UVERBS_NAMED_METHOD( reserved), UA_MANDATORY)); +DECLARE_UVERBS_NAMED_METHOD( + UVERBS_METHOD_QUERY_GID_TABLE, + UVERBS_ATTR_CONST_IN(UVERBS_ATTR_QUERY_GID_TABLE_ENTRY_SIZE, u64, + UA_MANDATORY), + UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_QUERY_GID_TABLE_FLAGS, u32, + UA_OPTIONAL), + UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES, + UVERBS_ATTR_MIN_SIZE(0), UA_MANDATORY), + UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_GID_TABLE_RESP_NUM_ENTRIES, + UVERBS_ATTR_TYPE(u64), UA_MANDATORY)); + +DECLARE_UVERBS_NAMED_METHOD( + UVERBS_METHOD_QUERY_GID_ENTRY, + UVERBS_ATTR_CONST_IN(UVERBS_ATTR_QUERY_GID_ENTRY_PORT, u32, + UA_MANDATORY), + UVERBS_ATTR_CONST_IN(UVERBS_ATTR_QUERY_GID_ENTRY_GID_INDEX, u32, + UA_MANDATORY), + UVERBS_ATTR_FLAGS_IN(UVERBS_ATTR_QUERY_GID_ENTRY_FLAGS, u32, + UA_MANDATORY), + UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_QUERY_GID_ENTRY_RESP_ENTRY, + UVERBS_ATTR_STRUCT(struct ib_uverbs_gid_entry, + netdev_ifindex), + UA_MANDATORY)); + DECLARE_UVERBS_GLOBAL_METHODS(UVERBS_OBJECT_DEVICE, &UVERBS_METHOD(UVERBS_METHOD_GET_CONTEXT), &UVERBS_METHOD(UVERBS_METHOD_INVOKE_WRITE), &UVERBS_METHOD(UVERBS_METHOD_INFO_HANDLES), &UVERBS_METHOD(UVERBS_METHOD_QUERY_PORT), - &UVERBS_METHOD(UVERBS_METHOD_QUERY_CONTEXT)); + &UVERBS_METHOD(UVERBS_METHOD_QUERY_CONTEXT), + &UVERBS_METHOD(UVERBS_METHOD_QUERY_GID_TABLE), + &UVERBS_METHOD(UVERBS_METHOD_QUERY_GID_ENTRY)); const struct uapi_definition uverbs_def_obj_device[] = { UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_DEVICE), diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 3b61fba531d0..ce935d70fdc8 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -138,9 +138,9 @@ union ib_gid { extern union ib_gid zgid; enum ib_gid_type { - IB_GID_TYPE_IB = 0, - IB_GID_TYPE_ROCE = 1, - IB_GID_TYPE_ROCE_UDP_ENCAP = 2, + IB_GID_TYPE_IB = IB_UVERBS_GID_TYPE_IB, + IB_GID_TYPE_ROCE = IB_UVERBS_GID_TYPE_ROCE_V1, + IB_GID_TYPE_ROCE_UDP_ENCAP = IB_UVERBS_GID_TYPE_ROCE_V2, IB_GID_TYPE_SIZE }; diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h index 99dcabf61a71..7968a1845355 100644 --- a/include/uapi/rdma/ib_user_ioctl_cmds.h +++ b/include/uapi/rdma/ib_user_ioctl_cmds.h @@ -70,6 +70,8 @@ enum uverbs_methods_device { UVERBS_METHOD_QUERY_PORT, UVERBS_METHOD_GET_CONTEXT, UVERBS_METHOD_QUERY_CONTEXT, + UVERBS_METHOD_QUERY_GID_TABLE, + UVERBS_METHOD_QUERY_GID_ENTRY, }; enum uverbs_attrs_invoke_write_cmd_attr_ids { @@ -352,4 +354,18 @@ enum uverbs_attrs_async_event_create { UVERBS_ATTR_ASYNC_EVENT_ALLOC_FD_HANDLE, }; +enum 
uverbs_attrs_query_gid_table_cmd_attr_ids { + UVERBS_ATTR_QUERY_GID_TABLE_ENTRY_SIZE, + UVERBS_ATTR_QUERY_GID_TABLE_FLAGS, + UVERBS_ATTR_QUERY_GID_TABLE_RESP_ENTRIES, + UVERBS_ATTR_QUERY_GID_TABLE_RESP_NUM_ENTRIES, +}; + +enum uverbs_attrs_query_gid_entry_cmd_attr_ids { + UVERBS_ATTR_QUERY_GID_ENTRY_PORT, + UVERBS_ATTR_QUERY_GID_ENTRY_GID_INDEX, + UVERBS_ATTR_QUERY_GID_ENTRY_FLAGS, + UVERBS_ATTR_QUERY_GID_ENTRY_RESP_ENTRY, +}; + #endif diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h index 14820082de5e..22483799cd07 100644 --- a/include/uapi/rdma/ib_user_ioctl_verbs.h +++ b/include/uapi/rdma/ib_user_ioctl_verbs.h @@ -251,6 +251,12 @@ enum rdma_driver_id { RDMA_DRIVER_SIW, }; +enum ib_uverbs_gid_type { + IB_UVERBS_GID_TYPE_IB, + IB_UVERBS_GID_TYPE_ROCE_V1, + IB_UVERBS_GID_TYPE_ROCE_V2, +}; + struct ib_uverbs_gid_entry { __aligned_u64 gid[2]; __u32 gid_index; -- cgit v1.2.3 From e0d696d201dd5d31813787d9b61a42fc459eee89 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Thu, 15 Oct 2020 20:42:18 -0300 Subject: RDMA/rxe: Move the definitions for rxe_av.network_type to uAPI RXE was wrongly using an internal kernel enum as part of its uAPI, split this out into a dedicated uAPI enum just for RXE. It only uses the IPv4 and IPv6 values. This was exposed by changing the internal kernel enum definition which broke RXE. Fixes: 1c15b4f2a42f ("RDMA/core: Modify enum ib_gid_type and enum rdma_network_type") Signed-off-by: Jason Gunthorpe --- drivers/infiniband/sw/rxe/rxe_net.c | 8 ++++---- include/uapi/rdma/rdma_user_rxe.h | 6 ++++++ 2 files changed, 10 insertions(+), 4 deletions(-) (limited to 'include/uapi') diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index 31b93e7e1e2f..575e1a4ec821 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c @@ -133,14 +133,14 @@ static struct dst_entry *rxe_find_route(struct net_device *ndev, if (dst) dst_release(dst); - if (av->network_type == RDMA_NETWORK_IPV4) { + if (av->network_type == RXE_NETWORK_TYPE_IPV4) { struct in_addr *saddr; struct in_addr *daddr; saddr = &av->sgid_addr._sockaddr_in.sin_addr; daddr = &av->dgid_addr._sockaddr_in.sin_addr; dst = rxe_find_route4(ndev, saddr, daddr); - } else if (av->network_type == RDMA_NETWORK_IPV6) { + } else if (av->network_type == RXE_NETWORK_TYPE_IPV6) { struct in6_addr *saddr6; struct in6_addr *daddr6; @@ -442,7 +442,7 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av, if (IS_ERR(attr)) return NULL; - if (av->network_type == RDMA_NETWORK_IPV4) + if (av->network_type == RXE_NETWORK_TYPE_IPV6) hdr_len = ETH_HLEN + sizeof(struct udphdr) + sizeof(struct iphdr); else @@ -469,7 +469,7 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av, skb->dev = ndev; rcu_read_unlock(); - if (av->network_type == RDMA_NETWORK_IPV4) + if (av->network_type == RXE_NETWORK_TYPE_IPV4) skb->protocol = htons(ETH_P_IP); else skb->protocol = htons(ETH_P_IPV6); diff --git a/include/uapi/rdma/rdma_user_rxe.h b/include/uapi/rdma/rdma_user_rxe.h index d8f2e0e46dab..e591d8c1f3cf 100644 --- a/include/uapi/rdma/rdma_user_rxe.h +++ b/include/uapi/rdma/rdma_user_rxe.h @@ -39,6 +39,11 @@ #include #include +enum { + RXE_NETWORK_TYPE_IPV4 = 1, + RXE_NETWORK_TYPE_IPV6 = 2, +}; + union rxe_gid { __u8 raw[16]; struct { @@ -57,6 +62,7 @@ struct rxe_global_route { struct rxe_av { __u8 port_num; + /* From RXE_NETWORK_TYPE_* */ __u8 network_type; __u8 dmac[6]; struct rxe_global_route grh; -- cgit v1.2.3
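
As a closing illustration (not part of the patch above), an rxe provider filling struct rxe_av would now pick the address-family marker from the dedicated uAPI enum rather than the kernel-internal enum rdma_network_type; the helper below is a hypothetical sketch:

#include <sys/socket.h>
#include <rdma/rdma_user_rxe.h>

/* Illustration only: maps a sockaddr family onto the new RXE_NETWORK_TYPE_*
 * uAPI values introduced in rdma_user_rxe.h. */
static void rxe_av_set_network_type(struct rxe_av *av,
				    const struct sockaddr *sa)
{
	av->network_type = (sa->sa_family == AF_INET6) ?
				   RXE_NETWORK_TYPE_IPV6 :
				   RXE_NETWORK_TYPE_IPV4;
}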