From 925d046e7e52c71c3531199ce137e141807ef740 Mon Sep 17 00:00:00 2001
From: Patrisious Haddad
Date: Tue, 7 Jun 2022 14:32:44 +0300
Subject: RDMA/core: Add a netevent notifier to cma

Add a netevent callback for cma, mainly to catch NETEVENT_NEIGH_UPDATE.

Previously, when a system with a failover MAC mechanism changed its MAC
address during a CM connection attempt, the RDMA-CM would take a long
time to disconnect and time out due to the incorrect MAC address.

Now when we get a NETEVENT_NEIGH_UPDATE we check whether it is due to a
failover MAC change, and if so, we instantly destroy the CM and notify
the user in order to spare the unnecessary wait for the timeout.

Link: https://lore.kernel.org/r/bb255c9e301cd50b905663b8e73f7f5133d0e4c5.1654601342.git.leonro@nvidia.com
Signed-off-by: Patrisious Haddad
Reviewed-by: Mark Zhang
Signed-off-by: Leon Romanovsky
---
 include/rdma/rdma_cm.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include')

diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index d989f030fae0..5b18e2e36ee6 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -108,6 +108,7 @@ struct rdma_cm_id {
 	enum rdma_ucm_port_space ps;
 	enum ib_qp_type qp_type;
 	u32 port_num;
+	struct work_struct net_work;
 };
 
 struct rdma_cm_id *
--
cgit v1.2.3

From f484da847a01936e1d087a80cc96e8254492527c Mon Sep 17 00:00:00 2001
From: Mark Bloch
Date: Sun, 3 Jul 2022 13:54:03 -0700
Subject: net/mlx5: Expose the ability to point to any UID from shared UID

Expose the shared_object_to_user_object_allowed capability. When set,
an object created with the shared UID can point to an object of any UID.

Signed-off-by: Mark Bloch
Reviewed-by: Maor Gottlieb
Signed-off-by: Saeed Mahameed
---
 include/linux/mlx5/mlx5_ifc.h | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'include')

diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 8e87eb47f9dc..9321d774e2d8 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1371,7 +1371,9 @@ enum {
 };
 
 struct mlx5_ifc_cmd_hca_cap_bits {
-	u8         reserved_at_0[0x1f];
+	u8         reserved_at_0[0x12];
+	u8         shared_object_to_user_object_allowed[0x1];
+	u8         reserved_at_13[0xc];
 	u8         vhca_resource_manager[0x1];
 
 	u8         hca_cap_2[0x1];
@@ -8507,7 +8509,7 @@ struct mlx5_ifc_create_flow_table_out_bits {
 
 struct mlx5_ifc_create_flow_table_in_bits {
 	u8         opcode[0x10];
-	u8         reserved_at_10[0x10];
+	u8         uid[0x10];
 
 	u8         reserved_at_20[0x10];
 	u8         op_mod[0x10];
--
cgit v1.2.3
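A minimal sketch of how a driver can gate behavior on the new capability bit; MLX5_CAP_GEN is the existing accessor for cmd_hca_cap bits, while the helper function itself is hypothetical:

	#include <linux/mlx5/driver.h>

	/* Hypothetical helper: true when objects created with the shared UID
	 * are allowed to point at objects owned by any UID.
	 */
	static bool shared_uid_may_point_anywhere(struct mlx5_core_dev *mdev)
	{
		return MLX5_CAP_GEN(mdev, shared_object_to_user_object_allowed);
	}
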
From 6c27c56cdc69608211617eea217e1a6cecccc675 Mon Sep 17 00:00:00 2001
From: Mark Bloch
Date: Sun, 3 Jul 2022 13:54:04 -0700
Subject: net/mlx5: fs, expose flow table ID to users

Expose the flow table ID to users. This will be used by downstream
patches to allow creating steering rules that point to a flow table ID.

Signed-off-by: Mark Bloch
Reviewed-by: Maor Gottlieb
Signed-off-by: Saeed Mahameed
---
 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 6 ++++++
 include/linux/mlx5/fs.h                           | 1 +
 2 files changed, 7 insertions(+)

(limited to 'include')

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 14187e50e2f9..1da3dc7c95fa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1195,6 +1195,12 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
 }
 EXPORT_SYMBOL(mlx5_create_flow_table);
 
+u32 mlx5_flow_table_id(struct mlx5_flow_table *ft)
+{
+	return ft->id;
+}
+EXPORT_SYMBOL(mlx5_flow_table_id);
+
 struct mlx5_flow_table *
 mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
 			     struct mlx5_flow_table_attr *ft_attr, u16 vport)
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index ece3e35622d7..eee07d416b56 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -315,4 +315,5 @@ struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
 void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
 				  struct mlx5_pkt_reformat *reformat);
 
+u32 mlx5_flow_table_id(struct mlx5_flow_table *ft);
 #endif
--
cgit v1.2.3
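As a sketch of what the exported ID enables (not part of the patch): a steering rule built elsewhere can reference the table by number instead of by pointer, using the existing FLOW_TABLE_NUM destination type from include/linux/mlx5/fs.h. The variables are placeholders:

	struct mlx5_flow_destination dest = {};

	/* Point the rule at the table by its firmware-visible ID. */
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
	dest.ft_num = mlx5_flow_table_id(ft);
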
From b0bb369ee451323968b31392a86398f15a2ba183 Mon Sep 17 00:00:00 2001
From: Mark Bloch
Date: Sun, 3 Jul 2022 13:54:05 -0700
Subject: net/mlx5: fs, allow flow table creation with a UID

Add a UID field to the flow table attributes to allow creating flow
tables with a non-default (i.e. non-zero) UID.

Signed-off-by: Mark Bloch
Reviewed-by: Alex Vesker
Signed-off-by: Saeed Mahameed
---
 drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c          | 16 ++++++++++------
 drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h          |  2 +-
 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c         |  2 +-
 .../net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c     |  1 +
 .../net/ethernet/mellanox/mlx5/core/steering/dr_table.c   |  8 +++++---
 .../net/ethernet/mellanox/mlx5/core/steering/dr_types.h   |  1 +
 drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c  |  7 ++++---
 .../net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h     |  3 ++-
 include/linux/mlx5/fs.h                                   |  1 +
 9 files changed, 26 insertions(+), 15 deletions(-)

(limited to 'include')

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 735dc805dad7..e735e19461ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -50,10 +50,12 @@ static int mlx5_cmd_stub_update_root_ft(struct mlx5_flow_root_namespace *ns,
 
 static int mlx5_cmd_stub_create_flow_table(struct mlx5_flow_root_namespace *ns,
 					   struct mlx5_flow_table *ft,
-					   unsigned int size,
+					   struct mlx5_flow_table_attr *ft_attr,
 					   struct mlx5_flow_table *next_ft)
 {
-	ft->max_fte = size ? roundup_pow_of_two(size) : 1;
+	int max_fte = ft_attr->max_fte;
+
+	ft->max_fte = max_fte ? roundup_pow_of_two(max_fte) : 1;
 
 	return 0;
 }
@@ -258,7 +260,7 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
 
 static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
 				      struct mlx5_flow_table *ft,
-				      unsigned int size,
+				      struct mlx5_flow_table_attr *ft_attr,
 				      struct mlx5_flow_table *next_ft)
 {
 	int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
@@ -267,17 +269,19 @@ static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
 	u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
 	u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
 	struct mlx5_core_dev *dev = ns->dev;
+	unsigned int size;
 	int err;
 
-	if (size != POOL_NEXT_SIZE)
-		size = roundup_pow_of_two(size);
-	size = mlx5_ft_pool_get_avail_sz(dev, ft->type, size);
+	if (ft_attr->max_fte != POOL_NEXT_SIZE)
+		size = roundup_pow_of_two(ft_attr->max_fte);
+	size = mlx5_ft_pool_get_avail_sz(dev, ft->type, ft_attr->max_fte);
 	if (!size)
 		return -ENOSPC;
 
 	MLX5_SET(create_flow_table_in, in, opcode,
 		 MLX5_CMD_OP_CREATE_FLOW_TABLE);
 
+	MLX5_SET(create_flow_table_in, in, uid, ft_attr->uid);
 	MLX5_SET(create_flow_table_in, in, table_type, ft->type);
 	MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level);
 	MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, size ? ilog2(size) : 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 274004e80f03..8ef4254b9ea1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -38,7 +38,7 @@ struct mlx5_flow_cmds {
 	int (*create_flow_table)(struct mlx5_flow_root_namespace *ns,
 				 struct mlx5_flow_table *ft,
-				 unsigned int size,
+				 struct mlx5_flow_table_attr *ft_attr,
 				 struct mlx5_flow_table *next_ft);
 	int (*destroy_flow_table)(struct mlx5_flow_root_namespace *ns,
 				  struct mlx5_flow_table *ft);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 1da3dc7c95fa..35d89edb1bcd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1155,7 +1155,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
 		find_next_chained_ft(fs_prio);
 	ft->def_miss_action = ns->def_miss_action;
 	ft->ns = ns;
-	err = root->cmds->create_flow_table(root, ft, ft_attr->max_fte, next_ft);
+	err = root->cmds->create_flow_table(root, ft, ft_attr, next_ft);
 	if (err)
 		goto free_ft;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 223c8741b7ae..16d65fe4f654 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -439,6 +439,7 @@ int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
 
 	MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
 	MLX5_SET(create_flow_table_in, in, table_type, attr->table_type);
+	MLX5_SET(create_flow_table_in, in, uid, attr->uid);
 
 	ft_mdev = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
 	MLX5_SET(flow_table_context, ft_mdev, termination_table, attr->term_tbl);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
index e5f6412baea9..31d443dd8386 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
@@ -214,7 +214,7 @@ static int dr_table_destroy_sw_owned_tbl(struct mlx5dr_table *tbl)
 					  tbl->table_type);
 }
 
-static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
+static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl, u16 uid)
 {
 	bool en_encap = !!(tbl->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
 	bool en_decap = !!(tbl->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
@@ -236,6 +236,7 @@ static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
 	ft_attr.sw_owner = true;
 	ft_attr.decap_en = en_decap;
 	ft_attr.reformat_en = en_encap;
+	ft_attr.uid = uid;
 
 	ret = mlx5dr_cmd_create_flow_table(tbl->dmn->mdev, &ft_attr,
 					   NULL, &tbl->table_id);
@@ -243,7 +244,8 @@ static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl)
 	return ret;
 }
 
-struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level, u32 flags)
+struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level,
+					 u32 flags, u16 uid)
 {
 	struct mlx5dr_table *tbl;
 	int ret;
@@ -263,7 +265,7 @@ struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level, u
 	if (ret)
 		goto free_tbl;
 
-	ret = dr_table_create_sw_owned_tbl(tbl);
+	ret = dr_table_create_sw_owned_tbl(tbl, uid);
 	if (ret)
 		goto uninit_tbl;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 98320e3945ad..50b0dd4fb4a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -1200,6 +1200,7 @@ struct mlx5dr_cmd_query_flow_table_details {
 
 struct mlx5dr_cmd_create_flow_table_attr {
 	u32 table_type;
+	u16 uid;
 	u64 icm_addr_rx;
 	u64 icm_addr_tx;
 	u8 level;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 6a9abba92df6..c30ed8e18458 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -62,7 +62,7 @@ static int set_miss_action(struct mlx5_flow_root_namespace *ns,
 
 static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
 					 struct mlx5_flow_table *ft,
-					 unsigned int size,
+					 struct mlx5_flow_table_attr *ft_attr,
 					 struct mlx5_flow_table *next_ft)
 {
 	struct mlx5dr_table *tbl;
@@ -71,7 +71,7 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
 
 	if (mlx5_dr_is_fw_table(ft->flags))
 		return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft,
-								    size,
+								    ft_attr,
 								    next_ft);
 	flags = ft->flags;
 	/* turn off encap/decap if not supported for sw-str by fw */
@@ -79,7 +79,8 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
 		flags = ft->flags & ~(MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
 				      MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
 
-	tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain, ft->level, flags);
+	tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain, ft->level, flags,
+				  ft_attr->uid);
 	if (!tbl) {
 		mlx5_core_err(ns->dev, "Failed creating dr flow_table\n");
 		return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index 7626c85643b1..3bb14860b36d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -51,7 +51,8 @@ void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
 			    struct mlx5dr_domain *peer_dmn);
 
 struct mlx5dr_table *
-mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags);
+mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags,
+		    u16 uid);
 
 struct mlx5dr_table *
 mlx5dr_table_get_from_fs_ft(struct mlx5_flow_table *ft);
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index eee07d416b56..8e73c377da2c 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -178,6 +178,7 @@ struct mlx5_flow_table_attr {
 	int max_fte;
 	u32 level;
 	u32 flags;
+	u16 uid;
 	struct mlx5_flow_table *next_ft;
 
 	struct {
--
cgit v1.2.3
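A usage sketch for the new attribute (the namespace lookup, priority, and size are placeholders; MLX5_SHARED_RESOURCE_UID is the constant the RDMA patch below passes in this same field):

	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table *ft;

	ft_attr.prio = 0;			/* placeholder */
	ft_attr.max_fte = 256;			/* placeholder */
	ft_attr.uid = MLX5_SHARED_RESOURCE_UID;	/* non-default UID */

	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft))
		return PTR_ERR(ft);
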
From 0c6ab0ca9a662d4ca9742d97156bac0d3067d72d Mon Sep 17 00:00:00 2001
From: Mark Bloch
Date: Sun, 3 Jul 2022 13:54:07 -0700
Subject: RDMA/mlx5: Expose steering anchor to userspace

Expose a steering anchor per priority to allow users to re-inject
packets back into the default NIC pipeline for additional processing.

MLX5_IB_METHOD_STEERING_ANCHOR_CREATE returns a flow table ID which
a user can use to re-inject packets at a specific priority.

An FTE (flow table entry) can be created and the flow table ID used
as a destination.

When a packet is taken into an RDMA-controlled steering domain (like
software steering) there may be a need to insert the packet back into
the default NIC pipeline. This exposes a flow table ID to the user
that can be used as a destination in a flow table entry.

With this new method, priorities that are exposed to users via
MLX5_IB_METHOD_FLOW_MATCHER_CREATE can be reached from a non-zero UID.
Since user-created flow tables (via RDMA DEVX) are created with a
non-zero UID, it is impossible to point to a NIC core flow table (core
driver flow tables are created with a UID value of zero) from
userspace. Create the flow tables that are exposed to users with the
shared UID; this allows users to point to the default NIC flow tables.

Steering loops are prevented at the FW level, as FW enforces that no
flow table at level X can point to a table at a level lower than X.

Link: https://lore.kernel.org/all/20220703205407.110890-6-saeed@kernel.org/
Signed-off-by: Mark Bloch
Reviewed-by: Yishai Hadas
Signed-off-by: Saeed Mahameed
Signed-off-by: Leon Romanovsky
---
 drivers/infiniband/hw/mlx5/fs.c          | 138 +++++++++++++++++++++++++++++--
 drivers/infiniband/hw/mlx5/mlx5_ib.h     |   6 ++
 include/uapi/rdma/mlx5_user_ioctl_cmds.h |  17 ++++
 3 files changed, 156 insertions(+), 5 deletions(-)

(limited to 'include')

diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index b1402aea29c1..691d00c89f33 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -679,7 +679,15 @@ enum flow_table_type {
 #define MLX5_FS_MAX_TYPES	 6
 #define MLX5_FS_MAX_ENTRIES	 BIT(16)
 
-static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
+static bool mlx5_ib_shared_ft_allowed(struct ib_device *device)
+{
+	struct mlx5_ib_dev *dev = to_mdev(device);
+
+	return MLX5_CAP_GEN(dev->mdev, shared_object_to_user_object_allowed);
+}
+
+static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_ib_dev *dev,
+					   struct mlx5_flow_namespace *ns,
 					   struct mlx5_ib_flow_prio *prio,
 					   int priority,
 					   int num_entries, int num_groups,
@@ -688,6 +696,8 @@ static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
 	struct mlx5_flow_table_attr ft_attr = {};
 	struct mlx5_flow_table *ft;
 
+	if (mlx5_ib_shared_ft_allowed(&dev->ib_dev))
+		ft_attr.uid = MLX5_SHARED_RESOURCE_UID;
 	ft_attr.prio = priority;
 	ft_attr.max_fte = num_entries;
 	ft_attr.flags = flags;
@@ -784,8 +794,8 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
 
 	ft = prio->flow_table;
 	if (!ft)
-		return _get_prio(ns, prio, priority, max_table_size, num_groups,
-				 flags);
+		return _get_prio(dev, ns, prio, priority, max_table_size,
+				 num_groups, flags);
 
 	return prio;
 }
@@ -927,7 +937,7 @@ int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num,
 
 	prio = &dev->flow_db->opfcs[type];
 	if (!prio->flow_table) {
-		prio = _get_prio(ns, prio, priority,
+		prio = _get_prio(dev, ns, prio, priority,
 				 dev->num_ports * MAX_OPFC_RULES, 1, 0);
 		if (IS_ERR(prio)) {
 			err = PTR_ERR(prio);
@@ -1499,7 +1509,7 @@ _get_flow_table(struct mlx5_ib_dev *dev, u16 user_priority,
 	if (prio->flow_table)
 		return prio;
 
-	return _get_prio(ns, prio, priority, max_table_size,
+	return _get_prio(dev, ns, prio, priority, max_table_size,
 			 MLX5_FS_MAX_TYPES, flags);
 }
 
@@ -2016,6 +2026,23 @@ static int flow_matcher_cleanup(struct ib_uobject *uobject,
 	return 0;
 }
 
+static int steering_anchor_cleanup(struct ib_uobject *uobject,
+				   enum rdma_remove_reason why,
+				   struct uverbs_attr_bundle *attrs)
+{
+	struct mlx5_ib_steering_anchor *obj = uobject->object;
+
+	if (atomic_read(&obj->usecnt))
+		return -EBUSY;
+
+	mutex_lock(&obj->dev->flow_db->lock);
+	put_flow_table(obj->dev, obj->ft_prio, true);
+	mutex_unlock(&obj->dev->flow_db->lock);
+
+	kfree(obj);
+	return 0;
+}
+
 static int mlx5_ib_matcher_ns(struct uverbs_attr_bundle *attrs,
 			      struct mlx5_ib_flow_matcher *obj)
 {
@@ -2122,6 +2149,75 @@ end:
 	return err;
 }
 
+static int UVERBS_HANDLER(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE)(
+	struct uverbs_attr_bundle *attrs)
+{
+	struct ib_uobject *uobj = uverbs_attr_get_uobject(
+		attrs, MLX5_IB_ATTR_STEERING_ANCHOR_CREATE_HANDLE);
+	struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
+	enum mlx5_ib_uapi_flow_table_type ib_uapi_ft_type;
+	enum mlx5_flow_namespace_type ns_type;
+	struct mlx5_ib_steering_anchor *obj;
+	struct mlx5_ib_flow_prio *ft_prio;
+	u16 priority;
+	u32 ft_id;
+	int err;
+
+	if (!capable(CAP_NET_RAW))
+		return -EPERM;
+
+	err = uverbs_get_const(&ib_uapi_ft_type, attrs,
+			       MLX5_IB_ATTR_STEERING_ANCHOR_FT_TYPE);
+	if (err)
+		return err;
+
+	err = mlx5_ib_ft_type_to_namespace(ib_uapi_ft_type, &ns_type);
+	if (err)
+		return err;
+
+	err = uverbs_copy_from(&priority, attrs,
+			       MLX5_IB_ATTR_STEERING_ANCHOR_PRIORITY);
+	if (err)
+		return err;
+
+	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+	if (!obj)
+		return -ENOMEM;
+
+	mutex_lock(&dev->flow_db->lock);
+	ft_prio = _get_flow_table(dev, priority, ns_type, 0);
+	if (IS_ERR(ft_prio)) {
+		mutex_unlock(&dev->flow_db->lock);
+		err = PTR_ERR(ft_prio);
+		goto free_obj;
+	}
+
+	ft_prio->refcount++;
+	ft_id = mlx5_flow_table_id(ft_prio->flow_table);
+	mutex_unlock(&dev->flow_db->lock);
+
+	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_STEERING_ANCHOR_FT_ID,
+			     &ft_id, sizeof(ft_id));
+	if (err)
+		goto put_flow_table;
+
+	uobj->object = obj;
+	obj->dev = dev;
+	obj->ft_prio = ft_prio;
+	atomic_set(&obj->usecnt, 0);
+
+	return 0;
+
+put_flow_table:
+	mutex_lock(&dev->flow_db->lock);
+	put_flow_table(dev, ft_prio, true);
+	mutex_unlock(&dev->flow_db->lock);
+free_obj:
+	kfree(obj);
+
+	return err;
+}
+
 static struct ib_flow_action *
 mlx5_ib_create_modify_header(struct mlx5_ib_dev *dev,
 			     enum mlx5_ib_uapi_flow_table_type ft_type,
@@ -2478,6 +2574,35 @@ DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_FLOW_MATCHER,
 			    &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_CREATE),
 			    &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_MATCHER_DESTROY));
 
+DECLARE_UVERBS_NAMED_METHOD(
+	MLX5_IB_METHOD_STEERING_ANCHOR_CREATE,
+	UVERBS_ATTR_IDR(MLX5_IB_ATTR_STEERING_ANCHOR_CREATE_HANDLE,
+			MLX5_IB_OBJECT_STEERING_ANCHOR,
+			UVERBS_ACCESS_NEW,
+			UA_MANDATORY),
+	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_STEERING_ANCHOR_FT_TYPE,
+			     enum mlx5_ib_uapi_flow_table_type,
+			     UA_MANDATORY),
+	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_STEERING_ANCHOR_PRIORITY,
+			   UVERBS_ATTR_TYPE(u16),
+			   UA_MANDATORY),
+	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_STEERING_ANCHOR_FT_ID,
+			    UVERBS_ATTR_TYPE(u32),
+			    UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD_DESTROY(
+	MLX5_IB_METHOD_STEERING_ANCHOR_DESTROY,
+	UVERBS_ATTR_IDR(MLX5_IB_ATTR_STEERING_ANCHOR_DESTROY_HANDLE,
+			MLX5_IB_OBJECT_STEERING_ANCHOR,
+			UVERBS_ACCESS_DESTROY,
+			UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_OBJECT(
+	MLX5_IB_OBJECT_STEERING_ANCHOR,
+	UVERBS_TYPE_ALLOC_IDR(steering_anchor_cleanup),
+	&UVERBS_METHOD(MLX5_IB_METHOD_STEERING_ANCHOR_CREATE),
+	&UVERBS_METHOD(MLX5_IB_METHOD_STEERING_ANCHOR_DESTROY));
+
 const struct uapi_definition mlx5_ib_flow_defs[] = {
 	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
 		MLX5_IB_OBJECT_FLOW_MATCHER),
@@ -2486,6 +2611,9 @@ const struct uapi_definition mlx5_ib_flow_defs[] = {
 		&mlx5_ib_fs),
 	UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
 				&mlx5_ib_flow_actions),
+	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
+		MLX5_IB_OBJECT_STEERING_ANCHOR,
+		UAPI_DEF_IS_OBJ_SUPPORTED(mlx5_ib_shared_ft_allowed)),
 	{},
 };
 
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 7460e0dfe6db..688ee7c05a8f 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -259,6 +259,12 @@ struct mlx5_ib_flow_matcher {
 	u8			match_criteria_enable;
 };
 
+struct mlx5_ib_steering_anchor {
+	struct mlx5_ib_flow_prio *ft_prio;
+	struct mlx5_ib_dev *dev;
+	atomic_t usecnt;
+};
+
 struct mlx5_ib_pp {
 	u16 index;
 	struct mlx5_core_dev *mdev;
diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
index e539c84d63f1..3bee490eb585 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
@@ -228,6 +228,7 @@ enum mlx5_ib_objects {
 	MLX5_IB_OBJECT_VAR,
 	MLX5_IB_OBJECT_PP,
 	MLX5_IB_OBJECT_UAR,
+	MLX5_IB_OBJECT_STEERING_ANCHOR,
 };
 
 enum mlx5_ib_flow_matcher_create_attrs {
@@ -248,6 +249,22 @@ enum mlx5_ib_flow_matcher_methods {
 	MLX5_IB_METHOD_FLOW_MATCHER_DESTROY,
 };
 
+enum mlx5_ib_flow_steering_anchor_create_attrs {
+	MLX5_IB_ATTR_STEERING_ANCHOR_CREATE_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+	MLX5_IB_ATTR_STEERING_ANCHOR_FT_TYPE,
+	MLX5_IB_ATTR_STEERING_ANCHOR_PRIORITY,
+	MLX5_IB_ATTR_STEERING_ANCHOR_FT_ID,
+};
+
+enum mlx5_ib_flow_steering_anchor_destroy_attrs {
+	MLX5_IB_ATTR_STEERING_ANCHOR_DESTROY_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+};
+
+enum mlx5_ib_steering_anchor_methods {
+	MLX5_IB_METHOD_STEERING_ANCHOR_CREATE = (1U << UVERBS_ID_NS_SHIFT),
+	MLX5_IB_METHOD_STEERING_ANCHOR_DESTROY,
+};
+
 enum mlx5_ib_device_query_context_attrs {
 	MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX = (1U << UVERBS_ID_NS_SHIFT),
 };
--
cgit v1.2.3
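For context, a sketch of how userspace might consume this interface. rdma-core later wrapped the two methods in mlx5dv; the wrapper names and struct layout shown here are an assumption, not part of this kernel patch:

	/* Hypothetical rdma-core style usage. */
	struct mlx5dv_steering_anchor_attr attr = {
		.ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
		.priority = 3,
	};
	struct mlx5dv_steering_anchor *anchor;

	anchor = mlx5dv_create_steering_anchor(ctx, &attr);
	if (!anchor)
		return errno;

	/* anchor->id is the flow table ID returned by
	 * MLX5_IB_METHOD_STEERING_ANCHOR_CREATE; use it as a DEVX FTE
	 * destination to send packets back into the default NIC pipeline.
	 */
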
From 8937e28eac0cbe0ff76e15f142e4b2e5adf3f462 Mon Sep 17 00:00:00 2001
From: Xin Gao
Date: Fri, 22 Jul 2022 10:18:33 +0800
Subject: RDMA: Fix comment typo

The word `get' is duplicated; remove one.

Link: https://lore.kernel.org/r/20220722021833.15669-1-gaoxin@cdjrlc.com
Signed-off-by: Xin Gao
Signed-off-by: Jason Gunthorpe
---
 include/rdma/ib_verbs.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include')

diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 9c6317cf80d5..7c2f76f34f6f 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -4603,7 +4603,7 @@ static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
 /**
  * ib_lid_cpu16 - Return lid in 16bit CPU encoding.
- * In the current implementation the only way to get
+ * In the current implementation the only way to
  * get the 32bit lid is from other sources for OPA.
  * For IB, lids will always be 16bits so cast the
  * value accordingly.
--
cgit v1.2.3

From 0113780870b1597ae49f30abfa4957c239f913d3 Mon Sep 17 00:00:00 2001
From: Aharon Landau
Date: Tue, 26 Jul 2022 10:19:11 +0300
Subject: RDMA/mlx5: Rename the mkey cache variables and functions

After replacing the MR cache with an mkey cache, rename the variables
and functions to fit the new meaning.

Link: https://lore.kernel.org/r/20220726071911.122765-6-michaelgur@nvidia.com
Signed-off-by: Aharon Landau
Signed-off-by: Leon Romanovsky
Signed-off-by: Jason Gunthorpe
---
 drivers/infiniband/hw/mlx5/main.c    |  4 +--
 drivers/infiniband/hw/mlx5/mlx5_ib.h | 14 +++++-----
 drivers/infiniband/hw/mlx5/mr.c      | 54 ++++++++++++++++++------------------
 drivers/infiniband/hw/mlx5/odp.c     |  2 +-
 include/linux/mlx5/driver.h          |  6 ++--
 5 files changed, 40 insertions(+), 40 deletions(-)

(limited to 'include')

diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index b68fddeac0f1..a174a0eee8dc 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -4002,7 +4002,7 @@ static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
 {
 	int err;
 
-	err = mlx5_mr_cache_cleanup(dev);
+	err = mlx5_mkey_cache_cleanup(dev);
 	if (err)
 		mlx5_ib_warn(dev, "mr cache cleanup failed\n");
 
@@ -4022,7 +4022,7 @@ static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
 	if (ret)
 		return ret;
 
-	ret = mlx5_mr_cache_init(dev);
+	ret = mlx5_mkey_cache_init(dev);
 	if (ret) {
 		mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
 		mlx5r_umr_resource_cleanup(dev);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 91f985cd7d90..2e2ad3918385 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -764,9 +764,9 @@ struct mlx5r_async_create_mkey {
 	u32 mkey;
 };
 
-struct mlx5_mr_cache {
+struct mlx5_mkey_cache {
 	struct workqueue_struct *wq;
-	struct mlx5_cache_ent ent[MAX_MR_CACHE_ENTRIES];
+	struct mlx5_cache_ent ent[MAX_MKEY_CACHE_ENTRIES];
 	struct dentry *root;
 	unsigned long last_add;
 };
@@ -1065,7 +1065,7 @@ struct mlx5_ib_dev {
 	struct mlx5_ib_resources devr;
 
 	atomic_t mkey_var;
-	struct mlx5_mr_cache cache;
+	struct mlx5_mkey_cache cache;
 	struct timer_list delay_timer;
 	/* Prevents soft lock on massive reg MRs */
 	struct mutex slow_path_mutex;
@@ -1310,8 +1310,8 @@ void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
 			  u64 access_flags);
 void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
 int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
-int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
-int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
+int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev);
+int mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev);
 
 struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
 				       struct mlx5_cache_ent *ent,
@@ -1339,7 +1339,7 @@ int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq);
 void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
 int __init mlx5_ib_odp_init(void);
 void mlx5_ib_odp_cleanup(void);
-void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
+void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent);
 void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
 			   struct mlx5_ib_mr *mr, int flags);
 
@@ -1358,7 +1358,7 @@ static inline int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev,
 static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
 static inline int mlx5_ib_odp_init(void) { return 0; }
 static inline void mlx5_ib_odp_cleanup(void) {}
-static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
+static inline void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent) {}
 static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
 					 struct mlx5_ib_mr *mr, int flags) {}
 
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index edbc2990d151..129d531bd01b 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -119,7 +119,7 @@ static int mlx5_ib_create_mkey_cb(struct mlx5r_async_create_mkey *async_create)
 			       &async_create->cb_work);
 }
 
-static int mr_cache_max_order(struct mlx5_ib_dev *dev);
+static int mkey_cache_max_order(struct mlx5_ib_dev *dev);
 static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent);
 
 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
@@ -515,11 +515,11 @@ static const struct file_operations limit_fops = {
 	.read	= limit_read,
 };
 
-static bool someone_adding(struct mlx5_mr_cache *cache)
+static bool someone_adding(struct mlx5_mkey_cache *cache)
 {
 	unsigned int i;
 
-	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+	for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
 		struct mlx5_cache_ent *ent = &cache->ent[i];
 		bool ret;
 
@@ -569,7 +569,7 @@ static void queue_adjust_cache_locked(struct mlx5_cache_ent *ent)
 static void __cache_work_func(struct mlx5_cache_ent *ent)
 {
 	struct mlx5_ib_dev *dev = ent->dev;
-	struct mlx5_mr_cache *cache = &dev->cache;
+	struct mlx5_mkey_cache *cache = &dev->cache;
 	int err;
 
 	xa_lock_irq(&ent->mkeys);
@@ -681,7 +681,7 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
 
 static void clean_keys(struct mlx5_ib_dev *dev, int c)
 {
-	struct mlx5_mr_cache *cache = &dev->cache;
+	struct mlx5_mkey_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent = &cache->ent[c];
 	u32 mkey;
 
@@ -696,7 +696,7 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
 	xa_unlock_irq(&ent->mkeys);
 }
 
-static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_mkey_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
 {
 	if (!mlx5_debugfs_root || dev->is_rep)
 		return;
@@ -705,9 +705,9 @@ static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
 	dev->cache.root = NULL;
 }
 
-static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
+static void mlx5_mkey_cache_debugfs_init(struct mlx5_ib_dev *dev)
 {
-	struct mlx5_mr_cache *cache = &dev->cache;
+	struct mlx5_mkey_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent;
 	struct dentry *dir;
 	int i;
@@ -717,7 +717,7 @@ static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
 	cache->root = debugfs_create_dir("mr_cache",
 					 mlx5_debugfs_get_dev_root(dev->mdev));
 
-	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+	for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
 		ent = &cache->ent[i];
 		sprintf(ent->name, "%d", ent->order);
 		dir = debugfs_create_dir(ent->name, cache->root);
@@ -735,9 +735,9 @@ static void delay_time_func(struct timer_list *t)
 	WRITE_ONCE(dev->fill_delay, 0);
 }
 
-int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
+int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev)
 {
-	struct mlx5_mr_cache *cache = &dev->cache;
+	struct mlx5_mkey_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent;
 	int i;
 
@@ -750,7 +750,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 	mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx);
 	timer_setup(&dev->delay_timer, delay_time_func, 0);
-	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+	for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
 		ent = &cache->ent[i];
 		xa_init_flags(&ent->mkeys, XA_FLAGS_LOCK_IRQ);
 		ent->order = i + 2;
@@ -759,12 +759,12 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 
 		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
 
-		if (i > MR_CACHE_LAST_STD_ENTRY) {
-			mlx5_odp_init_mr_cache_entry(ent);
+		if (i > MKEY_CACHE_LAST_STD_ENTRY) {
+			mlx5_odp_init_mkey_cache_entry(ent);
 			continue;
 		}
 
-		if (ent->order > mr_cache_max_order(dev))
+		if (ent->order > mkey_cache_max_order(dev))
 			continue;
 
 		ent->page = PAGE_SHIFT;
@@ -781,19 +781,19 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 		xa_unlock_irq(&ent->mkeys);
 	}
 
-	mlx5_mr_cache_debugfs_init(dev);
+	mlx5_mkey_cache_debugfs_init(dev);
 
 	return 0;
 }
 
-int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
+int mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
 {
 	unsigned int i;
 
 	if (!dev->cache.wq)
 		return 0;
 
-	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
+	for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++) {
 		struct mlx5_cache_ent *ent = &dev->cache.ent[i];
 
 		xa_lock_irq(&ent->mkeys);
@@ -802,10 +802,10 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
 		cancel_delayed_work_sync(&ent->dwork);
 	}
 
-	mlx5_mr_cache_debugfs_cleanup(dev);
+	mlx5_mkey_cache_debugfs_cleanup(dev);
 	mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);
 
-	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
+	for (i = 0; i < MAX_MKEY_CACHE_ENTRIES; i++)
 		clean_keys(dev, i);
 
 	destroy_workqueue(dev->cache.wq);
@@ -872,22 +872,22 @@ static int get_octo_len(u64 addr, u64 len, int page_shift)
 	return (npages + 1) / 2;
 }
 
-static int mr_cache_max_order(struct mlx5_ib_dev *dev)
+static int mkey_cache_max_order(struct mlx5_ib_dev *dev)
 {
 	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
-		return MR_CACHE_LAST_STD_ENTRY + 2;
+		return MKEY_CACHE_LAST_STD_ENTRY + 2;
 	return MLX5_MAX_UMR_SHIFT;
 }
 
-static struct mlx5_cache_ent *mr_cache_ent_from_order(struct mlx5_ib_dev *dev,
-						      unsigned int order)
+static struct mlx5_cache_ent *mkey_cache_ent_from_order(struct mlx5_ib_dev *dev,
+							unsigned int order)
 {
-	struct mlx5_mr_cache *cache = &dev->cache;
+	struct mlx5_mkey_cache *cache = &dev->cache;
 
 	if (order < cache->ent[0].order)
 		return &cache->ent[0];
 	order = order - cache->ent[0].order;
-	if (order > MR_CACHE_LAST_STD_ENTRY)
+	if (order > MKEY_CACHE_LAST_STD_ENTRY)
 		return NULL;
 	return &cache->ent[order];
 }
@@ -930,7 +930,7 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
 						     0, iova);
 	if (WARN_ON(!page_size))
 		return ERR_PTR(-EINVAL);
-	ent = mr_cache_ent_from_order(
+	ent = mkey_cache_ent_from_order(
 		dev, order_base_2(ib_umem_num_dma_blocks(umem, page_size)));
 	/*
 	 * Matches access in alloc_cache_mr(). If the MR can't come from the
If the MR can't come from the diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 84da5674e1ab..e305bf1dc6c2 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -1588,7 +1588,7 @@ mlx5_ib_odp_destroy_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq) return err; } -void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) +void mlx5_odp_init_mkey_cache_entry(struct mlx5_cache_ent *ent) { if (!(ent->dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT)) return; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 76d7661e3e63..220597c2f436 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -728,10 +728,10 @@ enum { }; enum { - MR_CACHE_LAST_STD_ENTRY = 20, + MKEY_CACHE_LAST_STD_ENTRY = 20, MLX5_IMR_MTT_CACHE_ENTRY, MLX5_IMR_KSM_CACHE_ENTRY, - MAX_MR_CACHE_ENTRIES + MAX_MKEY_CACHE_ENTRIES }; struct mlx5_profile { @@ -740,7 +740,7 @@ struct mlx5_profile { struct { int size; int limit; - } mr_cache[MAX_MR_CACHE_ENTRIES]; + } mr_cache[MAX_MKEY_CACHE_ENTRIES]; }; struct mlx5_hca_cap { -- cgit v1.2.3 From d8f70c47394c26a8d7c6e602d909de88d1bdae5e Mon Sep 17 00:00:00 2001 From: Cheng Xu Date: Wed, 27 Jul 2022 09:49:17 +0800 Subject: RDMA: Add ERDMA to rdma_driver_id definition Define RDMA_DRIVER_ERDMA in enum rdma_driver_id. Link: https://lore.kernel.org/r/20220727014927.76564-2-chengyou@linux.alibaba.com Signed-off-by: Cheng Xu Signed-off-by: Jason Gunthorpe --- include/uapi/rdma/ib_user_ioctl_verbs.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h index 3072e5d6b692..7dd56210226f 100644 --- a/include/uapi/rdma/ib_user_ioctl_verbs.h +++ b/include/uapi/rdma/ib_user_ioctl_verbs.h @@ -250,6 +250,7 @@ enum rdma_driver_id { RDMA_DRIVER_QIB, RDMA_DRIVER_EFA, RDMA_DRIVER_SIW, + RDMA_DRIVER_ERDMA, }; enum ib_uverbs_gid_type { -- cgit v1.2.3 From f5995fe2a0b1f4ec5e52e2022452246a13bf89b7 Mon Sep 17 00:00:00 2001 From: Cheng Xu Date: Wed, 27 Jul 2022 09:49:26 +0800 Subject: RDMA/erdma: Add the ABI definitions Add erdma ABI definitions which will be shared between kernel and userspace. This commit also fix compile issues reported by lkp. Link: https://lore.kernel.org/r/20220727014927.76564-11-chengyou@linux.alibaba.com Reported-by: kernel test robot Signed-off-by: Cheng Xu Signed-off-by: Jason Gunthorpe --- include/uapi/rdma/erdma-abi.h | 49 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 include/uapi/rdma/erdma-abi.h (limited to 'include') diff --git a/include/uapi/rdma/erdma-abi.h b/include/uapi/rdma/erdma-abi.h new file mode 100644 index 000000000000..b7a0222f978f --- /dev/null +++ b/include/uapi/rdma/erdma-abi.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* + * Copyright (c) 2020-2022, Alibaba Group. 
+ */
+
+#ifndef __ERDMA_USER_H__
+#define __ERDMA_USER_H__
+
+#include <linux/types.h>
+
+#define ERDMA_ABI_VERSION	1
+
+struct erdma_ureq_create_cq {
+	__aligned_u64 db_record_va;
+	__aligned_u64 qbuf_va;
+	__u32 qbuf_len;
+	__u32 rsvd0;
+};
+
+struct erdma_uresp_create_cq {
+	__u32 cq_id;
+	__u32 num_cqe;
+};
+
+struct erdma_ureq_create_qp {
+	__aligned_u64 db_record_va;
+	__aligned_u64 qbuf_va;
+	__u32 qbuf_len;
+	__u32 rsvd0;
+};
+
+struct erdma_uresp_create_qp {
+	__u32 qp_id;
+	__u32 num_sqe;
+	__u32 num_rqe;
+	__u32 rq_offset;
+};
+
+struct erdma_uresp_alloc_ctx {
+	__u32 dev_id;
+	__u32 pad;
+	__u32 sdb_type;
+	__u32 sdb_offset;
+	__aligned_u64 sdb;
+	__aligned_u64 rdb;
+	__aligned_u64 cdb;
+};
+
+#endif
--
cgit v1.2.3
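As a closing usage sketch (not part of the patch), a userspace provider would pass these structures as the driver-private payload of the corresponding create commands; the buffer variables here are placeholders:

	/* Hypothetical provider-side fill of the create-CQ request. */
	struct erdma_ureq_create_cq req = {};

	req.db_record_va = (__u64)(uintptr_t)db_record;	/* doorbell record */
	req.qbuf_va = (__u64)(uintptr_t)queue_buf;	/* CQ queue buffer */
	req.qbuf_len = queue_buf_len;

	/* The kernel answers with struct erdma_uresp_create_cq,
	 * reporting the assigned cq_id and the actual num_cqe.
	 */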