import rdma-core-32.0-2.el8

CentOS Sources 2020-12-11 10:08:26 +00:00 committed by Andrew Lukoshko
parent 14302db10b
commit 3daeb741a8
8 changed files with 601 additions and 1 deletion

0001-Fix-cmd_fd-leak-in-mlx5_alloc_context.patch (new file)

@@ -0,0 +1,91 @@
From 169d050bc82a519fdc28f83bb685d86804383f0b Mon Sep 17 00:00:00 2001
From: Greg Inozemtsev <greg@purestorage.com>
Date: Tue, 27 Oct 2020 11:24:30 -0700
Subject: [PATCH] Fix cmd_fd leak in mlx5_alloc_context
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
The alloc_context function is supposed to take ownership of cmd_fd
and close it if it fails and returns NULL. This was not done in
early exit error paths from mlx5_init_context (called from mlx5_alloc_context).
Fixes: cb3be404fadc ("mlx5: Refactor mlx5_alloc_context()")
Signed-off-by: Greg Inozemtsev <greg@purestorage.com>
---
providers/mlx5/mlx5.c | 28 ++++++++++++++--------------
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/providers/mlx5/mlx5.c b/providers/mlx5/mlx5.c
index 0a091f5b6b49..551560d561fe 100644
--- a/providers/mlx5/mlx5.c
+++ b/providers/mlx5/mlx5.c
@@ -1334,25 +1334,14 @@ static void mlx5_uninit_context(struct mlx5_context *context)
}
static struct mlx5_context *mlx5_init_context(struct ibv_device *ibdev,
- int cmd_fd,
- void *private_data)
+ int cmd_fd)
{
- struct mlx5dv_context_attr *ctx_attr = private_data;
struct mlx5_device *mdev = to_mdev(ibdev);
struct mlx5_context *context;
int low_lat_uuars;
int tot_uuars;
int ret;
- if (ctx_attr && ctx_attr->comp_mask) {
- errno = EINVAL;
- return NULL;
- }
-
- ret = get_uar_info(mdev, &tot_uuars, &low_lat_uuars);
- if (ret)
- return NULL;
-
context = verbs_init_and_alloc_context(ibdev, cmd_fd, context, ibv_ctx,
RDMA_DRIVER_MLX5);
if (!context)
@@ -1365,6 +1354,12 @@ static struct mlx5_context *mlx5_init_context(struct ibv_device *ibdev,
strcpy(context->hostname, "host_unknown");
mlx5_single_threaded = single_threaded_app();
+
+ ret = get_uar_info(mdev, &tot_uuars, &low_lat_uuars);
+ if (ret) {
+ mlx5_uninit_context(context);
+ return NULL;
+ }
context->tot_uuars = tot_uuars;
context->low_lat_uuars = low_lat_uuars;
@@ -1569,10 +1564,15 @@ static struct verbs_context *mlx5_alloc_context(struct ibv_device *ibdev,
bool always_devx = false;
int ret;
- context = mlx5_init_context(ibdev, cmd_fd, NULL);
+ context = mlx5_init_context(ibdev, cmd_fd);
if (!context)
return NULL;
+ if (ctx_attr && ctx_attr->comp_mask) {
+ errno = EINVAL;
+ goto err;
+ }
+
req.total_num_bfregs = context->tot_uuars;
req.num_low_latency_bfregs = context->low_lat_uuars;
req.max_cqe_version = MLX5_CQE_VERSION_V1;
@@ -1627,7 +1627,7 @@ static struct verbs_context *mlx5_import_context(struct ibv_device *ibdev,
struct mlx5_context *mctx;
int ret;
- mctx = mlx5_init_context(ibdev, cmd_fd, NULL);
+ mctx = mlx5_init_context(ibdev, cmd_fd);
if (!mctx)
return NULL;
--
2.25.4
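
For context, a minimal self-contained sketch of the ownership rule this fix
restores: whoever receives cmd_fd must close it on every failure path before
returning NULL. ctx_new() and its capability check are hypothetical stand-ins,
not rdma-core code.

#include <stdlib.h>
#include <unistd.h>

struct ctx {
        int cmd_fd;
};

/* ctx_new() takes ownership of cmd_fd; on any failure it must close the
 * fd (directly or via context teardown) and return NULL. */
static struct ctx *ctx_new(int cmd_fd, int caps_ok)
{
        struct ctx *c = malloc(sizeof(*c));

        if (!c) {
                close(cmd_fd);          /* nothing owns the fd yet */
                return NULL;
        }
        c->cmd_fd = cmd_fd;             /* the context owns the fd now */

        if (!caps_ok) {                 /* late check, as in the patch */
                close(c->cmd_fd);       /* unwind through the context */
                free(c);
                return NULL;
        }
        return c;
}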

0001-efa-Flush-write-combining-writes-before-writing-to-t.patch (new file)

@@ -0,0 +1,86 @@
From 4b7203f835727e9314ef42db682b578730783d7d Mon Sep 17 00:00:00 2001
From: Gal Pressman <galpress@amazon.com>
Date: Wed, 11 Nov 2020 14:21:13 +0200
Subject: [PATCH] efa: Flush write combining writes before writing to the LLQ
[ Upstream commit 9a0d3830da11a187fb6bffe4f6f361560a0b2f40 ]
An mmio_wc_start() is needed before writing to the LLQ memory in order
to prevent the WQEs copy (WC memory) from being reordered relative to
other mmio writes, such as tx doorbells (NC memory).
This prevents the provider from issuing more than max_tx_batch LLQ writes
between two doorbells. This is especially relevant when the user calls
the _post API with more WQEs than max_tx_batch.
Fixes: 7aad28d11981 ("efa: Respect maximum TX doorbell batch")
Signed-off-by: Shadi Ammouri <sammouri@amazon.com>
Signed-off-by: Gal Pressman <galpress@amazon.com>
Signed-off-by: Nicolas Morey-Chaisemartin <nmoreychaisemartin@suse.com>
---
providers/efa/verbs.c | 14 +++++++++++---
1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/providers/efa/verbs.c b/providers/efa/verbs.c
index e179ff24e911..e80660d1907f 100644
--- a/providers/efa/verbs.c
+++ b/providers/efa/verbs.c
@@ -1389,7 +1389,6 @@ static inline void efa_rq_ring_doorbell(struct efa_rq *rq, uint16_t pc)
static inline void efa_sq_ring_doorbell(struct efa_sq *sq, uint16_t pc)
{
- mmio_flush_writes();
mmio_write32(sq->wq.db, pc);
}
@@ -1510,15 +1509,19 @@ int efa_post_send(struct ibv_qp *ibvqp, struct ibv_send_wr *wr,
if (curbatch == qp->sq.max_batch_wr) {
curbatch = 0;
+ mmio_flush_writes();
efa_sq_ring_doorbell(&qp->sq, qp->sq.wq.pc);
+ mmio_wc_start();
}
wr = wr->next;
}
ring_db:
- if (curbatch)
+ if (curbatch) {
+ mmio_flush_writes();
efa_sq_ring_doorbell(&qp->sq, qp->sq.wq.pc);
+ }
/*
* Not using mmio_wc_spinunlock as the doorbell write should be done
@@ -1774,6 +1777,7 @@ static int efa_send_wr_complete(struct ibv_qp_ex *ibvqpx)
pc = qp->sq.wq.pc - qp->sq.num_wqe_pending;
sq_desc_idx = pc & qp->sq.wq.desc_mask;
+ /* mmio_wc_start() comes from efa_send_wr_start() */
while (qp->sq.num_wqe_pending) {
num_wqe_to_copy = min3(qp->sq.num_wqe_pending,
qp->sq.wq.wqe_cnt - sq_desc_idx,
@@ -1792,13 +1796,17 @@ static int efa_send_wr_complete(struct ibv_qp_ex *ibvqpx)
qp->sq.wq.desc_mask;
if (curbatch == max_txbatch) {
+ mmio_flush_writes();
efa_sq_ring_doorbell(&qp->sq, pc);
curbatch = 0;
+ mmio_wc_start();
}
}
- if (curbatch)
+ if (curbatch) {
+ mmio_flush_writes();
efa_sq_ring_doorbell(&qp->sq, qp->sq.wq.pc);
+ }
out:
/*
* Not using mmio_wc_spinunlock as the doorbell write should be done
--
2.25.4
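
A rough sketch of the batching pattern the patch enforces, built around the
mmio helpers that appear in the diff (mmio_wc_start(), mmio_flush_writes(),
mmio_write32() from rdma-core's util/mmio.h, so it only builds inside the
tree); the queue layout and copy_wqe_to_llq() are hypothetical stand-ins for
the EFA LLQ details.

#include <stdint.h>
#include <util/mmio.h>          /* rdma-core internal mmio helpers */

struct sketch_sq {
        uint32_t *db;           /* doorbell register (NC mapping) */
        uint16_t pc;            /* producer counter */
        uint16_t max_batch_wr;  /* max WQEs allowed between doorbells */
};

static void copy_wqe_to_llq(struct sketch_sq *sq, const void *wqe)
{
        (void)sq; (void)wqe;    /* device-specific copy into WC memory, elided */
}

static void post_wqes(struct sketch_sq *sq, const void **wqes, int n)
{
        int curbatch = 0;

        mmio_wc_start();                        /* open a WC section */
        for (int i = 0; i < n; i++) {
                copy_wqe_to_llq(sq, wqes[i]);   /* WQE copy into WC memory */
                sq->pc++;

                if (++curbatch == sq->max_batch_wr) {
                        curbatch = 0;
                        mmio_flush_writes();    /* WC copies reach HW first... */
                        mmio_write32(sq->db, sq->pc);   /* ...then the doorbell */
                        mmio_wc_start();        /* reopen WC for the next batch */
                }
        }
        if (curbatch) {
                mmio_flush_writes();
                mmio_write32(sq->db, sq->pc);
        }
}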

0001-infiniband-diags-specify-the-HCA-name-and-Port-numbe.patch (new file)

@@ -0,0 +1,78 @@
From a2c4768ff2f0516791b80640894c5abd4eab14bf Mon Sep 17 00:00:00 2001
From: Honggang Li <honli@redhat.com>
Date: Tue, 27 Oct 2020 17:06:43 +0800
Subject: [PATCH] infiniband-diags: specify the HCA name and Port number when
run ibportstate
A host from which the enable/disable/reset command is executed may be
connected to multiple InfiniBand fabrics. When the HCA name and Port
number are not specified, the libibumad library picks the first active
port it finds, which may not be the intended one. It is therefore
recommended to specify the HCA name and Port number when running
ibportstate.
On the other hand, an HCA port may be changed locally without the
knowledge of the Subnet Manager. When locally enabling a disabled HCA
port, the HCA name and Port number must be specified.
Signed-off-by: Honggang Li <honli@redhat.com>
---
infiniband-diags/ibportstate.c | 4 ++--
infiniband-diags/man/ibportstate.8.in.rst | 13 +++++++------
2 files changed, 9 insertions(+), 8 deletions(-)
diff --git a/infiniband-diags/ibportstate.c b/infiniband-diags/ibportstate.c
index 17d1e1503834..7f3afb83bdb3 100644
--- a/infiniband-diags/ibportstate.c
+++ b/infiniband-diags/ibportstate.c
@@ -401,8 +401,8 @@ int main(int argc, char **argv)
"\twidth, query, down, arm, active, vls, mtu, lid, smlid, lmc,\n"
"\tmkey, mkeylease, mkeyprot\n";
const char *usage_examples[] = {
- "3 1 disable\t\t\t# by lid",
- "-G 0x2C9000100D051 1 enable\t# by guid",
+ "-C qib0 -P 1 3 1 disable # by CA name, CA Port Number, lid, physical port number",
+ "-C qib0 -P 1 3 1 enable # by CA name, CA Port Number, lid, physical port number",
"-D 0 1\t\t\t# (query) by direct route",
"3 1 reset\t\t\t# by lid",
"3 1 speed 1\t\t\t# by lid",
diff --git a/infiniband-diags/man/ibportstate.8.in.rst b/infiniband-diags/man/ibportstate.8.in.rst
index da1de4c76317..309a3d191c51 100644
--- a/infiniband-diags/man/ibportstate.8.in.rst
+++ b/infiniband-diags/man/ibportstate.8.in.rst
@@ -22,8 +22,9 @@ DESCRIPTION
ibportstate allows the port state and port physical state of an IB port
to be queried (in addition to link width and speed being validated
relative to the peer port when the port queried is a switch port),
-or a switch port to be disabled, enabled, or reset. It
-also allows the link speed/width enabled on any IB port to be adjusted.
+or a switch port to be disabled, enabled, or reset. InfiniBand HCA port
+state may be changed locally without the knowledge of the Subnet Manager.
+It also allows the link speed/width enabled on any IB port to be adjusted.
OPTIONS
=======
@@ -34,8 +35,8 @@ OPTIONS
mkey, mkeylease, mkeyprot
(Default is query)
- **enable, disable, and reset** are only allowed on switch ports (An
- error is indicated if attempted on CA or router ports)
+ **enable, disable, and reset** change or reset a switch or HCA port state
+ (You must specify the CA name and Port number when locally changing the CA port state.)
**off** change the port state to disable.
@@ -114,8 +115,8 @@ EXAMPLES
========
::
- ibportstate 3 1 disable # by lid
- ibportstate -G 0x2C9000100D051 1 enable # by guid
+ ibportstate -C qib0 -P 1 3 1 disable # by CA name, CA Port Number, lid, physical port number
+ ibportstate -C qib0 -P 1 3 1 enable # by CA name, CA Port Number, lid, physical port number
ibportstate -D 0 1 # (query) by direct route
ibportstate 3 1 reset # by lid
ibportstate 3 1 speed 1 # by lid
--
2.25.4

0001-mlx5-DR-Create-NC-UAR-as-default-but-fall-back-to-WC.patch (new file)

@@ -0,0 +1,41 @@
From 297cc2c6323514a69b57aeeb5207cf63e5e2549b Mon Sep 17 00:00:00 2001
From: Bodong Wang <bodong@nvidia.com>
Date: Tue, 27 Oct 2020 08:59:02 -0500
Subject: [PATCH] mlx5: DR, Create NC UAR as default but fall-back to WC if
failed
[ Upstream commit 40b8e48792a423da1ceeaf58ac8eee81e9e0194a ]
Some devices may only support either NC or WC UAR. To handle such
devices, first try to create NC, then try WC if NC failed.
Fixes: 84ac5272a05a ("mlx5: Enhance mlx5dv_devx_alloc_uar() functionality")
Signed-off-by: Bodong Wang <bodong@nvidia.com>
Signed-off-by: Yishai Hadas <yishaih@nvidia.com>
Signed-off-by: Nicolas Morey-Chaisemartin <nmoreychaisemartin@suse.com>
---
providers/mlx5/dr_domain.c | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/providers/mlx5/dr_domain.c b/providers/mlx5/dr_domain.c
index b47c5841f624..5b8739dca2ef 100644
--- a/providers/mlx5/dr_domain.c
+++ b/providers/mlx5/dr_domain.c
@@ -50,7 +50,13 @@ static int dr_domain_init_resources(struct mlx5dv_dr_domain *dmn)
return ret;
}
- dmn->uar = mlx5dv_devx_alloc_uar(dmn->ctx, 0);
+ dmn->uar = mlx5dv_devx_alloc_uar(dmn->ctx,
+ MLX5_IB_UAPI_UAR_ALLOC_TYPE_NC);
+
+ if (!dmn->uar)
+ dmn->uar = mlx5dv_devx_alloc_uar(dmn->ctx,
+ MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF);
+
if (!dmn->uar) {
dr_dbg(dmn, "Can't allocate UAR\n");
goto clean_pd;
--
2.25.4
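
A minimal sketch of the same NC-first, WC-fallback allocation seen from the
caller's side; mlx5dv_devx_alloc_uar() and the MLX5_IB_UAPI_UAR_ALLOC_TYPE_*
flags are the ones used in the hunk above.

#include <infiniband/mlx5dv.h>

static struct mlx5dv_devx_uar *alloc_uar_nc_then_wc(struct ibv_context *ctx)
{
        struct mlx5dv_devx_uar *uar;

        /* Non-cached mapping first: works even when write combining is
         * not supported by the device/platform. */
        uar = mlx5dv_devx_alloc_uar(ctx, MLX5_IB_UAPI_UAR_ALLOC_TYPE_NC);
        if (!uar)
                /* Fall back to the BlueFlame/write-combining mapping. */
                uar = mlx5dv_devx_alloc_uar(ctx,
                                            MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF);

        return uar;     /* caller checks for NULL, as dr_domain_init_resources() does */
}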

0001-mlx5-DR-Fix-incorrect-use-of-fl_roce_enabled-capabil.patch (new file)

@@ -0,0 +1,176 @@
From 2432a74c845b7b529cb6e9044140b9445922a7ae Mon Sep 17 00:00:00 2001
From: Alex Vesker <valex@nvidia.com>
Date: Wed, 28 Oct 2020 12:26:32 +0200
Subject: [PATCH] mlx5: DR, Fix incorrect use of fl_roce_enabled capability
[ Upstream commit 2337d6790ad21b1d0c5373cf2aa6f8e70a510434 ]
Creating a FL QP should be allowed only when RoCE is enabled (roce_en)
and FL is supported with RoCE enabled. Previously we relied on the
general HCA cap to tell whether RoCE is enabled and ignored its real
state, creating an FL QP even if RoCE was disabled, which could result
in a failure to modify the QP.
Fixes: 6724f6530d3e ("mlx5: DR, Query RoCE capabilities")
Signed-off-by: Alex Vesker <valex@nvidia.com>
Signed-off-by: Yishai Hadas <yishaih@nvidia.com>
Signed-off-by: Nicolas Morey-Chaisemartin <nmoreychaisemartin@suse.com>
---
providers/mlx5/dr_devx.c | 24 ++++++++++++++++++++++++
providers/mlx5/dr_domain.c | 2 +-
providers/mlx5/dr_send.c | 8 +++++++-
providers/mlx5/mlx5_ifc.h | 29 +++++++++++++++++++++++++++++
providers/mlx5/mlx5dv_dr.h | 2 ++
5 files changed, 63 insertions(+), 2 deletions(-)
diff --git a/providers/mlx5/dr_devx.c b/providers/mlx5/dr_devx.c
index cd0f8bbc2e44..74a8155b8777 100644
--- a/providers/mlx5/dr_devx.c
+++ b/providers/mlx5/dr_devx.c
@@ -66,6 +66,26 @@ int dr_devx_query_esw_vport_context(struct ibv_context *ctx,
return 0;
}
+static int dr_devx_query_nic_vport_context(struct ibv_context *ctx,
+ bool *roce_en)
+{
+ uint32_t out[DEVX_ST_SZ_DW(query_nic_vport_context_out)] = {};
+ uint32_t in[DEVX_ST_SZ_DW(query_nic_vport_context_in)] = {};
+ int err;
+
+ DEVX_SET(query_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
+ err = mlx5dv_devx_general_cmd(ctx, in, sizeof(in), out, sizeof(out));
+ if (err) {
+ dr_dbg_ctx(ctx, "Query nic vport context failed %d\n", err);
+ return err;
+ }
+
+ *roce_en = DEVX_GET(query_nic_vport_context_out, out,
+ nic_vport_context.roce_en);
+ return 0;
+}
+
int dr_devx_query_gvmi(struct ibv_context *ctx, bool other_vport,
uint16_t vport_number, uint16_t *gvmi)
{
@@ -225,6 +245,10 @@ int dr_devx_query_device(struct ibv_context *ctx, struct dr_devx_caps *caps)
/* RoCE caps */
if (roce) {
+ err = dr_devx_query_nic_vport_context(ctx, &caps->roce_caps.roce_en);
+ if (err)
+ return err;
+
DEVX_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
DEVX_SET(query_hca_cap_in, in, op_mod,
MLX5_SET_HCA_CAP_OP_MOD_ROCE |
diff --git a/providers/mlx5/dr_domain.c b/providers/mlx5/dr_domain.c
index 916283e505aa..b47c5841f624 100644
--- a/providers/mlx5/dr_domain.c
+++ b/providers/mlx5/dr_domain.c
@@ -199,7 +199,7 @@ static int dr_domain_caps_init(struct ibv_context *ctx,
* force-loopback.
*/
if ((dmn->type != MLX5DV_DR_DOMAIN_TYPE_FDB) &&
- !dmn->info.caps.roce_caps.fl_rc_qp_when_roce_enabled)
+ !dr_send_allow_fl(&dmn->info.caps))
return 0;
ret = dr_domain_query_fdb_caps(ctx, dmn);
diff --git a/providers/mlx5/dr_send.c b/providers/mlx5/dr_send.c
index dfda549d7f01..67ab1c7eb40f 100644
--- a/providers/mlx5/dr_send.c
+++ b/providers/mlx5/dr_send.c
@@ -820,6 +820,12 @@ int dr_send_postsend_action(struct mlx5dv_dr_domain *dmn,
return ret;
}
+bool dr_send_allow_fl(struct dr_devx_caps *caps)
+{
+ return (caps->roce_caps.roce_en &&
+ caps->roce_caps.fl_rc_qp_when_roce_enabled);
+}
+
static int dr_prepare_qp_to_rts(struct mlx5dv_dr_domain *dmn)
{
struct dr_devx_qp_rts_attr rts_attr = {};
@@ -844,7 +850,7 @@ static int dr_prepare_qp_to_rts(struct mlx5dv_dr_domain *dmn)
rtr_attr.port_num = port;
/* Enable force-loopback on the QP */
- if (dmn->info.caps.roce_caps.fl_rc_qp_when_roce_enabled) {
+ if (dr_send_allow_fl(&dmn->info.caps)) {
rtr_attr.fl = true;
} else {
ret = dr_devx_query_gid(dmn->ctx, port, gid_index, &rtr_attr.dgid_attr);
diff --git a/providers/mlx5/mlx5_ifc.h b/providers/mlx5/mlx5_ifc.h
index 815207a435a8..58b7da23b3aa 100644
--- a/providers/mlx5/mlx5_ifc.h
+++ b/providers/mlx5/mlx5_ifc.h
@@ -49,6 +49,7 @@ enum {
MLX5_CMD_OP_RTS2RTS_QP = 0x505,
MLX5_CMD_OP_QUERY_QP = 0x50b,
MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752,
+ MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754,
MLX5_CMD_OP_QUERY_ROCE_ADDRESS = 0x760,
MLX5_CMD_OP_QUERY_LAG = 0x842,
MLX5_CMD_OP_CREATE_TIR = 0x900,
@@ -1963,6 +1964,34 @@ struct mlx5_ifc_query_esw_vport_context_in_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_nic_vport_context_bits {
+ u8 reserved_at_0[0x1f];
+ u8 roce_en[0x1];
+
+ u8 reserved_at_20[0x7e0];
+};
+
+struct mlx5_ifc_query_nic_vport_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+
+ struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
+};
+
+struct mlx5_ifc_query_nic_vport_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x40];
+};
+
enum {
MLX5_QPC_ST_RC = 0x0,
};
diff --git a/providers/mlx5/mlx5dv_dr.h b/providers/mlx5/mlx5dv_dr.h
index 22e99eef4f32..a130211c15f5 100644
--- a/providers/mlx5/mlx5dv_dr.h
+++ b/providers/mlx5/mlx5dv_dr.h
@@ -582,6 +582,7 @@ struct dr_devx_vport_cap {
};
struct dr_devx_roce_cap {
+ bool roce_en;
bool fl_rc_qp_when_roce_enabled;
};
@@ -1032,6 +1033,7 @@ struct dr_send_ring {
int dr_send_ring_alloc(struct mlx5dv_dr_domain *dmn);
void dr_send_ring_free(struct dr_send_ring *send_ring);
int dr_send_ring_force_drain(struct mlx5dv_dr_domain *dmn);
+bool dr_send_allow_fl(struct dr_devx_caps *caps);
int dr_send_postsend_ste(struct mlx5dv_dr_domain *dmn, struct dr_ste *ste,
uint8_t *data, uint16_t size, uint16_t offset);
int dr_send_postsend_htbl(struct mlx5dv_dr_domain *dmn, struct dr_ste_htbl *htbl,
--
2.25.4

0001-mlx5-Fix-wqe-size-parameter-in-wqe-signature-calcula.patch (new file)

@@ -0,0 +1,38 @@
From e99103602f477c84f31b79779f7544d6e1704593 Mon Sep 17 00:00:00 2001
From: Michael Guralnik <michaelgur@nvidia.com>
Date: Mon, 19 Oct 2020 09:00:56 +0300
Subject: [PATCH] mlx5: Fix wqe size parameter in wqe signature calculation
[ Upstream commit 8cde184624d192f7cd61247eec92a3a9b4ba7c7f ]
WQE signature calculation requires passing the size of the WQE as a
parameter.
The current code passes the whole qpn_ds field as the WQE size, which is
wrong. Fix this by extracting the WQE size in bytes from qpn_ds and using
it for the signature calculation.
Fixes: 8c4791ae2395 ("libmlx5: First version of libmlx5")
Signed-off-by: Michael Guralnik <michaelgur@nvidia.com>
Signed-off-by: Yishai Hadas <yishaih@nvidia.com>
Signed-off-by: Nicolas Morey-Chaisemartin <nmoreychaisemartin@suse.com>
---
providers/mlx5/qp.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/providers/mlx5/qp.c b/providers/mlx5/qp.c
index 077346d63137..13d7bcccc39d 100644
--- a/providers/mlx5/qp.c
+++ b/providers/mlx5/qp.c
@@ -368,7 +368,7 @@ static int set_data_inl_seg(struct mlx5_qp *qp, struct ibv_send_wr *wr,
static uint8_t wq_sig(struct mlx5_wqe_ctrl_seg *ctrl)
{
- return calc_sig(ctrl, be32toh(ctrl->qpn_ds));
+ return calc_sig(ctrl, (be32toh(ctrl->qpn_ds) & 0x3f) << 4);
}
#ifdef MLX5_DEBUG
--
2.25.4
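
For reference, a small worked sketch of the size derivation used in the fix
above: the low 6 bits of ctrl->qpn_ds hold the WQE size counted in 16-byte
segments (the upper bits carry the QP number), so the size in bytes is that
count shifted left by 4.

#include <stdint.h>
#include <endian.h>

/* qpn_ds is big endian on the wire; the low 6 bits are "ds", the WQE size
 * in 16-byte units, so bytes = ds * 16. */
static inline uint32_t wqe_size_bytes(uint32_t qpn_ds_be)
{
        uint32_t qpn_ds = be32toh(qpn_ds_be);

        return (qpn_ds & 0x3f) << 4;    /* ds count * 16 -> bytes */
}

/* Example: ds = 4 (a 64-byte WQE) gives (4 & 0x3f) << 4 == 64, whereas the
 * old code fed the whole qpn_ds word to calc_sig(). */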

0001-udaddy-Fix-create_reply_ah-error-flow.patch (new file)

@@ -0,0 +1,71 @@
From 8bb25f86ea1976bc8dcc009be37e7c779d131811 Mon Sep 17 00:00:00 2001
From: Patrisious Haddad <phaddad@nvidia.com>
Date: Mon, 26 Oct 2020 10:38:13 +0200
Subject: [PATCH] udaddy: Fix create_reply_ah error flow
[ Upstream commit 2213fe559b74d4281f9d42e425dfbd7e0f582a67 ]
Return an error in case create_reply_ah() fails to create the AH.
Fixes: a7eb7efbf69f ("r8077: Add support for UD QPs to the RDMA CM library, along with a goofy test program")
Signed-off-by: Patrisious Haddad <phaddad@nvidia.com>
Signed-off-by: Yishai Hadas <yishaih@nvidia.com>
Signed-off-by: Nicolas Morey-Chaisemartin <nmoreychaisemartin@suse.com>
---
librdmacm/examples/udaddy.c | 19 ++++++++++++++-----
1 file changed, 14 insertions(+), 5 deletions(-)
diff --git a/librdmacm/examples/udaddy.c b/librdmacm/examples/udaddy.c
index 9283caa4971f..9f911289da3c 100644
--- a/librdmacm/examples/udaddy.c
+++ b/librdmacm/examples/udaddy.c
@@ -449,23 +449,27 @@ static void destroy_nodes(void)
free(test.nodes);
}
-static void create_reply_ah(struct cmatest_node *node, struct ibv_wc *wc)
+static int create_reply_ah(struct cmatest_node *node, struct ibv_wc *wc)
{
struct ibv_qp_attr attr;
struct ibv_qp_init_attr init_attr;
node->ah = ibv_create_ah_from_wc(node->pd, wc, node->mem,
node->cma_id->port_num);
+ if (!node->ah)
+ return -1;
node->remote_qpn = be32toh(wc->imm_data);
- ibv_query_qp(node->cma_id->qp, &attr, IBV_QP_QKEY, &init_attr);
+ if (ibv_query_qp(node->cma_id->qp, &attr, IBV_QP_QKEY, &init_attr))
+ return -1;
node->remote_qkey = attr.qkey;
+ return 0;
}
static int poll_cqs(void)
{
struct ibv_wc wc[8];
- int done, i, ret;
+ int done, i, ret, rc;
for (i = 0; i < connections; i++) {
if (!test.nodes[i].connected)
@@ -478,8 +482,13 @@ static int poll_cqs(void)
return ret;
}
- if (ret && !test.nodes[i].ah)
- create_reply_ah(&test.nodes[i], wc);
+ if (ret && !test.nodes[i].ah) {
+ rc = create_reply_ah(&test.nodes[i], wc);
+ if (rc) {
+ printf("udaddy: failed to create reply AH\n");
+ return rc;
+ }
+ }
}
}
return 0;
--
2.25.4
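
To show why the AH, remote QPN and QKey gathered in create_reply_ah() all have
to be valid, here is a small sketch of the UD send they feed into; the struct
below is a hypothetical stand-in for udaddy's cmatest_node fields, and the
verbs calls are standard libibverbs.

#include <stdint.h>
#include <endian.h>
#include <infiniband/verbs.h>

struct reply_target {                   /* stand-in for cmatest_node */
        struct ibv_qp *qp;
        struct ibv_ah *ah;              /* from ibv_create_ah_from_wc() */
        uint32_t remote_qpn;            /* from be32toh(wc->imm_data) */
        uint32_t remote_qkey;           /* from ibv_query_qp(..., IBV_QP_QKEY, ...) */
};

static int post_ud_reply(struct reply_target *t, struct ibv_sge *sge)
{
        struct ibv_send_wr wr = {}, *bad;

        wr.opcode = IBV_WR_SEND_WITH_IMM;
        wr.imm_data = htobe32(t->qp->qp_num);   /* advertise our QPN back */
        wr.send_flags = IBV_SEND_SIGNALED;
        wr.sg_list = sge;
        wr.num_sge = 1;
        wr.wr.ud.ah = t->ah;                    /* a NULL AH here would be invalid */
        wr.wr.ud.remote_qpn = t->remote_qpn;
        wr.wr.ud.remote_qkey = t->remote_qkey;

        return ibv_post_send(t->qp, &wr, &bad);
}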

rdma-core.spec

@@ -1,6 +1,6 @@
Name: rdma-core
Version: 32.0
-Release: 1%{?dist}
+Release: 2%{?dist}
Summary: RDMA core userspace libraries and daemons
# Almost everything is licensed under the OFA dual GPLv2, 2 Clause BSD license
@@ -19,6 +19,14 @@ Source3: rxe_cfg.8.gz
Patch3: udev-keep-NAME_KERNEL-as-default-interface-naming-co.patch
# stable-v32 patch
Patch101: 0001-ABI-Files.patch
+# Bug fixes applied after upstream v32.0
+Patch201: 0001-Fix-cmd_fd-leak-in-mlx5_alloc_context.patch
+Patch202: 0001-mlx5-Fix-wqe-size-parameter-in-wqe-signature-calcula.patch
+Patch203: 0001-mlx5-DR-Fix-incorrect-use-of-fl_roce_enabled-capabil.patch
+Patch204: 0001-mlx5-DR-Create-NC-UAR-as-default-but-fall-back-to-WC.patch
+Patch205: 0001-efa-Flush-write-combining-writes-before-writing-to-t.patch
+Patch206: 0001-udaddy-Fix-create_reply_ah-error-flow.patch
+Patch207: 0001-infiniband-diags-specify-the-HCA-name-and-Port-numbe.patch
# Do not build static libs by default.
%define with_static %{?_with_static: 1} %{?!_with_static: 0}
@@ -261,6 +269,13 @@ easy, object-oriented access to IB verbs.
%setup -q
%patch3 -p1
%patch101 -p1
+%patch201 -p1
+%patch202 -p1
+%patch203 -p1
+%patch204 -p1
+%patch205 -p1
+%patch206 -p1
+%patch207 -p1
%build
@@ -646,6 +661,10 @@ fi
%endif
%changelog
+* Tue Dec 08 2020 Honggang Li <honli@redhat.com> - 32.0-2
+- Backport bug fixes applied after upstream v32.0
+- Resolves: bz1902613, bz1875265
+
* Tue Nov 03 2020 Honggang Li <honli@redhat.com> - 32.0-1
- Update to upstream v32 release for features and fixes
- Support Amazon Elastic Fabric Adapter