| From: Konstantin Taranov <kotaranov@microsoft.com> |
| Date: Thu, 5 Sep 2024 14:05:11 +0200 |
| Subject: providers/mana: fix WRs with zero SGEs |
| |
| The HW requires at least one SGE posted in work queues. |
| Add a dummy SGE when zero SGEs are requested in receive WRs. |
| |
| Fixes: ec26fedd55e0 ("providers/mana: Post recv and post send") |
| Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com> |
| Origin: upstream, https://github.com/linux-rdma/rdma-core/pull/1556 |
| --- |
| providers/mana/qp.c | 11 ++++++++--- |
| providers/mana/wr.c | 14 ++++++++++---- |
| 2 files changed, 18 insertions(+), 7 deletions(-) |
| |
| diff --git a/providers/mana/qp.c b/providers/mana/qp.c |
| index a6b28d5..a318620 100644 |
| --- a/providers/mana/qp.c |
| +++ b/providers/mana/qp.c |
| @@ -180,12 +180,15 @@ struct mana_qp *mana_get_qp_from_rq(struct mana_context *ctx, uint32_t qid) |
| static uint32_t get_queue_size(struct ibv_qp_init_attr *attr, enum user_queue_types type) |
| { |
| uint32_t size = 0; |
| + uint32_t sges = 0; |
| |
| if (attr->qp_type == IBV_QPT_RC) { |
| switch (type) { |
| case USER_RC_SEND_QUEUE_REQUESTER: |
| - /* For write with imm we need +1 */ |
| - size = attr->cap.max_send_wr * get_large_wqe_size(attr->cap.max_send_sge + 1); |
| + /* WQE must have at least one SGE */ |
| + /* For write with imm we need one extra SGE */ |
| + sges = max(1U, attr->cap.max_send_sge) + 1; |
| + size = attr->cap.max_send_wr * get_large_wqe_size(sges); |
| break; |
| case USER_RC_SEND_QUEUE_RESPONDER: |
| size = MANA_PAGE_SIZE; |
| @@ -194,7 +197,9 @@ static uint32_t get_queue_size(struct ibv_qp_init_attr *attr, enum user_queue_ty |
| size = MANA_PAGE_SIZE; |
| break; |
| case USER_RC_RECV_QUEUE_RESPONDER: |
| - size = attr->cap.max_recv_wr * get_wqe_size(attr->cap.max_recv_sge); |
| + /* WQE must have at least one SGE */ |
| + sges = max(1U, attr->cap.max_recv_sge); |
| + size = attr->cap.max_recv_wr * get_wqe_size(sges); |
| break; |
| default: |
| return 0; |
| diff --git a/providers/mana/wr.c b/providers/mana/wr.c |
| index 5975971..755e6a8 100644 |
| --- a/providers/mana/wr.c |
| +++ b/providers/mana/wr.c |
| @@ -106,12 +106,18 @@ static inline void gdma_write_sge(struct gdma_wqe *wqe, void *oob_sge, |
| } |
| |
| static inline int |
| -gdma_post_rq_wqe(struct mana_gdma_queue *wq, struct ibv_sge *sgl, void *oob, |
| +gdma_post_rq_wqe(struct mana_gdma_queue *wq, struct ibv_sge *sgl, struct rdma_recv_oob *recv_oob, |
| uint32_t num_sge, enum gdma_work_req_flags flags, struct gdma_wqe *wqe) |
| { |
| - uint32_t wqe_size = get_wqe_size(num_sge); |
| + struct ibv_sge dummy = {1, 0, 0}; |
| + uint32_t wqe_size; |
| int ret; |
| |
| + if (num_sge == 0) { |
| + num_sge = 1; |
| + sgl = &dummy; |
| + } |
| + wqe_size = get_wqe_size(num_sge); |
| ret = gdma_get_current_wqe(wq, INLINE_OOB_SMALL_SIZE, wqe_size, wqe); |
| if (ret) |
| return ret; |
| @@ -119,8 +125,8 @@ gdma_post_rq_wqe(struct mana_gdma_queue *wq, struct ibv_sge *sgl, void *oob, |
| wqe->gdma_oob->rx.num_sgl_entries = num_sge; |
| wqe->gdma_oob->rx.inline_client_oob_size = INLINE_OOB_SMALL_SIZE / sizeof(uint32_t); |
| wqe->gdma_oob->rx.check_sn = (flags & GDMA_WORK_REQ_CHECK_SN) != 0; |
| - if (oob) |
| - memcpy(wqe->client_oob, oob, INLINE_OOB_SMALL_SIZE); |
| + if (recv_oob) |
| + memcpy(wqe->client_oob, recv_oob, INLINE_OOB_SMALL_SIZE); |
| |
| gdma_write_sge(wqe, NULL, sgl, num_sge); |
| gdma_advance_producer(wq, wqe->size_in_bu); |