debian: Drop patches applied upstream

Signed-off-by: Benjamin Drung <bdrung@ubuntu.com>
diff --git a/debian/patches/libibumad-Fix-iteration-index-for-switches.patch b/debian/patches/libibumad-Fix-iteration-index-for-switches.patch
deleted file mode 100644
index 8d04c26..0000000
--- a/debian/patches/libibumad-Fix-iteration-index-for-switches.patch
+++ /dev/null
@@ -1,56 +0,0 @@
-From: Asaf Mazor <amazor@nvidia.com>
-Date: Mon, 17 Mar 2025 13:51:37 +0200
-Subject: libibumad: Fix iteration index for switches
-
-Updated start index from 1 to 0 in:
- - count_ports_by_guid
- - umad_get_smi_gsi_pairs
- - umad_find_active
-
-This fix should add support for switches
-(In HCA port 0 will be null and skipped)
-
-Fixes: be54b52e94be ("libibumad: Add new API to support SMI/GSI seperation")
-Signed-off-by: Asaf Mazor <amazor@nvidia.com>
-Origin: upstream, https://github.com/linux-rdma/rdma-core/pull/1583
----
- libibumad/umad.c | 10 +++++-----
- 1 file changed, 5 insertions(+), 5 deletions(-)
-
-diff --git a/libibumad/umad.c b/libibumad/umad.c
-index db3eb52..644d43d 100644
---- a/libibumad/umad.c
-+++ b/libibumad/umad.c
-@@ -1444,9 +1444,9 @@ static int count_ports_by_guid(char legacy_ca_names[][UMAD_CA_NAME_LEN], size_t
- 		if (umad_get_ca(legacy_ca_names[c_idx], &curr_ca) < 0)
- 			continue;
- 
--		size_t p_idx = 1;
-+		size_t p_idx = 0;
- 
--		for (p_idx = 1; p_idx < (size_t)curr_ca.numports + 1; ++p_idx) {
-+		for (p_idx = 0; p_idx < (size_t)curr_ca.numports + 1; ++p_idx) {
- 			umad_port_t *p_port = curr_ca.ports[p_idx];
- 			size_t count_idx = 0;
- 
-@@ -1496,9 +1496,9 @@ int umad_get_smi_gsi_pairs(struct umad_ca_pair cas[], size_t max)
- 		if (umad_get_ca(legacy_ca_names[c_idx], &curr_ca) < 0)
- 			continue;
- 
--		size_t p_idx = 1;
-+		size_t p_idx = 0;
- 
--		for (p_idx = 1; p_idx < (size_t)curr_ca.numports + 1; ++p_idx) {
-+		for (p_idx = 0; p_idx < (size_t)curr_ca.numports + 1; ++p_idx) {
- 			umad_port_t *p_port = curr_ca.ports[p_idx];
- 			uint8_t guid_count = 0;
- 
-@@ -1558,7 +1558,7 @@ static int umad_find_active(struct umad_ca_pair *ca_pair, const umad_ca_t *ca, b
- 	if (!ca_pair)
- 		return 1;
- 
--	for (i = 1; i < (size_t)ca->numports + 1; ++i) {
-+	for (i = 0; i < (size_t)ca->numports + 1; ++i) {
- 		if (!umad_check_active(ca, i)) {
- 			*portnum_to_set = ca->ports[i]->portnum;
- 			return 0;
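
For context, the dropped libibumad patch starts the port iteration at index 0 so that port 0 on switches is visited, while the NULL slot that HCAs leave at index 0 is simply skipped. A minimal consumer-side sketch of that pattern against the public libibumad API (the helper name and the printf output are illustrative only, not part of the patch):

#include <stdio.h>
#include <infiniband/umad.h>

/* Walk every port of every local CA, including port 0 on switches.
 * On HCAs ports[0] is NULL and is skipped. */
static int walk_all_ports(void)
{
	char names[UMAD_MAX_DEVICES][UMAD_CA_NAME_LEN];
	int num_cas, c, p;

	num_cas = umad_get_cas_names(names, UMAD_MAX_DEVICES);
	if (num_cas < 0)
		return num_cas;

	for (c = 0; c < num_cas; ++c) {
		umad_ca_t ca;

		if (umad_get_ca(names[c], &ca) < 0)
			continue;

		/* start at 0, not 1: ports[0] exists on switches */
		for (p = 0; p <= ca.numports; ++p) {
			umad_port_t *port = ca.ports[p];

			if (!port)
				continue; /* HCA: no port 0 */
			printf("%s port %d state %u\n",
			       ca.ca_name, port->portnum, port->state);
		}
		umad_release_ca(&ca);
	}
	return 0;
}
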
diff --git a/debian/patches/providers-mana-Fix-modify-RC-QPs-for-RTS-and-INIT-states.patch b/debian/patches/providers-mana-Fix-modify-RC-QPs-for-RTS-and-INIT-states.patch
deleted file mode 100644
index ee9e841..0000000
--- a/debian/patches/providers-mana-Fix-modify-RC-QPs-for-RTS-and-INIT-states.patch
+++ /dev/null
@@ -1,50 +0,0 @@
-From: Konstantin Taranov <kotaranov@microsoft.com>
-Date: Wed, 31 Jul 2024 11:05:53 +0200
-Subject: providers/mana: Fix modify RC QPs for RTS and INIT states
-
-Reset RC QP state on RESET as user can submit
-receive request during INIT state.
-Use attr->sq_psn only when IBV_QP_SQ_PSN is specified.
-
-Fixes: 166c46bc7bbd ("providers/mana: Implement modify QP state")
-Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
-Origin: upstream, https://github.com/linux-rdma/rdma-core/pull/1556
----
- providers/mana/qp.c | 14 ++++++++------
- 1 file changed, 8 insertions(+), 6 deletions(-)
-
-diff --git a/providers/mana/qp.c b/providers/mana/qp.c
-index a318620..4591cee 100644
---- a/providers/mana/qp.c
-+++ b/providers/mana/qp.c
-@@ -347,22 +347,24 @@ static void mana_ib_modify_rc_qp(struct mana_qp *qp, struct ibv_qp_attr *attr, i
- 
- 	switch (attr->qp_state) {
- 	case IBV_QPS_RESET:
--	case IBV_QPS_INIT:
- 		for (i = 0; i < USER_RC_QUEUE_TYPE_MAX; ++i) {
- 			qp->rc_qp.queues[i].prod_idx = 0;
- 			qp->rc_qp.queues[i].cons_idx = 0;
- 		}
- 		mana_ib_reset_rb_shmem(qp);
- 		reset_shadow_queue(&qp->shadow_rq);
-+		reset_shadow_queue(&qp->shadow_sq);
-+	case IBV_QPS_INIT:
- 		break;
- 	case IBV_QPS_RTR:
- 		break;
- 	case IBV_QPS_RTS:
--		reset_shadow_queue(&qp->shadow_sq);
--		qp->rc_qp.sq_ssn = 1;
--		qp->rc_qp.sq_psn = attr->sq_psn;
--		qp->rc_qp.sq_highest_completed_psn = PSN_DEC(attr->sq_psn);
--		gdma_arm_normal_cqe(&qp->rc_qp.queues[USER_RC_RECV_QUEUE_REQUESTER], attr->sq_psn);
-+		if (attr_mask & IBV_QP_SQ_PSN) {
-+			qp->rc_qp.sq_ssn = 1;
-+			qp->rc_qp.sq_psn = attr->sq_psn;
-+			qp->rc_qp.sq_highest_completed_psn = PSN_DEC(attr->sq_psn);
-+			gdma_arm_normal_cqe(&qp->rc_qp.queues[USER_RC_RECV_QUEUE_REQUESTER], attr->sq_psn);
-+		}
- 		break;
- 	default:
- 		break;
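
The modify-QP fix only takes effect when the caller actually sets IBV_QP_SQ_PSN in the attribute mask. A rough verbs-level sketch of the RTS transition in question, assuming an already-connected RC QP (the helper name and attribute values are placeholders, not mana requirements):

#include <stdint.h>
#include <infiniband/verbs.h>

/* Move an RC QP to RTS.  sq_psn is only consumed by the provider when
 * IBV_QP_SQ_PSN is part of the mask, which is the behaviour the dropped
 * patch restores for mana. */
static int rc_qp_to_rts(struct ibv_qp *qp, uint32_t sq_psn)
{
	struct ibv_qp_attr attr = {
		.qp_state      = IBV_QPS_RTS,
		.sq_psn        = sq_psn,
		.timeout       = 14,
		.retry_cnt     = 7,
		.rnr_retry     = 7,
		.max_rd_atomic = 1,
	};

	return ibv_modify_qp(qp, &attr,
			     IBV_QP_STATE | IBV_QP_SQ_PSN | IBV_QP_TIMEOUT |
			     IBV_QP_RETRY_CNT | IBV_QP_RNR_RETRY |
			     IBV_QP_MAX_QP_RD_ATOMIC);
}
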
diff --git a/debian/patches/providers-mana-Fix-return-value-on-unsupported-QP-type.patch b/debian/patches/providers-mana-Fix-return-value-on-unsupported-QP-type.patch
deleted file mode 100644
index 4a26273..0000000
--- a/debian/patches/providers-mana-Fix-return-value-on-unsupported-QP-type.patch
+++ /dev/null
@@ -1,35 +0,0 @@
-From: Shiraz Saleem <shirazsaleem@microsoft.com>
-Date: Wed, 14 Aug 2024 14:40:39 -0700
-Subject: providers/mana: Fix return value on unsupported QP type
-
-Return EOPNOTSUPP when QP type is not supported during create QP.
-
-Fixes: 443f196deee0 ("mana: Microsoft Azure Network Adapter (MANA) RDMA provider")
-Signed-off-by: Shiraz Saleem <shirazsaleem@microsoft.com>
-Origin: upstream, https://github.com/linux-rdma/rdma-core/pull/1556
----
- providers/mana/qp.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/providers/mana/qp.c b/providers/mana/qp.c
-index 596ab75..a6b28d5 100644
---- a/providers/mana/qp.c
-+++ b/providers/mana/qp.c
-@@ -327,7 +327,7 @@ struct ibv_qp *mana_create_qp(struct ibv_pd *ibpd,
- 	default:
- 		verbs_err(verbs_get_ctx(ibpd->context),
- 			  "QP type %u is not supported\n", attr->qp_type);
--		errno = EINVAL;
-+		errno = EOPNOTSUPP;
- 	}
- 
- 	return NULL;
-@@ -533,7 +533,7 @@ struct ibv_qp *mana_create_qp_ex(struct ibv_context *context,
- 	default:
- 		verbs_err(verbs_get_ctx(context),
- 			  "QP type %u is not supported\n", attr->qp_type);
--		errno = EINVAL;
-+		errno = EOPNOTSUPP;
- 	}
- 
- 	return NULL;
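
With this change applied upstream, callers can tell an unsupported QP type (EOPNOTSUPP) apart from a malformed request (EINVAL). A hedged caller-side sketch; the helper name and queue capacities are illustrative only:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <infiniband/verbs.h>

/* Create an RC QP and report whether a failure means the provider
 * does not support the requested QP type at all. */
static struct ibv_qp *create_rc_qp(struct ibv_pd *pd, struct ibv_cq *cq)
{
	struct ibv_qp_init_attr init = {
		.send_cq = cq,
		.recv_cq = cq,
		.qp_type = IBV_QPT_RC,
		.cap = { .max_send_wr = 16, .max_recv_wr = 16,
			 .max_send_sge = 1, .max_recv_sge = 1 },
	};
	struct ibv_qp *qp = ibv_create_qp(pd, &init);

	if (!qp && errno == EOPNOTSUPP)
		fprintf(stderr, "QP type not supported by this provider\n");
	else if (!qp)
		fprintf(stderr, "ibv_create_qp: %s\n", strerror(errno));
	return qp;
}
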
diff --git a/debian/patches/providers-mana-Fix-return-values-on-unsupported-parent-do.patch b/debian/patches/providers-mana-Fix-return-values-on-unsupported-parent-do.patch
deleted file mode 100644
index 7b6753e..0000000
--- a/debian/patches/providers-mana-Fix-return-values-on-unsupported-parent-do.patch
+++ /dev/null
@@ -1,27 +0,0 @@
-From: Shiraz Saleem <shirazsaleem@microsoft.com>
-Date: Tue, 3 Sep 2024 22:05:59 -0500
-Subject: providers/mana: Fix return values on unsupported parent domain flags
-
-Return EOPNOTSUPP when unsupported flags are used to create parent
-domain
-
-Fixes: 443f196deee0 ("mana: Microsoft Azure Network Adapter (MANA) RDMA provider")
-Signed-off-by: Shiraz Saleem <shirazsaleem@microsoft.com>
-Origin: upstream, https://github.com/linux-rdma/rdma-core/pull/1556
----
- providers/mana/mana.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/providers/mana/mana.c b/providers/mana/mana.c
-index efd5379..22e5ff9 100644
---- a/providers/mana/mana.c
-+++ b/providers/mana/mana.c
-@@ -113,7 +113,7 @@ mana_alloc_parent_domain(struct ibv_context *context,
- 		verbs_err(
- 			verbs_get_ctx(context),
- 			"This driver supports IBV_PARENT_DOMAIN_INIT_ATTR_PD_CONTEXT only\n");
--		errno = EINVAL;
-+		errno = EOPNOTSUPP;
- 		return NULL;
- 	}
- 
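
Likewise for parent domains: the provider accepts IBV_PARENT_DOMAIN_INIT_ATTR_PD_CONTEXT only, and other flags now fail with EOPNOTSUPP instead of EINVAL. A rough sketch of the accepted usage, assuming the standard ibv_alloc_parent_domain() API (the helper name is illustrative):

#include <errno.h>
#include <stdio.h>
#include <infiniband/verbs.h>

/* Allocate a parent domain that carries only a pd_context, the one
 * combination the mana provider supports. */
static struct ibv_pd *alloc_pd_with_context(struct ibv_context *ctx,
					    struct ibv_pd *pd, void *user_ctx)
{
	struct ibv_parent_domain_init_attr attr = {
		.pd         = pd,
		.comp_mask  = IBV_PARENT_DOMAIN_INIT_ATTR_PD_CONTEXT,
		.pd_context = user_ctx,
	};
	struct ibv_pd *parent = ibv_alloc_parent_domain(ctx, &attr);

	if (!parent && errno == EOPNOTSUPP)
		fprintf(stderr, "parent domain flags not supported\n");
	return parent;
}
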
diff --git a/debian/patches/providers-mana-fix-WRs-with-zero-sges.patch b/debian/patches/providers-mana-fix-WRs-with-zero-sges.patch
deleted file mode 100644
index e01bd19..0000000
--- a/debian/patches/providers-mana-fix-WRs-with-zero-sges.patch
+++ /dev/null
@@ -1,84 +0,0 @@
-From: Konstantin Taranov <kotaranov@microsoft.com>
-Date: Thu, 5 Sep 2024 14:05:11 +0200
-Subject: providers/mana: fix WRs with zero sges
-
-The HW requires at least one SGE posted in work queues.
-Add dummy SGE when zero SGEs are requested in receive WRs.
-
-Fixes: ec26fedd55e0 ("providers/mana: Post recv and post send")
-Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
-Origin: upstream, https://github.com/linux-rdma/rdma-core/pull/1556
----
- providers/mana/qp.c | 11 ++++++++---
- providers/mana/wr.c | 14 ++++++++++----
- 2 files changed, 18 insertions(+), 7 deletions(-)
-
-diff --git a/providers/mana/qp.c b/providers/mana/qp.c
-index a6b28d5..a318620 100644
---- a/providers/mana/qp.c
-+++ b/providers/mana/qp.c
-@@ -180,12 +180,15 @@ struct mana_qp *mana_get_qp_from_rq(struct mana_context *ctx, uint32_t qid)
- static uint32_t get_queue_size(struct ibv_qp_init_attr *attr, enum user_queue_types type)
- {
- 	uint32_t size = 0;
-+	uint32_t sges = 0;
- 
- 	if (attr->qp_type == IBV_QPT_RC) {
- 		switch (type) {
- 		case USER_RC_SEND_QUEUE_REQUESTER:
--			/* For write with imm we need +1 */
--			size = attr->cap.max_send_wr * get_large_wqe_size(attr->cap.max_send_sge + 1);
-+			/* WQE must have at least one SGE */
-+			/* For write with imm we need one extra SGE */
-+			sges = max(1U, attr->cap.max_send_sge) + 1;
-+			size = attr->cap.max_send_wr * get_large_wqe_size(sges);
- 			break;
- 		case USER_RC_SEND_QUEUE_RESPONDER:
- 			size = MANA_PAGE_SIZE;
-@@ -194,7 +197,9 @@ static uint32_t get_queue_size(struct ibv_qp_init_attr *attr, enum user_queue_ty
- 			size = MANA_PAGE_SIZE;
- 			break;
- 		case USER_RC_RECV_QUEUE_RESPONDER:
--			size = attr->cap.max_recv_wr * get_wqe_size(attr->cap.max_recv_sge);
-+			/* WQE must have at least one SGE */
-+			sges = max(1U, attr->cap.max_recv_sge);
-+			size = attr->cap.max_recv_wr * get_wqe_size(sges);
- 			break;
- 		default:
- 			return 0;
-diff --git a/providers/mana/wr.c b/providers/mana/wr.c
-index 5975971..755e6a8 100644
---- a/providers/mana/wr.c
-+++ b/providers/mana/wr.c
-@@ -106,12 +106,18 @@ static inline void gdma_write_sge(struct gdma_wqe *wqe, void *oob_sge,
- }
- 
- static inline int
--gdma_post_rq_wqe(struct mana_gdma_queue *wq, struct ibv_sge *sgl,  void *oob,
-+gdma_post_rq_wqe(struct mana_gdma_queue *wq, struct ibv_sge *sgl,  struct rdma_recv_oob *recv_oob,
- 		 uint32_t num_sge, enum gdma_work_req_flags flags, struct gdma_wqe *wqe)
- {
--	uint32_t wqe_size = get_wqe_size(num_sge);
-+	struct ibv_sge dummy = {1, 0, 0};
-+	uint32_t wqe_size;
- 	int ret;
- 
-+	if (num_sge == 0) {
-+		num_sge = 1;
-+		sgl = &dummy;
-+	}
-+	wqe_size = get_wqe_size(num_sge);
- 	ret = gdma_get_current_wqe(wq, INLINE_OOB_SMALL_SIZE, wqe_size, wqe);
- 	if (ret)
- 		return ret;
-@@ -119,8 +125,8 @@ gdma_post_rq_wqe(struct mana_gdma_queue *wq, struct ibv_sge *sgl,  void *oob,
- 	wqe->gdma_oob->rx.num_sgl_entries = num_sge;
- 	wqe->gdma_oob->rx.inline_client_oob_size = INLINE_OOB_SMALL_SIZE / sizeof(uint32_t);
- 	wqe->gdma_oob->rx.check_sn = (flags & GDMA_WORK_REQ_CHECK_SN) != 0;
--	if (oob)
--		memcpy(wqe->client_oob, oob, INLINE_OOB_SMALL_SIZE);
-+	if (recv_oob)
-+		memcpy(wqe->client_oob, recv_oob, INLINE_OOB_SMALL_SIZE);
- 
- 	gdma_write_sge(wqe, NULL, sgl, num_sge);
- 	gdma_advance_producer(wq, wqe->size_in_bu);
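
The zero-SGE fix covers receive WRs posted with num_sge == 0, which is legal at the verbs level; the provider now backs such a WR with an internal dummy SGE because the HW needs at least one. A minimal sketch of such a post (the helper name is illustrative):

#include <stdint.h>
#include <stddef.h>
#include <infiniband/verbs.h>

/* Post a receive that consumes no buffer (num_sge == 0). */
static int post_zero_sge_recv(struct ibv_qp *qp, uint64_t wr_id)
{
	struct ibv_recv_wr wr = {
		.wr_id   = wr_id,
		.sg_list = NULL,
		.num_sge = 0,
	};
	struct ibv_recv_wr *bad_wr;

	return ibv_post_recv(qp, &wr, &bad_wr);
}
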
diff --git a/debian/patches/series b/debian/patches/series
index 8536538..adeb696 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -1,9 +1,4 @@
-providers-mana-Fix-return-value-on-unsupported-QP-type.patch
-providers-mana-fix-WRs-with-zero-sges.patch
-providers-mana-Fix-return-values-on-unsupported-parent-do.patch
-providers-mana-Fix-modify-RC-QPs-for-RTS-and-INIT-states.patch
 providers-mana-Implement-signal-all-for-RC-QPs.patch
 providers-mana-Process-Error-cqes.patch
 providers-mana-Add-dma-buf-support.patch
-libibumad-Fix-iteration-index-for-switches.patch
 providers-mana-improve-synchronization-on-the-shadow-queu.patch