D43567.diff
diff --git a/contrib/ofed/libirdma/abi.h b/contrib/ofed/libirdma/abi.h
--- a/contrib/ofed/libirdma/abi.h
+++ b/contrib/ofed/libirdma/abi.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (C) 2019 - 2022 Intel Corporation
+ * Copyright (C) 2019 - 2023 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -31,7 +31,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-/*$FreeBSD$*/
#ifndef PROVIDER_IRDMA_ABI_H
#define PROVIDER_IRDMA_ABI_H
@@ -86,6 +85,7 @@
struct ibv_create_qp ibv_cmd;
__aligned_u64 user_wqe_bufs;
__aligned_u64 user_compl_ctx;
+ __aligned_u64 comp_mask;
};
struct irdma_ucreate_qp_resp {
@@ -98,6 +98,9 @@
__u8 lsmm;
__u8 rsvd;
__u32 qp_caps;
+ __aligned_u64 comp_mask;
+ __u8 start_wqe_idx;
+ __u8 rsvd2[7];
};
struct irdma_umodify_qp_resp {
@@ -138,6 +141,8 @@
__u8 hw_rev;
__u8 rsvd2;
__aligned_u64 comp_mask;
+ __u16 min_hw_wq_size;
+ __u8 rsvd3[6];
};
struct irdma_ureg_mr {
diff --git a/contrib/ofed/libirdma/i40e_devids.h b/contrib/ofed/libirdma/i40e_devids.h
--- a/contrib/ofed/libirdma/i40e_devids.h
+++ b/contrib/ofed/libirdma/i40e_devids.h
@@ -31,7 +31,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-/*$FreeBSD$*/
#ifndef I40E_DEVIDS_H
#define I40E_DEVIDS_H
diff --git a/contrib/ofed/libirdma/i40iw_hw.h b/contrib/ofed/libirdma/i40iw_hw.h
--- a/contrib/ofed/libirdma/i40iw_hw.h
+++ b/contrib/ofed/libirdma/i40iw_hw.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2022 Intel Corporation
+ * Copyright (c) 2015 - 2023 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -31,7 +31,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-/*$FreeBSD$*/
#ifndef I40IW_HW_H
#define I40IW_HW_H
diff --git a/contrib/ofed/libirdma/ice_devids.h b/contrib/ofed/libirdma/ice_devids.h
--- a/contrib/ofed/libirdma/ice_devids.h
+++ b/contrib/ofed/libirdma/ice_devids.h
@@ -31,7 +31,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-/*$FreeBSD$*/
#ifndef ICE_DEVIDS_H
#define ICE_DEVIDS_H
diff --git a/contrib/ofed/libirdma/irdma-abi.h b/contrib/ofed/libirdma/irdma-abi.h
--- a/contrib/ofed/libirdma/irdma-abi.h
+++ b/contrib/ofed/libirdma/irdma-abi.h
@@ -35,7 +35,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-/*$FreeBSD$*/
#ifndef IRDMA_ABI_H
#define IRDMA_ABI_H
@@ -55,6 +54,11 @@
enum {
IRDMA_ALLOC_UCTX_USE_RAW_ATTR = 1 << 0,
+ IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE = 1 << 1,
+};
+
+enum {
+ IRDMA_CREATE_QP_USE_START_WQE_IDX = 1 << 0,
};
struct irdma_alloc_ucontext_req {
@@ -83,6 +87,8 @@
__u8 hw_rev;
__u8 rsvd2;
__aligned_u64 comp_mask;
+ __u16 min_hw_wq_size;
+ __u8 rsvd3[6];
};
struct irdma_alloc_pd_resp {
@@ -102,6 +108,7 @@
struct irdma_create_qp_req {
__aligned_u64 user_wqe_bufs;
__aligned_u64 user_compl_ctx;
+ __aligned_u64 comp_mask;
};
struct irdma_mem_reg_req {
@@ -131,6 +138,9 @@
__u8 lsmm;
__u8 rsvd;
__u32 qp_caps;
+ __aligned_u64 comp_mask;
+ __u8 start_wqe_idx;
+ __u8 rsvd2[7];
};
struct irdma_modify_qp_resp {
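
Note on the ABI hunks above: the new fields are appended after the existing members with explicit reserved padding, and the two enums define the comp_mask bits that advertise them (IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE for the ucontext response, IRDMA_CREATE_QP_USE_START_WQE_IDX for create-QP). A minimal sketch of how a provider might consume the negotiated bit; the fallback constant is hypothetical, not from this patch:

    /* Sketch: read the extended alloc-ucontext response. */
    static void
    irdma_read_uctx_caps(const struct irdma_alloc_ucontext_resp *resp,
                         struct irdma_uk_attrs *uk_attrs)
    {
            /* Only trust min_hw_wq_size when the kernel set the bit. */
            if (resp->comp_mask & IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE)
                    uk_attrs->min_hw_wq_size = resp->min_hw_wq_size;
            else
                    uk_attrs->min_hw_wq_size = IRDMA_U_MIN_WQ_SIZE_DEFAULT; /* hypothetical fallback */
    }
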
diff --git a/contrib/ofed/libirdma/irdma.h b/contrib/ofed/libirdma/irdma.h
--- a/contrib/ofed/libirdma/irdma.h
+++ b/contrib/ofed/libirdma/irdma.h
@@ -31,7 +31,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-/*$FreeBSD$*/
#ifndef IRDMA_H
#define IRDMA_H
@@ -47,7 +46,6 @@
IRDMA_GEN_RSVD = 0,
IRDMA_GEN_1 = 1,
IRDMA_GEN_2 = 2,
- IRDMA_GEN_MAX = 2,
};
struct irdma_uk_attrs {
diff --git a/contrib/ofed/libirdma/irdma_defs.h b/contrib/ofed/libirdma/irdma_defs.h
--- a/contrib/ofed/libirdma/irdma_defs.h
+++ b/contrib/ofed/libirdma/irdma_defs.h
@@ -31,7 +31,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-/*$FreeBSD$*/
#ifndef IRDMA_DEFS_H
#define IRDMA_DEFS_H
@@ -457,19 +456,6 @@
IRDMA_WQE_SIZE_256 = 256,
};
-enum irdma_ws_op_type {
- IRDMA_WS_OP_TYPE_NODE = 0,
- IRDMA_WS_OP_TYPE_LEAF_NODE_GROUP,
-};
-
-enum irdma_ws_rate_limit_flags {
- IRDMA_WS_RATE_LIMIT_FLAGS_VALID = 0x1,
- IRDMA_WS_NO_RDMA_RATE_LIMIT = 0x2,
- IRDMA_WS_LEAF_NODE_IS_PART_GROUP = 0x4,
- IRDMA_WS_TREE_RATE_LIMITING = 0x8,
- IRDMA_WS_PACING_CONTROL = 0x10,
-};
-
/**
* set_64bit_val - set 64 bit value to hw wqe
* @wqe_words: wqe addr to write
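
Note: the WQE writers throughout this patch compose 64-bit header words with FIELD_PREP()/FIELD_GET(). A sketch of their semantics for contiguous masks, assuming they mirror Linux <linux/bitfield.h> (the real definitions live in the driver's osdep layer):

    #include <stdint.h>

    /* Lowest set bit of a contiguous mask, e.g. 0x00f0 -> 0x0010. */
    #define LOW_BIT(m)       ((m) & -(m))
    /* Shift a value into the mask's bit position. */
    #define FIELD_PREP(m, v) (((uint64_t)(v) * LOW_BIT(m)) & (m))
    /* Extract a field back out of a composed word. */
    #define FIELD_GET(m, v)  (((v) & (m)) / LOW_BIT(m))

    /* FIELD_PREP(0xff00, 0x12) == 0x1200;
     * FIELD_GET(0xff00, 0x1234) == 0x12 */
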
diff --git a/contrib/ofed/libirdma/irdma_uk.c b/contrib/ofed/libirdma/irdma_uk.c
--- a/contrib/ofed/libirdma/irdma_uk.c
+++ b/contrib/ofed/libirdma/irdma_uk.c
@@ -31,7 +31,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-/*$FreeBSD$*/
#include "osdep.h"
#include "irdma_defs.h"
@@ -46,16 +45,16 @@
* @valid: The wqe valid
*/
static void
-irdma_set_fragment(__le64 * wqe, u32 offset, struct irdma_sge *sge,
+irdma_set_fragment(__le64 * wqe, u32 offset, struct ibv_sge *sge,
u8 valid)
{
if (sge) {
set_64bit_val(wqe, offset,
- FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
set_64bit_val(wqe, offset + IRDMA_BYTE_8,
FIELD_PREP(IRDMAQPSQ_VALID, valid) |
- FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->len) |
- FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->stag));
+ FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->length) |
+ FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->lkey));
} else {
set_64bit_val(wqe, offset, 0);
set_64bit_val(wqe, offset + IRDMA_BYTE_8,
@@ -72,14 +71,14 @@
*/
static void
irdma_set_fragment_gen_1(__le64 * wqe, u32 offset,
- struct irdma_sge *sge, u8 valid)
+ struct ibv_sge *sge, u8 valid)
{
if (sge) {
set_64bit_val(wqe, offset,
- FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
set_64bit_val(wqe, offset + IRDMA_BYTE_8,
- FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->len) |
- FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->stag));
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->length) |
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->lkey));
} else {
set_64bit_val(wqe, offset, 0);
set_64bit_val(wqe, offset + IRDMA_BYTE_8, 0);
@@ -210,8 +209,7 @@
if (IRDMA_RING_CURRENT_HEAD(qp->initial_ring) !=
IRDMA_RING_CURRENT_TAIL(qp->sq_ring) &&
!qp->push_mode) {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
+ irdma_uk_qp_post_wr(qp);
} else {
push = (__le64 *) ((uintptr_t)qp->push_wqe +
(wqe_idx & 0x7) * 0x20);
@@ -339,7 +337,7 @@
return EINVAL;
for (i = 0; i < op_info->num_lo_sges; i++)
- total_size += op_info->lo_sg_list[i].len;
+ total_size += op_info->lo_sg_list[i].length;
read_fence |= info->read_fence;
@@ -358,7 +356,7 @@
qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
set_64bit_val(wqe, IRDMA_BYTE_16,
- FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
if (info->imm_data_valid) {
set_64bit_val(wqe, IRDMA_BYTE_0,
@@ -387,7 +385,7 @@
++addl_frag_cnt;
}
- hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
@@ -438,7 +436,7 @@
return EINVAL;
for (i = 0; i < op_info->num_lo_sges; i++)
- total_size += op_info->lo_sg_list[i].len;
+ total_size += op_info->lo_sg_list[i].length;
ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
if (ret_code)
@@ -476,8 +474,8 @@
++addl_frag_cnt;
}
set_64bit_val(wqe, IRDMA_BYTE_16,
- FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
- hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
FIELD_PREP(IRDMAQPSQ_OPCODE,
@@ -526,7 +524,7 @@
return EINVAL;
for (i = 0; i < op_info->num_sges; i++)
- total_size += op_info->sg_list[i].len;
+ total_size += op_info->sg_list[i].length;
if (info->imm_data_valid)
frag_cnt = op_info->num_sges + 1;
@@ -621,15 +619,15 @@
* @polarity: compatibility parameter
*/
static void
-irdma_copy_inline_data_gen_1(u8 *wqe, struct irdma_sge *sge_list,
+irdma_copy_inline_data_gen_1(u8 *wqe, struct ibv_sge *sge_list,
u32 num_sges, u8 polarity)
{
u32 quanta_bytes_remaining = 16;
u32 i;
for (i = 0; i < num_sges; i++) {
- u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].tag_off;
- u32 sge_len = sge_list[i].len;
+ u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
+ u32 sge_len = sge_list[i].length;
while (sge_len) {
u32 bytes_copied;
@@ -684,7 +682,7 @@
* @polarity: polarity of wqe valid bit
*/
static void
-irdma_copy_inline_data(u8 *wqe, struct irdma_sge *sge_list,
+irdma_copy_inline_data(u8 *wqe, struct ibv_sge *sge_list,
u32 num_sges, u8 polarity)
{
u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
@@ -695,8 +693,8 @@
wqe += 8;
for (i = 0; i < num_sges; i++) {
- u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].tag_off;
- u32 sge_len = sge_list[i].len;
+ u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
+ u32 sge_len = sge_list[i].length;
while (sge_len) {
u32 bytes_copied;
@@ -776,7 +774,7 @@
return EINVAL;
for (i = 0; i < op_info->num_lo_sges; i++)
- total_size += op_info->lo_sg_list[i].len;
+ total_size += op_info->lo_sg_list[i].length;
if (unlikely(total_size > qp->max_inline_data))
return EINVAL;
@@ -789,9 +787,9 @@
qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
read_fence |= info->read_fence;
set_64bit_val(wqe, IRDMA_BYTE_16,
- FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
- hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
@@ -847,7 +845,7 @@
return EINVAL;
for (i = 0; i < op_info->num_sges; i++)
- total_size += op_info->sg_list[i].len;
+ total_size += op_info->sg_list[i].length;
if (unlikely(total_size > qp->max_inline_data))
return EINVAL;
@@ -912,7 +910,7 @@
u64 hdr;
u32 wqe_idx;
bool local_fence = false;
- struct irdma_sge sge = {0};
+ struct ibv_sge sge = {0};
u16 quanta = IRDMA_QP_WQE_MIN_QUANTA;
info->push_wqe = qp->push_db ? true : false;
@@ -923,7 +921,7 @@
if (!wqe)
return ENOSPC;
- sge.stag = op_info->target_stag;
+ sge.lkey = op_info->target_stag;
qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0, &sge, 0);
set_64bit_val(wqe, IRDMA_BYTE_16, 0);
@@ -1437,8 +1435,7 @@
IRDMA_RING_MOVE_TAIL(cq->cq_ring);
set_64bit_val(cq->shadow_area, IRDMA_BYTE_0,
IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
- memset(info, 0,
- sizeof(struct irdma_cq_poll_info));
+ memset(info, 0, sizeof(*info));
return irdma_uk_cq_poll_cmpl(cq, info);
}
}
@@ -1511,7 +1508,6 @@
if (pring && IRDMA_RING_MORE_WORK(*pring))
move_cq_head = false;
}
-
if (move_cq_head) {
IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
@@ -1592,10 +1588,12 @@
int
irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, u32 *sqdepth)
{
+ u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
+
*sqdepth = irdma_round_up_wq((sq_size << shift) + IRDMA_SQ_RSVD);
- if (*sqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
- *sqdepth = uk_attrs->min_hw_wq_size << shift;
+ if (*sqdepth < min_size)
+ *sqdepth = min_size;
else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
return EINVAL;
@@ -1609,10 +1607,12 @@
int
irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift, u32 *rqdepth)
{
+ u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
+
*rqdepth = irdma_round_up_wq((rq_size << shift) + IRDMA_RQ_RSVD);
- if (*rqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
- *rqdepth = uk_attrs->min_hw_wq_size << shift;
+ if (*rqdepth < min_size)
+ *rqdepth = min_size;
else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
return EINVAL;
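
Note: both depth helpers above now hoist the hardware floor into min_size, using the min_hw_wq_size value negotiated via comp_mask earlier in this patch. A standalone restatement of the clamp, assuming IRDMA_SQ_RSVD is 1 and irdma_round_up_wq() rounds to the next power of two (both assumptions for illustration):

    #include <stdint.h>

    static uint32_t
    round_up_pow2(uint32_t v)
    {
            uint32_t r = 1;

            while (r < v)
                    r <<= 1;
            return r;
    }

    static uint32_t
    calc_sqdepth(uint32_t sq_size, uint8_t shift, uint16_t min_hw_wq_size)
    {
            uint32_t min_size = (uint32_t)min_hw_wq_size << shift;
            uint32_t depth = round_up_pow2((sq_size << shift) + 1 /* IRDMA_SQ_RSVD */);

            return depth < min_size ? min_size : depth;
    }
    /* calc_sqdepth(1, 0, 8) == 8: the hardware floor wins over the
     * rounded request of 2. */
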
@@ -1645,41 +1645,16 @@
{
u16 move_cnt = 1;
- if (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE)
+ if (info->start_wqe_idx)
+ move_cnt = info->start_wqe_idx;
+ else if (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE)
move_cnt = 3;
-
qp->conn_wqes = move_cnt;
IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
}
-/**
- * irdma_uk_calc_shift_wq - calculate WQE shift for both SQ and RQ
- * @ukinfo: qp initialization info
- * @sq_shift: Returns shift of SQ
- * @rq_shift: Returns shift of RQ
- */
-void
-irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
- u8 *rq_shift)
-{
- bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2 ? true : false;
-
- irdma_get_wqe_shift(ukinfo->uk_attrs,
- imm_support ? ukinfo->max_sq_frag_cnt + 1 :
- ukinfo->max_sq_frag_cnt,
- ukinfo->max_inline_data, sq_shift);
-
- irdma_get_wqe_shift(ukinfo->uk_attrs, ukinfo->max_rq_frag_cnt, 0,
- rq_shift);
-
- if (ukinfo->uk_attrs->hw_rev == IRDMA_GEN_1) {
- if (ukinfo->abi_ver > 4)
- *rq_shift = IRDMA_MAX_RQ_WQE_SHIFT_GEN1;
- }
-}
-
/**
* irdma_uk_calc_depth_shift_sq - calculate depth and shift for SQ size.
* @ukinfo: qp initialization info
@@ -1692,6 +1667,7 @@
{
bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2 ? true : false;
int status;
+
irdma_get_wqe_shift(ukinfo->uk_attrs,
imm_support ? ukinfo->max_sq_frag_cnt + 1 :
ukinfo->max_sq_frag_cnt,
@@ -1786,6 +1762,8 @@
qp->wqe_ops = iw_wqe_uk_ops_gen_1;
else
qp->wqe_ops = iw_wqe_uk_ops;
+ qp->start_wqe_idx = info->start_wqe_idx;
+
return ret_code;
}
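
Note on the file-wide type change: the driver-private scatter/gather element is replaced by libibverbs' struct ibv_sge. The removed definition (see the '-' lines in irdma_user.h below) maps field-for-field onto ibv_sge, assuming irdma_tagged_offset and irdma_stag reduce to u64/u32 typedefs:

    struct irdma_sge {          /* removed           struct ibv_sge */
            uint64_t tag_off;   /* ->                .addr          */
            uint32_t len;       /* ->                .length        */
            uint32_t stag;      /* ->                .lkey          */
    };

Because the layouts coincide, ib_wr->sg_list can be handed to the queue-level code directly, which is what lets irdma_uverbs.c below delete irdma_copy_sg_list() and the per-QP recv_sges staging buffer.
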
diff --git a/contrib/ofed/libirdma/irdma_umain.h b/contrib/ofed/libirdma/irdma_umain.h
--- a/contrib/ofed/libirdma/irdma_umain.h
+++ b/contrib/ofed/libirdma/irdma_umain.h
@@ -31,7 +31,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-/*$FreeBSD$*/
#ifndef IRDMA_UMAIN_H
#define IRDMA_UMAIN_H
@@ -97,7 +96,6 @@
LIST_ENTRY(irdma_cq_buf) list;
struct irdma_cq_uk cq;
struct verbs_mr vmr;
- size_t buf_size;
};
extern pthread_mutex_t sigusr1_wait_mutex;
@@ -143,7 +141,6 @@
struct ibv_recv_wr *pend_rx_wr;
struct irdma_qp_uk qp;
enum ibv_qp_type qp_type;
- struct irdma_sge *recv_sges;
};
/* irdma_uverbs.c */
diff --git a/contrib/ofed/libirdma/irdma_umain.c b/contrib/ofed/libirdma/irdma_umain.c
--- a/contrib/ofed/libirdma/irdma_umain.c
+++ b/contrib/ofed/libirdma/irdma_umain.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2021 - 2023 Intel Corporation
+ * Copyright (c) 2021 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -31,7 +31,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-/*$FreeBSD$*/
#include <sys/mman.h>
@@ -49,7 +48,7 @@
/**
* Driver version
*/
-char libirdma_version[] = "1.2.17-k";
+char libirdma_version[] = "1.2.36-k";
unsigned int irdma_dbg;
diff --git a/contrib/ofed/libirdma/irdma_uquery.h b/contrib/ofed/libirdma/irdma_uquery.h
--- a/contrib/ofed/libirdma/irdma_uquery.h
+++ b/contrib/ofed/libirdma/irdma_uquery.h
@@ -31,7 +31,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-/*$FreeBSD$*/
#ifndef IRDMA_UQUERY_H
diff --git a/contrib/ofed/libirdma/irdma_user.h b/contrib/ofed/libirdma/irdma_user.h
--- a/contrib/ofed/libirdma/irdma_user.h
+++ b/contrib/ofed/libirdma/irdma_user.h
@@ -31,7 +31,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-/*$FreeBSD$*/
#ifndef IRDMA_USER_H
#define IRDMA_USER_H
@@ -51,7 +50,7 @@
#define irdma_access_privileges u32
#define irdma_physical_fragment u64
#define irdma_address_list u64 *
-#define irdma_sgl struct irdma_sge *
+#define irdma_sgl struct ibv_sge *
#define IRDMA_MAX_MR_SIZE 0x200000000000ULL
@@ -81,96 +80,6 @@
#define IRDMA_OP_TYPE_REC_IMM 0x3f
#define IRDMA_FLUSH_MAJOR_ERR 1
-#define IRDMA_SRQFLUSH_RSVD_MAJOR_ERR 0xfffe
-
-/* Async Events codes */
-#define IRDMA_AE_AMP_UNALLOCATED_STAG 0x0102
-#define IRDMA_AE_AMP_INVALID_STAG 0x0103
-#define IRDMA_AE_AMP_BAD_QP 0x0104
-#define IRDMA_AE_AMP_BAD_PD 0x0105
-#define IRDMA_AE_AMP_BAD_STAG_KEY 0x0106
-#define IRDMA_AE_AMP_BAD_STAG_INDEX 0x0107
-#define IRDMA_AE_AMP_BOUNDS_VIOLATION 0x0108
-#define IRDMA_AE_AMP_RIGHTS_VIOLATION 0x0109
-#define IRDMA_AE_AMP_TO_WRAP 0x010a
-#define IRDMA_AE_AMP_FASTREG_VALID_STAG 0x010c
-#define IRDMA_AE_AMP_FASTREG_MW_STAG 0x010d
-#define IRDMA_AE_AMP_FASTREG_INVALID_RIGHTS 0x010e
-#define IRDMA_AE_AMP_FASTREG_INVALID_LENGTH 0x0110
-#define IRDMA_AE_AMP_INVALIDATE_SHARED 0x0111
-#define IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS 0x0112
-#define IRDMA_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS 0x0113
-#define IRDMA_AE_AMP_MWBIND_VALID_STAG 0x0114
-#define IRDMA_AE_AMP_MWBIND_OF_MR_STAG 0x0115
-#define IRDMA_AE_AMP_MWBIND_TO_ZERO_BASED_STAG 0x0116
-#define IRDMA_AE_AMP_MWBIND_TO_MW_STAG 0x0117
-#define IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS 0x0118
-#define IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS 0x0119
-#define IRDMA_AE_AMP_MWBIND_TO_INVALID_PARENT 0x011a
-#define IRDMA_AE_AMP_MWBIND_BIND_DISABLED 0x011b
-#define IRDMA_AE_PRIV_OPERATION_DENIED 0x011c
-#define IRDMA_AE_AMP_INVALIDATE_TYPE1_MW 0x011d
-#define IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW 0x011e
-#define IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG 0x011f
-#define IRDMA_AE_AMP_MWBIND_WRONG_TYPE 0x0120
-#define IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH 0x0121
-#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0132
-#define IRDMA_AE_UDA_XMIT_BAD_PD 0x0133
-#define IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT 0x0134
-#define IRDMA_AE_UDA_L4LEN_INVALID 0x0135
-#define IRDMA_AE_BAD_CLOSE 0x0201
-#define IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE 0x0202
-#define IRDMA_AE_CQ_OPERATION_ERROR 0x0203
-#define IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO 0x0205
-#define IRDMA_AE_STAG_ZERO_INVALID 0x0206
-#define IRDMA_AE_IB_RREQ_AND_Q1_FULL 0x0207
-#define IRDMA_AE_IB_INVALID_REQUEST 0x0208
-#define IRDMA_AE_WQE_UNEXPECTED_OPCODE 0x020a
-#define IRDMA_AE_WQE_INVALID_PARAMETER 0x020b
-#define IRDMA_AE_WQE_INVALID_FRAG_DATA 0x020c
-#define IRDMA_AE_IB_REMOTE_ACCESS_ERROR 0x020d
-#define IRDMA_AE_IB_REMOTE_OP_ERROR 0x020e
-#define IRDMA_AE_WQE_LSMM_TOO_LONG 0x0220
-#define IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301
-#define IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303
-#define IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304
-#define IRDMA_AE_DDP_UBE_INVALID_MO 0x0305
-#define IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE 0x0306
-#define IRDMA_AE_DDP_UBE_INVALID_QN 0x0307
-#define IRDMA_AE_DDP_NO_L_BIT 0x0308
-#define IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION 0x0311
-#define IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE 0x0312
-#define IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST 0x0313
-#define IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP 0x0314
-#define IRDMA_AE_ROCE_RSP_LENGTH_ERROR 0x0316
-#define IRDMA_AE_ROCE_EMPTY_MCG 0x0380
-#define IRDMA_AE_ROCE_BAD_MC_IP_ADDR 0x0381
-#define IRDMA_AE_ROCE_BAD_MC_QPID 0x0382
-#define IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH 0x0383
-#define IRDMA_AE_INVALID_ARP_ENTRY 0x0401
-#define IRDMA_AE_INVALID_TCP_OPTION_RCVD 0x0402
-#define IRDMA_AE_STALE_ARP_ENTRY 0x0403
-#define IRDMA_AE_INVALID_AH_ENTRY 0x0406
-#define IRDMA_AE_LLP_CLOSE_COMPLETE 0x0501
-#define IRDMA_AE_LLP_CONNECTION_RESET 0x0502
-#define IRDMA_AE_LLP_FIN_RECEIVED 0x0503
-#define IRDMA_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH 0x0504
-#define IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR 0x0505
-#define IRDMA_AE_LLP_SEGMENT_TOO_SMALL 0x0507
-#define IRDMA_AE_LLP_SYN_RECEIVED 0x0508
-#define IRDMA_AE_LLP_TERMINATE_RECEIVED 0x0509
-#define IRDMA_AE_LLP_TOO_MANY_RETRIES 0x050a
-#define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b
-#define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c
-#define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e
-#define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520
-#define IRDMA_AE_RESET_SENT 0x0601
-#define IRDMA_AE_TERMINATE_SENT 0x0602
-#define IRDMA_AE_RESET_NOT_SENT 0x0603
-#define IRDMA_AE_LCE_QP_CATASTROPHIC 0x0700
-#define IRDMA_AE_LCE_FUNCTION_CATASTROPHIC 0x0701
-#define IRDMA_AE_LCE_CQ_CATASTROPHIC 0x0702
-#define IRDMA_AE_QP_SUSPEND_COMPLETE 0x0900
enum irdma_device_caps_const {
IRDMA_WQE_SIZE = 4,
@@ -202,8 +111,7 @@
IRDMA_MAX_OUTBOUND_MSG_SIZE = 65537,
/* 64K +1 */
IRDMA_MAX_INBOUND_MSG_SIZE = 65537,
- IRDMA_MAX_PUSH_PAGE_COUNT = 1024,
- IRDMA_MAX_PE_ENA_VF_COUNT = 32,
+ IRDMA_MAX_PE_ENA_VF_COUNT = 32,
IRDMA_MAX_VF_FPM_ID = 47,
IRDMA_MAX_SQ_PAYLOAD_SIZE = 2145386496,
IRDMA_MAX_INLINE_DATA_SIZE = 101,
@@ -230,12 +138,7 @@
FLUSH_RETRY_EXC_ERR,
FLUSH_MW_BIND_ERR,
FLUSH_REM_INV_REQ_ERR,
-};
-
-enum irdma_qp_event_type {
- IRDMA_QP_EVENT_CATASTROPHIC,
- IRDMA_QP_EVENT_ACCESS_ERR,
- IRDMA_QP_EVENT_REQ_ERR,
+ FLUSH_RNR_RETRY_EXC_ERR,
};
enum irdma_cmpl_status {
@@ -283,12 +186,6 @@
struct irdma_qp_uk_init_info;
struct irdma_cq_uk_init_info;
-struct irdma_sge {
- irdma_tagged_offset tag_off;
- u32 len;
- irdma_stag stag;
-};
-
struct irdma_ring {
volatile u32 head;
volatile u32 tail; /* effective tail */
@@ -320,13 +217,13 @@
struct irdma_rdma_write {
irdma_sgl lo_sg_list;
u32 num_lo_sges;
- struct irdma_sge rem_addr;
+ struct ibv_sge rem_addr;
};
struct irdma_rdma_read {
irdma_sgl lo_sg_list;
u32 num_lo_sges;
- struct irdma_sge rem_addr;
+ struct ibv_sge rem_addr;
};
struct irdma_bind_window {
@@ -400,11 +297,6 @@
} stat;
};
-struct qp_err_code {
- enum irdma_flush_opcode flush_code;
- enum irdma_qp_event_type event_type;
-};
-
int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
struct irdma_post_sq_info *info, bool post_sq);
int irdma_uk_inline_send(struct irdma_qp_uk *qp,
@@ -427,9 +319,9 @@
bool post_sq);
struct irdma_wqe_uk_ops {
- void (*iw_copy_inline_data)(u8 *dest, struct irdma_sge *sge_list, u32 num_sges, u8 polarity);
+ void (*iw_copy_inline_data)(u8 *dest, struct ibv_sge *sge_list, u32 num_sges, u8 polarity);
u16 (*iw_inline_data_size_to_quanta)(u32 data_size);
- void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct irdma_sge *sge,
+ void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct ibv_sge *sge,
u8 valid);
void (*iw_set_mw_bind_wqe)(__le64 *wqe,
struct irdma_bind_window *op_info);
@@ -445,8 +337,6 @@
struct irdma_cq_uk_init_info *info);
int irdma_uk_qp_init(struct irdma_qp_uk *qp,
struct irdma_qp_uk_init_info *info);
-void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift,
- u8 *rq_shift);
int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo,
u32 *sq_depth, u8 *sq_shift);
int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo,
@@ -495,6 +385,7 @@
u8 rwqe_polarity;
u8 rq_wqe_size;
u8 rq_wqe_size_multiplier;
+ u8 start_wqe_idx;
bool deferred_flag:1;
bool push_mode:1; /* whether the last post wqe was pushed */
bool push_dropped:1;
@@ -542,6 +433,7 @@
u32 sq_depth;
u32 rq_depth;
u8 first_sq_wq;
+ u8 start_wqe_idx;
u8 type;
u8 sq_shift;
u8 rq_shift;
@@ -575,75 +467,4 @@
void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
u32 wqe_idx, bool post_sq);
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
-
-static inline struct qp_err_code irdma_ae_to_qp_err_code(u16 ae_id)
-{
- struct qp_err_code qp_err = { 0 };
-
- switch (ae_id) {
- case IRDMA_AE_AMP_BOUNDS_VIOLATION:
- case IRDMA_AE_AMP_INVALID_STAG:
- case IRDMA_AE_AMP_RIGHTS_VIOLATION:
- case IRDMA_AE_AMP_UNALLOCATED_STAG:
- case IRDMA_AE_AMP_BAD_PD:
- case IRDMA_AE_AMP_BAD_QP:
- case IRDMA_AE_AMP_BAD_STAG_KEY:
- case IRDMA_AE_AMP_BAD_STAG_INDEX:
- case IRDMA_AE_AMP_TO_WRAP:
- case IRDMA_AE_PRIV_OPERATION_DENIED:
- qp_err.flush_code = FLUSH_PROT_ERR;
- qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
- break;
- case IRDMA_AE_UDA_XMIT_BAD_PD:
- case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
- qp_err.flush_code = FLUSH_LOC_QP_OP_ERR;
- qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
- break;
- case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT:
- case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
- case IRDMA_AE_UDA_L4LEN_INVALID:
- case IRDMA_AE_DDP_UBE_INVALID_MO:
- case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
- qp_err.flush_code = FLUSH_LOC_LEN_ERR;
- qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
- break;
- case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
- case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
- qp_err.flush_code = FLUSH_REM_ACCESS_ERR;
- qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
- break;
- case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
- case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
- case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
- case IRDMA_AE_AMP_MWBIND_VALID_STAG:
- qp_err.flush_code = FLUSH_MW_BIND_ERR;
- qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR;
- break;
- case IRDMA_AE_LLP_TOO_MANY_RETRIES:
- qp_err.flush_code = FLUSH_RETRY_EXC_ERR;
- qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
- break;
- case IRDMA_AE_IB_INVALID_REQUEST:
- qp_err.flush_code = FLUSH_REM_INV_REQ_ERR;
- qp_err.event_type = IRDMA_QP_EVENT_REQ_ERR;
- break;
- case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
- case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
- case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
- case IRDMA_AE_IB_REMOTE_OP_ERROR:
- qp_err.flush_code = FLUSH_REM_OP_ERR;
- qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
- break;
- case IRDMA_AE_LCE_QP_CATASTROPHIC:
- qp_err.flush_code = FLUSH_FATAL_ERR;
- qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
- break;
- default:
- qp_err.flush_code = FLUSH_GENERAL_ERR;
- qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
- break;
- }
-
- return qp_err;
-}
#endif /* IRDMA_USER_H */
diff --git a/contrib/ofed/libirdma/irdma_uverbs.c b/contrib/ofed/libirdma/irdma_uverbs.c
--- a/contrib/ofed/libirdma/irdma_uverbs.c
+++ b/contrib/ofed/libirdma/irdma_uverbs.c
@@ -31,7 +31,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-/*$FreeBSD$*/
#include <config.h>
#include <stdlib.h>
@@ -153,6 +152,7 @@
err_free:
free(iwupd);
+
errno = err;
return NULL;
}
@@ -164,7 +164,6 @@
int
irdma_ufree_pd(struct ibv_pd *pd)
{
- struct irdma_uvcontext *iwvctx = container_of(pd->context, struct irdma_uvcontext, ibv_ctx);
struct irdma_upd *iwupd;
int ret;
@@ -375,12 +374,12 @@
* @cqe_64byte_ena: enable 64byte cqe
*/
static inline int
-get_cq_size(int ncqe, u8 hw_rev, bool cqe_64byte_ena)
+get_cq_size(int ncqe, u8 hw_rev)
{
ncqe++;
/* Completions with immediate require 1 extra entry */
- if (!cqe_64byte_ena && hw_rev > IRDMA_GEN_1)
+ if (hw_rev > IRDMA_GEN_1)
ncqe *= 2;
if (ncqe < IRDMA_U_MINCQ_SIZE)
@@ -389,11 +388,8 @@
return ncqe;
}
-static inline size_t get_cq_total_bytes(u32 cq_size, bool cqe_64byte_ena){
- if (cqe_64byte_ena)
- return roundup(cq_size * sizeof(struct irdma_extended_cqe), IRDMA_HW_PAGE_SIZE);
- else
- return roundup(cq_size * sizeof(struct irdma_cqe), IRDMA_HW_PAGE_SIZE);
+static inline size_t get_cq_total_bytes(u32 cq_size) {
+ return roundup(cq_size * sizeof(struct irdma_cqe), IRDMA_HW_PAGE_SIZE);
}
/**
@@ -421,7 +417,6 @@
u32 cq_pages;
int ret, ncqe;
u8 hw_rev;
- bool cqe_64byte_ena;
iwvctx = container_of(context, struct irdma_uvcontext, ibv_ctx);
uk_attrs = &iwvctx->uk_attrs;
@@ -455,11 +450,10 @@
return NULL;
}
- cqe_64byte_ena = uk_attrs->feature_flags & IRDMA_FEATURE_64_BYTE_CQE ? true : false;
- info.cq_size = get_cq_size(attr_ex->cqe, hw_rev, cqe_64byte_ena);
+ info.cq_size = get_cq_size(attr_ex->cqe, hw_rev);
+ total_size = get_cq_total_bytes(info.cq_size);
iwucq->comp_vector = attr_ex->comp_vector;
LIST_INIT(&iwucq->resize_list);
- total_size = get_cq_total_bytes(info.cq_size, cqe_64byte_ena);
cq_pages = total_size >> IRDMA_HW_PAGE_SHIFT;
if (!(uk_attrs->feature_flags & IRDMA_FEATURE_CQ_RESIZE))
@@ -529,8 +523,6 @@
info.cq_id = resp.cq_id;
/* Do not report the CQE's reserved for immediate and burned by HW */
iwucq->verbs_cq.cq.cqe = ncqe;
- if (cqe_64byte_ena)
- info.avoid_mem_cflct = true;
info.cqe_alloc_db = (u32 *)((u8 *)iwvctx->db + IRDMA_DB_CQ_OFFSET);
irdma_uk_cq_init(&iwucq->cq, &info);
return &iwucq->verbs_cq.cq_ex;
@@ -586,7 +578,7 @@
irdma_free_cq_buf(struct irdma_cq_buf *cq_buf)
{
ibv_cmd_dereg_mr(&cq_buf->vmr.ibv_mr);
- irdma_free_hw_buf(cq_buf->cq.cq_base, cq_buf->buf_size);
+ irdma_free_hw_buf(cq_buf->cq.cq_base, get_cq_total_bytes(cq_buf->cq.cq_size));
free(cq_buf);
}
@@ -1323,6 +1315,8 @@
cmd.user_wqe_bufs = (__u64) ((uintptr_t)info->sq);
cmd.user_compl_ctx = (__u64) (uintptr_t)&iwuqp->qp;
+ cmd.comp_mask |= IRDMA_CREATE_QP_USE_START_WQE_IDX;
+
ret = ibv_cmd_create_qp(pd, &iwuqp->ibv_qp, attr, &cmd.ibv_cmd,
sizeof(cmd), &resp.ibv_resp,
sizeof(struct irdma_ucreate_qp_resp));
@@ -1332,6 +1326,8 @@
info->sq_size = resp.actual_sq_size;
info->rq_size = resp.actual_rq_size;
info->first_sq_wq = legacy_mode ? 1 : resp.lsmm;
+ if (resp.comp_mask & IRDMA_CREATE_QP_USE_START_WQE_IDX)
+ info->start_wqe_idx = resp.start_wqe_idx;
info->qp_caps = resp.qp_caps;
info->qp_id = resp.qp_id;
iwuqp->irdma_drv_opt = resp.irdma_drv_opt;
@@ -1380,6 +1376,8 @@
if (attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags ||
+ attr->cap.max_send_wr > uk_attrs->max_hw_wq_quanta ||
+ attr->cap.max_recv_wr > uk_attrs->max_hw_rq_quanta ||
attr->cap.max_inline_data > uk_attrs->max_hw_inline) {
errno = EINVAL;
return NULL;
@@ -1431,18 +1429,12 @@
attr->cap.max_recv_wr = info.rq_size;
}
- iwuqp->recv_sges = calloc(attr->cap.max_recv_sge, sizeof(*iwuqp->recv_sges));
- if (!iwuqp->recv_sges) {
- status = errno; /* preserve errno */
- goto err_destroy_lock;
- }
-
info.wqe_alloc_db = (u32 *)iwvctx->db;
info.legacy_mode = iwvctx->legacy_mode;
info.sq_wrtrk_array = calloc(info.sq_depth, sizeof(*info.sq_wrtrk_array));
if (!info.sq_wrtrk_array) {
status = errno; /* preserve errno */
- goto err_free_rsges;
+ goto err_destroy_lock;
}
info.rq_wrid_array = calloc(info.rq_depth, sizeof(*info.rq_wrid_array));
@@ -1476,8 +1468,6 @@
free(info.rq_wrid_array);
err_free_sq_wrtrk:
free(info.sq_wrtrk_array);
-err_free_rsges:
- free(iwuqp->recv_sges);
err_destroy_lock:
pthread_spin_destroy(&iwuqp->lock);
err_free_qp:
@@ -1636,7 +1626,6 @@
free(iwuqp->qp.rq_wrid_array);
irdma_free_hw_buf(iwuqp->qp.sq_base, iwuqp->buf_size);
- free(iwuqp->recv_sges);
free(iwuqp);
return 0;
@@ -1646,26 +1635,6 @@
return ret;
}
-/**
- * irdma_copy_sg_list - copy sg list for qp
- * @sg_list: copied into sg_list
- * @sgl: copy from sgl
- * @num_sges: count of sg entries
- * @max_sges: count of max supported sg entries
- */
-static void
-irdma_copy_sg_list(struct irdma_sge *sg_list, struct ibv_sge *sgl,
- int num_sges)
-{
- int i;
-
- for (i = 0; i < num_sges; i++) {
- sg_list[i].tag_off = sgl[i].addr;
- sg_list[i].len = sgl[i].length;
- sg_list[i].stag = sgl[i].lkey;
- }
-}
-
/**
* calc_type2_mw_stag - calculate type 2 MW stag
* @rkey: desired rkey of the MW
@@ -1744,7 +1713,7 @@
info.stag_to_inv = ib_wr->imm_data;
}
info.op.send.num_sges = ib_wr->num_sge;
- info.op.send.sg_list = (struct irdma_sge *)ib_wr->sg_list;
+ info.op.send.sg_list = (struct ibv_sge *)ib_wr->sg_list;
if (ib_qp->qp_type == IBV_QPT_UD) {
struct irdma_uah *ah = container_of(ib_wr->wr.ud.ah,
struct irdma_uah, ibv_ah);
@@ -1775,9 +1744,9 @@
info.op_type = IRDMA_OP_TYPE_RDMA_WRITE;
info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
- info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
- info.op.rdma_write.rem_addr.tag_off = ib_wr->wr.rdma.remote_addr;
- info.op.rdma_write.rem_addr.stag = ib_wr->wr.rdma.rkey;
+ info.op.rdma_write.lo_sg_list = ib_wr->sg_list;
+ info.op.rdma_write.rem_addr.addr = ib_wr->wr.rdma.remote_addr;
+ info.op.rdma_write.rem_addr.lkey = ib_wr->wr.rdma.rkey;
if (ib_wr->send_flags & IBV_SEND_INLINE)
err = irdma_uk_inline_rdma_write(&iwuqp->qp, &info, false);
else
@@ -1789,10 +1758,10 @@
break;
}
info.op_type = IRDMA_OP_TYPE_RDMA_READ;
- info.op.rdma_read.rem_addr.tag_off = ib_wr->wr.rdma.remote_addr;
- info.op.rdma_read.rem_addr.stag = ib_wr->wr.rdma.rkey;
+ info.op.rdma_read.rem_addr.addr = ib_wr->wr.rdma.remote_addr;
+ info.op.rdma_read.rem_addr.lkey = ib_wr->wr.rdma.rkey;
- info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list;
+ info.op.rdma_read.lo_sg_list = ib_wr->sg_list;
info.op.rdma_read.num_lo_sges = ib_wr->num_sge;
err = irdma_uk_rdma_read(&iwuqp->qp, &info, false, false);
break;
@@ -1874,14 +1843,11 @@
struct ibv_recv_wr **bad_wr)
{
struct irdma_post_rq_info post_recv = {};
- struct irdma_sge *sg_list;
struct irdma_uqp *iwuqp;
bool reflush = false;
int err = 0;
iwuqp = container_of(ib_qp, struct irdma_uqp, ibv_qp);
- sg_list = iwuqp->recv_sges;
-
err = pthread_spin_lock(&iwuqp->lock);
if (err)
return err;
@@ -1898,8 +1864,7 @@
}
post_recv.num_sges = ib_wr->num_sge;
post_recv.wr_id = ib_wr->wr_id;
- irdma_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
- post_recv.sg_list = sg_list;
+ post_recv.sg_list = ib_wr->sg_list;
err = irdma_uk_post_receive(&iwuqp->qp, &post_recv);
if (err) {
*bad_wr = ib_wr;
@@ -2023,7 +1988,6 @@
u32 cq_pages;
int cqe_needed;
int ret = 0;
- bool cqe_64byte_ena;
iwucq = container_of(cq, struct irdma_ucq, verbs_cq.cq);
iwvctx = container_of(cq->context, struct irdma_uvcontext, ibv_ctx);
@@ -2035,14 +1999,11 @@
if (cqe < uk_attrs->min_hw_cq_size || cqe > uk_attrs->max_hw_cq_size - 1)
return EINVAL;
- cqe_64byte_ena = uk_attrs->feature_flags & IRDMA_FEATURE_64_BYTE_CQE ? true : false;
-
- cqe_needed = get_cq_size(cqe, uk_attrs->hw_rev, cqe_64byte_ena);
-
+ cqe_needed = get_cq_size(cqe, uk_attrs->hw_rev);
if (cqe_needed == iwucq->cq.cq_size)
return 0;
- cq_size = get_cq_total_bytes(cqe_needed, cqe_64byte_ena);
+ cq_size = get_cq_total_bytes(cqe_needed);
cq_pages = cq_size >> IRDMA_HW_PAGE_SHIFT;
cq_base = irdma_alloc_hw_buf(cq_size);
if (!cq_base)
@@ -2078,7 +2039,6 @@
goto err_resize;
memcpy(&cq_buf->cq, &iwucq->cq, sizeof(cq_buf->cq));
- cq_buf->buf_size = cq_size;
cq_buf->vmr = iwucq->vmr;
iwucq->vmr = new_mr;
irdma_uk_cq_resize(&iwucq->cq, cq_base, cqe_needed);
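
Note: with 64-byte-CQE support dropped, the CQ sizing collapses to one fixed stride. get_cq_size() still doubles the entry count on GEN2+ to leave room for completions with immediate data, and get_cq_total_bytes() rounds to the hardware page. A self-contained sketch of the math, treating the 32-byte CQE and 4 KB page as illustrative assumptions:

    #include <stddef.h>
    #include <stdint.h>

    #define CQE_SIZE     32u   /* assumed sizeof(struct irdma_cqe)  */
    #define HW_PAGE_SIZE 4096u /* assumed IRDMA_HW_PAGE_SIZE        */

    static size_t
    cq_total_bytes(uint32_t cq_size)
    {
            size_t raw = (size_t)cq_size * CQE_SIZE;

            /* roundup(raw, HW_PAGE_SIZE) */
            return (raw + HW_PAGE_SIZE - 1) & ~(size_t)(HW_PAGE_SIZE - 1);
    }
    /* cq_total_bytes(129) == 8192: 129 entries need two pages. */
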
diff --git a/contrib/ofed/libirdma/osdep.h b/contrib/ofed/libirdma/osdep.h
--- a/contrib/ofed/libirdma/osdep.h
+++ b/contrib/ofed/libirdma/osdep.h
@@ -31,7 +31,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-/*$FreeBSD$*/
#ifndef _ICRDMA_OSDEP_H_
#define _ICRDMA_OSDEP_H_
@@ -78,7 +77,6 @@
/* constants */
#define STATS_TIMER_DELAY 60000
-/* a couple of linux size defines */
#define BIT_ULL(a) (1ULL << (a))
#define min(a, b) ((a) > (b) ? (b) : (a))
#ifndef likely
@@ -91,9 +89,6 @@
#define __aligned_u64 uint64_t __aligned(8)
#define VLAN_PRIO_SHIFT 13
-#if __FreeBSD_version < 1400000
-#define IB_USER_VERBS_EX_CMD_MODIFY_QP IB_USER_VERBS_CMD_MODIFY_QP
-#endif
/*
* debug definition section
diff --git a/sys/dev/irdma/fbsd_kcompat.h b/sys/dev/irdma/fbsd_kcompat.h
--- a/sys/dev/irdma/fbsd_kcompat.h
+++ b/sys/dev/irdma/fbsd_kcompat.h
@@ -48,33 +48,23 @@
#define from_tasklet(var, callback_tasklet, tasklet_fieldname) \
container_of(callback_tasklet, typeof(*var), tasklet_fieldname)
#endif
-
-#if __FreeBSD_version >= 1400000
#define IRDMA_SET_RDMA_OBJ_SIZE(ib_struct, drv_struct, member) \
(sizeof(struct drv_struct) + \
BUILD_BUG_ON_ZERO(offsetof(struct drv_struct, member)) + \
BUILD_BUG_ON_ZERO( \
!__same_type(((struct drv_struct *)NULL)->member, \
struct ib_struct)))
-#endif /* __FreeBSD_version > 1400000 */
#define set_ibdev_dma_device(ibdev, dev) \
ibdev.dma_device = (dev)
#define set_max_sge(props, rf) \
((props)->max_sge = (rf)->sc_dev.hw_attrs.uk_attrs.max_hw_wq_frags)
-#define rdma_query_gid(ibdev, port, index, gid) \
- ib_get_cached_gid(ibdev, port, index, gid, NULL)
#define kmap(pg) page_address(pg)
#define kmap_local_page(pg) page_address(pg)
#define kunmap(pg)
#define kunmap_local(pg)
#define IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION IB_CQ_FLAGS_TIMESTAMP_COMPLETION
-#if __FreeBSD_version < 1400026
-#define kc_irdma_destroy_qp(ibqp, udata) irdma_destroy_qp(ibqp)
-#else
-#define kc_irdma_destroy_qp(ibqp, udata) irdma_destroy_qp(ibqp, udata)
-#endif
#ifndef IB_QP_ATTR_STANDARD_BITS
#define IB_QP_ATTR_STANDARD_BITS GENMASK(20, 0)
#endif
@@ -135,53 +125,23 @@
return rdma_id->route.addr.dev_addr.net;
}
-#if __FreeBSD_version < 1400026
-struct ib_cq *irdma_create_cq(struct ib_device *ibdev,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
- struct ib_udata *udata);
-#else
int irdma_create_cq(struct ib_cq *ibcq,
const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
-#endif
struct ib_qp *irdma_create_qp(struct ib_pd *ibpd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
-#if __FreeBSD_version >= 1400026
int irdma_create_ah(struct ib_ah *ib_ah,
struct ib_ah_attr *attr, u32 flags,
struct ib_udata *udata);
int irdma_create_ah_stub(struct ib_ah *ib_ah,
struct ib_ah_attr *attr, u32 flags,
struct ib_udata *udata);
-#else
-struct ib_ah *irdma_create_ah(struct ib_pd *ibpd,
- struct ib_ah_attr *attr,
- struct ib_udata *udata);
-struct ib_ah *irdma_create_ah_stub(struct ib_pd *ibpd,
- struct ib_ah_attr *attr,
- struct ib_udata *udata);
-#endif
void irdma_ether_copy(u8 *dmac, struct ib_ah_attr *attr);
-
-#if __FreeBSD_version >= 1400026
void irdma_destroy_ah(struct ib_ah *ibah, u32 flags);
void irdma_destroy_ah_stub(struct ib_ah *ibah, u32 flags);
-#else
-int irdma_destroy_ah(struct ib_ah *ibah);
-int irdma_destroy_ah_stub(struct ib_ah *ibah);
-#endif
-#if __FreeBSD_version < 1400026
-int irdma_destroy_qp(struct ib_qp *ibqp);
-#else
int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
-#endif
-#if __FreeBSD_version < 1400026
-int irdma_dereg_mr(struct ib_mr *ib_mr);
-#else
int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
-#endif
int ib_get_eth_speed(struct ib_device *dev, u32 port_num, u8 *speed, u8 *width);
enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
u8 port_num);
@@ -208,10 +168,6 @@
void irdma_unregister_qset(struct irdma_sc_vsi *vsi,
struct irdma_ws_node *tc_node);
void ib_unregister_device(struct ib_device *ibdev);
-#if __FreeBSD_version < 1400026
-int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
- unsigned long pfn, unsigned long size, pgprot_t prot);
-#endif
void irdma_disassociate_ucontext(struct ib_ucontext *context);
int kc_irdma_set_roce_cm_info(struct irdma_qp *iwqp,
struct ib_qp_attr *attr,
@@ -245,13 +201,8 @@
struct irdma_mr;
struct irdma_cq;
struct irdma_cq_buf;
-#if __FreeBSD_version < 1400026
-struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
- u32 max_num_sg);
-#else
struct ib_mr *irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg, struct ib_udata *udata);
-#endif
int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
u16 access);
struct ib_mr *irdma_rereg_mr_trans(struct irdma_mr *iwmr, u64 start, u64 len,
@@ -264,8 +215,8 @@
int irdma_validate_qp_attrs(struct ib_qp_init_attr *init_attr,
struct irdma_device *iwdev);
void irdma_setup_virt_qp(struct irdma_device *iwdev,
- struct irdma_qp *iwqp,
- struct irdma_qp_init_info *init_info);
+ struct irdma_qp *iwqp,
+ struct irdma_qp_init_info *init_info);
int irdma_setup_kmode_qp(struct irdma_device *iwdev,
struct irdma_qp *iwqp,
struct irdma_qp_init_info *info,
@@ -281,35 +232,14 @@
struct irdma_qp_host_ctx_info *ctx_info);
int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp);
void irdma_dealloc_push_page(struct irdma_pci_f *rf,
- struct irdma_sc_qp *qp);
+ struct irdma_qp *iwqp);
int irdma_process_resize_list(struct irdma_cq *iwcq, struct irdma_device *iwdev,
struct irdma_cq_buf *lcqe_buf);
-#if __FreeBSD_version < 1400026
-int irdma_destroy_cq(struct ib_cq *ib_cq);
-#else
void irdma_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
-#endif
-#if __FreeBSD_version < 1400026
-struct ib_ucontext *irdma_alloc_ucontext(struct ib_device *, struct ib_udata *);
-#else
int irdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
-#endif
-#if __FreeBSD_version < 1400026
-int irdma_dealloc_ucontext(struct ib_ucontext *);
-#else
void irdma_dealloc_ucontext(struct ib_ucontext *context);
-#endif
-#if __FreeBSD_version < 1400026
-struct ib_pd *irdma_alloc_pd(struct ib_device *, struct ib_ucontext *,
- struct ib_udata *);
-#else
int irdma_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
-#endif
-#if __FreeBSD_version < 1400026
-int irdma_dealloc_pd(struct ib_pd *);
-#else
void irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata);
-#endif
int irdma_add_gid(struct ib_device *, u8, unsigned int, const union ib_gid *,
const struct ib_gid_attr *, void **);
int irdma_del_gid(struct ib_device *, u8, unsigned int, void **);
diff --git a/sys/dev/irdma/fbsd_kcompat.c b/sys/dev/irdma/fbsd_kcompat.c
--- a/sys/dev/irdma/fbsd_kcompat.c
+++ b/sys/dev/irdma/fbsd_kcompat.c
@@ -662,8 +662,7 @@
"manage_hmc_pm_func_table", 0},
{IRDMA_OP_SUSPEND, "suspend", "suspend", 0},
{IRDMA_OP_RESUME, "resume", "resume", 0},
- {IRDMA_OP_MANAGE_VCHNL_REQ_PBLE_BP, "manage_vchnl_req_pble_bp",
- "manage_vchnl_req_pble_bp", 0},
+ {25, "manage_vchnl_req_pble_bp", "manage_vchnl_req_pble_bp", 0},
{IRDMA_OP_QUERY_FPM_VAL, "query_fpm_val", "query_fpm_val", 0},
{IRDMA_OP_COMMIT_FPM_VAL, "commit_fpm_val", "commit_fpm_val", 0},
{IRDMA_OP_AH_CREATE, "ah_create", "ah_create", 0},
diff --git a/sys/dev/irdma/icrdma.c b/sys/dev/irdma/icrdma.c
--- a/sys/dev/irdma/icrdma.c
+++ b/sys/dev/irdma/icrdma.c
@@ -52,7 +52,7 @@
/**
* Driver version
*/
-char irdma_driver_version[] = "1.2.17-k";
+char irdma_driver_version[] = "1.2.36-k";
/**
* irdma_init_tunable - prepare tunables
@@ -245,7 +245,7 @@
mtu);
else if (mtu < IRDMA_MIN_MTU_IPV6)
irdma_dev_warn(to_ibdev(dev),
- "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 1280 for IPv6\\n",
+ "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 1280 for IPv6\n",
mtu);
}
diff --git a/sys/dev/irdma/icrdma_hw.h b/sys/dev/irdma/icrdma_hw.h
--- a/sys/dev/irdma/icrdma_hw.h
+++ b/sys/dev/irdma/icrdma_hw.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2017 - 2022 Intel Corporation
+ * Copyright (c) 2017 - 2023 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -123,6 +123,7 @@
ICRDMA_MAX_IRD_SIZE = 32,
ICRDMA_MAX_ORD_SIZE = 32,
ICRDMA_MIN_WQ_SIZE = 8 /* WQEs */,
+ ICRDMA_MAX_PUSH_PAGE_COUNT = 256,
};
diff --git a/sys/dev/irdma/icrdma_hw.c b/sys/dev/irdma/icrdma_hw.c
--- a/sys/dev/irdma/icrdma_hw.c
+++ b/sys/dev/irdma/icrdma_hw.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2017 - 2022 Intel Corporation
+ * Copyright (c) 2017 - 2023 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -76,7 +76,7 @@
ICRDMA_CQPSQ_CQ_CEQID,
ICRDMA_CQPSQ_CQ_CQID,
ICRDMA_COMMIT_FPM_CQCNT,
- ICRDMA_CQPSQ_UPESD_HMCFNID
+ ICRDMA_CQPSQ_UPESD_HMCFNID,
};
static u8 icrdma_shifts[IRDMA_MAX_SHIFTS] = {
@@ -86,7 +86,7 @@
ICRDMA_CQPSQ_CQ_CEQID_S,
ICRDMA_CQPSQ_CQ_CQID_S,
ICRDMA_COMMIT_FPM_CQCNT_S,
- ICRDMA_CQPSQ_UPESD_HMCFNID_S
+ ICRDMA_CQPSQ_UPESD_HMCFNID_S,
};
/**
@@ -210,8 +210,6 @@
dev->hw_regs[i] = (u32 IOMEM *) (hw_addr + icrdma_regs[i]);
}
- dev->hw_attrs.max_hw_vf_fpm_id = IRDMA_MAX_VF_FPM_ID;
- dev->hw_attrs.first_hw_vf_fpm_id = IRDMA_FIRST_VF_FPM_ID;
for (i = 0; i < IRDMA_MAX_SHIFTS; ++i)
dev->hw_shifts[i] = icrdma_shifts[i];
@@ -231,6 +229,7 @@
dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE;
dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT;
dev->hw_attrs.max_stat_idx = IRDMA_HW_STAT_INDEX_MAX_GEN_2;
+ dev->hw_attrs.max_hw_device_pages = ICRDMA_MAX_PUSH_PAGE_COUNT;
dev->hw_attrs.uk_attrs.max_hw_wq_frags = ICRDMA_MAX_WQ_FRAGMENT_COUNT;
dev->hw_attrs.uk_attrs.max_hw_read_sges = ICRDMA_MAX_SGE_RD;
diff --git a/sys/dev/irdma/irdma-abi.h b/sys/dev/irdma/irdma-abi.h
--- a/sys/dev/irdma/irdma-abi.h
+++ b/sys/dev/irdma/irdma-abi.h
@@ -54,6 +54,11 @@
enum {
IRDMA_ALLOC_UCTX_USE_RAW_ATTR = 1 << 0,
+ IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE = 1 << 1,
+};
+
+enum {
+ IRDMA_CREATE_QP_USE_START_WQE_IDX = 1 << 0,
};
struct irdma_alloc_ucontext_req {
@@ -82,6 +87,8 @@
__u8 hw_rev;
__u8 rsvd2;
__aligned_u64 comp_mask;
+ __u16 min_hw_wq_size;
+ __u8 rsvd3[6];
};
struct irdma_alloc_pd_resp {
@@ -101,6 +108,7 @@
struct irdma_create_qp_req {
__aligned_u64 user_wqe_bufs;
__aligned_u64 user_compl_ctx;
+ __aligned_u64 comp_mask;
};
struct irdma_mem_reg_req {
@@ -130,6 +138,9 @@
__u8 lsmm;
__u8 rsvd;
__u32 qp_caps;
+ __aligned_u64 comp_mask;
+ __u8 start_wqe_idx;
+ __u8 rsvd2[7];
};
struct irdma_modify_qp_resp {
diff --git a/sys/dev/irdma/irdma.h b/sys/dev/irdma/irdma.h
--- a/sys/dev/irdma/irdma.h
+++ b/sys/dev/irdma/irdma.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2017 - 2023 Intel Corporation
+ * Copyright (c) 2017 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -183,7 +183,7 @@
IRDMA_GEN_RSVD = 0,
IRDMA_GEN_1 = 1,
IRDMA_GEN_2 = 2,
- IRDMA_GEN_MAX = 2,
+ IRDMA_GEN_MAX = IRDMA_GEN_2,
};
struct irdma_uk_attrs {
diff --git a/sys/dev/irdma/irdma_cm.c b/sys/dev/irdma/irdma_cm.c
--- a/sys/dev/irdma/irdma_cm.c
+++ b/sys/dev/irdma/irdma_cm.c
@@ -1079,14 +1079,14 @@
*type = IRDMA_MPA_REQUEST_ACCEPT;
- if (len < sizeof(struct ietf_mpa_v1)) {
+ if (len < sizeof(*mpa_frame)) {
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"ietf buffer small (%x)\n", len);
return -EINVAL;
}
mpa_frame = (struct ietf_mpa_v1 *)buf;
- mpa_hdr_len = sizeof(struct ietf_mpa_v1);
+ mpa_hdr_len = sizeof(*mpa_frame);
priv_data_len = ntohs(mpa_frame->priv_data_len);
if (priv_data_len > IETF_MAX_PRIV_DATA_LEN) {
@@ -1440,6 +1440,7 @@
sizeof(struct option_base) + TCP_OPTIONS_PADDING];
struct irdma_kmem_info opts;
int optionssize = 0;
+
/* Sending MSS option */
union all_known_options *options;
@@ -1615,7 +1616,8 @@
}
/**
- * irdma_netdev_vlan_ipv6 - Gets the netdev and mac
+ * irdma_get_vlan_mac_ipv6 - Get the vlan and mac for an IPv6
+ * address
* @addr: local IPv6 address
* @vlan_id: vlan id for the given IPv6 address
* @mac: mac address for the given IPv6 address
@@ -1623,14 +1625,12 @@
* Returns the net_device of the IPv6 address and also sets the
* vlan id and mac for that address.
*/
-if_t
-irdma_netdev_vlan_ipv6(struct iw_cm_id *cm_id, u32 *addr, u16 *vlan_id, u8 *mac)
+void
+irdma_get_vlan_mac_ipv6(struct iw_cm_id *cm_id, u32 *addr, u16 *vlan_id, u8 *mac)
{
if_t ip_dev = NULL;
struct in6_addr laddr6;
-#ifdef VIMAGE
- struct vnet *vnet = irdma_cmid_to_vnet(cm_id);
-#endif
+ struct vnet *vnet = &init_net;
struct ifaddr *ifa;
u16 scope_id = 0;
@@ -1645,10 +1645,9 @@
scope_id = ntohs(laddr6.__u6_addr.__u6_addr16[1]);
#ifdef VIMAGE
- ip_dev = ip6_ifp_find(vnet, laddr6, scope_id);
-#else
- ip_dev = ip6_ifp_find(&init_net, laddr6, scope_id);
+ vnet = irdma_cmid_to_vnet(cm_id);
#endif
+ ip_dev = ip6_ifp_find(vnet, laddr6, scope_id);
if (ip_dev) {
if (vlan_id)
*vlan_id = rdma_vlan_dev_vlan_id(ip_dev);
@@ -1656,8 +1655,6 @@
if (ifa && ifa->ifa_addr && mac)
ether_addr_copy(mac, if_getlladdr(ip_dev));
}
-
- return ip_dev;
}
/**
@@ -1668,16 +1665,13 @@
irdma_get_vlan_ipv4(struct iw_cm_id *cm_id, u32 *addr)
{
if_t netdev;
-#ifdef VIMAGE
- struct vnet *vnet = irdma_cmid_to_vnet(cm_id);
-#endif
+ struct vnet *vnet = &init_net;
u16 vlan_id = 0xFFFF;
#ifdef VIMAGE
- netdev = ip_ifp_find(vnet, htonl(addr[0]));
-#else
- netdev = ip_ifp_find(&init_net, htonl(addr[0]));
+ vnet = irdma_cmid_to_vnet(cm_id);
#endif
+ netdev = ip_ifp_find(vnet, htonl(addr[0]));
if (netdev) {
vlan_id = rdma_vlan_dev_vlan_id(netdev);
dev_put(netdev);
@@ -2546,7 +2540,7 @@
u32 inc_sequence;
int optionsize;
- optionsize = (tcph->th_off << 2) - sizeof(struct tcphdr);
+ optionsize = (tcph->th_off << 2) - sizeof(*tcph);
inc_sequence = ntohl(tcph->th_seq);
switch (cm_node->state) {
@@ -2613,7 +2607,7 @@
u32 inc_sequence;
int optionsize;
- optionsize = (tcph->th_off << 2) - sizeof(struct tcphdr);
+ optionsize = (tcph->th_off << 2) - sizeof(*tcph);
inc_sequence = ntohl(tcph->th_seq);
switch (cm_node->state) {
case IRDMA_CM_STATE_SYN_SENT:
@@ -2687,7 +2681,7 @@
int optionsize;
u32 datasize = rbuf->datalen;
- optionsize = (tcph->th_off << 2) - sizeof(struct tcphdr);
+ optionsize = (tcph->th_off << 2) - sizeof(*tcph);
if (irdma_check_seq(cm_node, tcph))
return -EINVAL;
@@ -3493,6 +3487,7 @@
if (iwqp->ietf_mem.va) {
if (iwqp->lsmm_mr)
iwdev->ibdev.dereg_mr(iwqp->lsmm_mr, NULL);
+
irdma_free_dma_mem(iwdev->rf->sc_dev.hw,
&iwqp->ietf_mem);
iwqp->ietf_mem.va = NULL;
@@ -3536,8 +3531,8 @@
cm_node->vlan_id = irdma_get_vlan_ipv4(cm_id, cm_node->loc_addr);
} else {
cm_node->ipv4 = false;
- irdma_netdev_vlan_ipv6(cm_id, cm_node->loc_addr,
- &cm_node->vlan_id, NULL);
+ irdma_get_vlan_mac_ipv6(cm_id, cm_node->loc_addr, &cm_node->vlan_id,
+ NULL);
}
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_CM, "Accept vlan_id=%d\n",
cm_node->vlan_id);
@@ -3744,7 +3739,7 @@
raddr6->sin6_addr.__u6_addr.__u6_addr32);
cm_info.loc_port = ntohs(laddr6->sin6_port);
cm_info.rem_port = ntohs(raddr6->sin6_port);
- irdma_netdev_vlan_ipv6(cm_id, cm_info.loc_addr, &cm_info.vlan_id, NULL);
+ irdma_get_vlan_mac_ipv6(cm_id, cm_info.loc_addr, &cm_info.vlan_id, NULL);
}
cm_info.cm_id = cm_id;
cm_info.qh_qpid = iwdev->vsi.ilq->qp_id;
@@ -3873,8 +3868,8 @@
laddr6->sin6_addr.__u6_addr.__u6_addr32);
cm_info.loc_port = ntohs(laddr6->sin6_port);
if (!IN6_IS_ADDR_UNSPECIFIED(&laddr6->sin6_addr)) {
- irdma_netdev_vlan_ipv6(cm_id, cm_info.loc_addr,
- &cm_info.vlan_id, NULL);
+ irdma_get_vlan_mac_ipv6(cm_id, cm_info.loc_addr,
+ &cm_info.vlan_id, NULL);
} else {
cm_info.vlan_id = 0xFFFF;
wildcard = true;
diff --git a/sys/dev/irdma/irdma_ctrl.c b/sys/dev/irdma/irdma_ctrl.c
--- a/sys/dev/irdma/irdma_ctrl.c
+++ b/sys/dev/irdma/irdma_ctrl.c
@@ -785,14 +785,14 @@
set_64bit_val(qp_ctx, IRDMA_BYTE_144,
FIELD_PREP(IRDMAQPC_STAT_INDEX, info->stats_idx));
set_64bit_val(qp_ctx, IRDMA_BYTE_152,
- FIELD_PREP(IRDMAQPC_MACADDRESS, irdma_mac_to_u64(roce_info->mac_addr)));
+ FIELD_PREP(IRDMAQPC_MACADDRESS,
+ irdma_mac_to_u64(roce_info->mac_addr)));
set_64bit_val(qp_ctx, IRDMA_BYTE_160,
FIELD_PREP(IRDMAQPC_ORDSIZE, roce_info->ord_size) |
FIELD_PREP(IRDMAQPC_IRDSIZE, irdma_sc_get_encoded_ird_size(roce_info->ird_size)) |
FIELD_PREP(IRDMAQPC_WRRDRSPOK, roce_info->wr_rdresp_en) |
FIELD_PREP(IRDMAQPC_RDOK, roce_info->rd_en) |
FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
- FIELD_PREP(IRDMAQPC_BINDEN, roce_info->bind_en) |
FIELD_PREP(IRDMAQPC_FASTREGEN, roce_info->fast_reg_en) |
FIELD_PREP(IRDMAQPC_DCQCNENABLE, roce_info->dcqcn_en) |
FIELD_PREP(IRDMAQPC_RCVNOICRC, roce_info->rcv_no_icrc) |
@@ -1016,7 +1016,6 @@
FIELD_PREP(IRDMAQPC_WRRDRSPOK, iw->wr_rdresp_en) |
FIELD_PREP(IRDMAQPC_RDOK, iw->rd_en) |
FIELD_PREP(IRDMAQPC_SNDMARKERS, iw->snd_mark_en) |
- FIELD_PREP(IRDMAQPC_BINDEN, iw->bind_en) |
FIELD_PREP(IRDMAQPC_FASTREGEN, iw->fast_reg_en) |
FIELD_PREP(IRDMAQPC_PRIVEN, iw->priv_mode_en) |
FIELD_PREP(IRDMAQPC_USESTATSINSTANCE, info->stats_idx_valid) |
@@ -1466,6 +1465,15 @@
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
irdma_debug_buf(qp->dev, IRDMA_DEBUG_QP, "CONN EST WQE", wqe,
IRDMA_QP_WQE_MIN_SIZE);
+ if (qp->qp_uk.start_wqe_idx) {
+ wqe = qp_uk->sq_base[3].elem;
+ hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_NOP) |
+ FIELD_PREP(IRDMAQPSQ_LOCALFENCE, 1) |
+ FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
+ irdma_wmb(); /* make sure WQE is written before valid bit is set */
+
+ set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
+ }
}
/**
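
Note: the NOP fill above pairs with the start_wqe_idx plumbing added in irdma_uk.c — when the kernel reports a start index, the extra reserved SQ slot is populated with a fenced no-op, presumably so the skipped slot still carries a valid WQE. A compact restatement of how many setup WQEs get reserved, mirroring the move_cnt logic in the libirdma hunk:

    static uint16_t
    conn_wqe_count(uint8_t start_wqe_idx, uint64_t feature_flags)
    {
            if (start_wqe_idx)              /* kernel-dictated start index wins */
                    return start_wqe_idx;
            if (feature_flags & IRDMA_FEATURE_RTS_AE)
                    return 3;               /* legacy RTS-AE reservation */
            return 1;
    }
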
@@ -1831,7 +1839,7 @@
if (copy_len)
irdma_memcpy(termhdr + 1, pkt, copy_len);
- return sizeof(struct irdma_terminate_hdr) + copy_len;
+ return sizeof(*termhdr) + copy_len;
}
/**
@@ -2124,10 +2132,6 @@
IRDMA_CQP_WQE_SIZE * 8);
irdma_sc_cqp_post_sq(cqp);
- irdma_debug(cqp->dev, IRDMA_DEBUG_STATS,
- "CQP SQ head 0x%x tail 0x%x size 0x%x\n", cqp->sq_ring.head,
- cqp->sq_ring.tail, cqp->sq_ring.size);
-
return 0;
}
@@ -2607,17 +2611,18 @@
set_64bit_val(wqe, IRDMA_BYTE_0, cq->cq_uk.cq_size);
set_64bit_val(wqe, IRDMA_BYTE_8, RS_64_1(cq, 1));
set_64bit_val(wqe, IRDMA_BYTE_16,
- FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD, cq->shadow_read_threshold));
- set_64bit_val(wqe, IRDMA_BYTE_32, (cq->virtual_map ? 0 : cq->cq_pa));
+ FIELD_PREP(IRDMA_CQPSQ_CQ_SHADOW_READ_THRESHOLD,
+ cq->shadow_read_threshold));
+ set_64bit_val(wqe, IRDMA_BYTE_32, cq->virtual_map ? 0 : cq->cq_pa);
set_64bit_val(wqe, IRDMA_BYTE_40, cq->shadow_area_pa);
set_64bit_val(wqe, IRDMA_BYTE_48,
- FIELD_PREP(IRDMA_CQPSQ_CQ_FIRSTPMPBLIDX, (cq->virtual_map ? cq->first_pm_pbl_idx : 0)));
+ FIELD_PREP(IRDMA_CQPSQ_CQ_FIRSTPMPBLIDX,
+ cq->virtual_map ? cq->first_pm_pbl_idx : 0));
set_64bit_val(wqe, IRDMA_BYTE_56,
FIELD_PREP(IRDMA_CQPSQ_TPHVAL, cq->tph_val) |
FIELD_PREP(IRDMA_CQPSQ_VSIIDX, cq->vsi->vsi_idx));
-
hdr = FLD_LS_64(cq->dev, cq->cq_uk.cq_id, IRDMA_CQPSQ_CQ_CQID) |
- FLD_LS_64(cq->dev, (cq->ceq_id_valid ? cq->ceq_id : 0),
+ FLD_LS_64(cq->dev, cq->ceq_id_valid ? cq->ceq_id : 0,
IRDMA_CQPSQ_CQ_CEQID) |
FIELD_PREP(IRDMA_CQPSQ_OPCODE, IRDMA_CQP_OP_CREATE_CQ) |
FIELD_PREP(IRDMA_CQPSQ_CQ_LPBLSIZE, cq->pbl_chunk_size) |
@@ -2935,10 +2940,12 @@
IRDMA_HMC_IW_HDR);
irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_152, info,
IRDMA_HMC_IW_MD);
- irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_160, info,
- IRDMA_HMC_IW_OOISC);
- irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_168, info,
- IRDMA_HMC_IW_OOISCFFL);
+ if (dev->cqp->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) {
+ irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_160, info,
+ IRDMA_HMC_IW_OOISC);
+ irdma_sc_decode_fpm_commit(dev, buf, IRDMA_BYTE_168, info,
+ IRDMA_HMC_IW_OOISCFFL);
+ }
}
/* searching for the last object in HMC to find the size of the HMC area. */
@@ -3071,15 +3078,18 @@
irdma_sc_decode_fpm_query(buf, 144, obj_info, IRDMA_HMC_IW_HDR);
irdma_sc_decode_fpm_query(buf, 152, obj_info, IRDMA_HMC_IW_MD);
- irdma_sc_decode_fpm_query(buf, 160, obj_info, IRDMA_HMC_IW_OOISC);
-
- get_64bit_val(buf, IRDMA_BYTE_168, &temp);
- obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt = (u32)temp;
- obj_info[IRDMA_HMC_IW_OOISCFFL].size = 4;
- hmc_fpm_misc->ooiscf_block_size = FIELD_GET(IRDMA_QUERY_FPM_OOISCFBLOCKSIZE, temp);
- if (!hmc_fpm_misc->ooiscf_block_size &&
- obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt)
- return -EINVAL;
+
+ if (dev->cqp->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) {
+ irdma_sc_decode_fpm_query(buf, 160, obj_info, IRDMA_HMC_IW_OOISC);
+
+ get_64bit_val(buf, IRDMA_BYTE_168, &temp);
+ obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt = (u32)temp;
+ obj_info[IRDMA_HMC_IW_OOISCFFL].size = 4;
+ hmc_fpm_misc->ooiscf_block_size = FIELD_GET(IRDMA_QUERY_FPM_OOISCFBLOCKSIZE, temp);
+ if (!hmc_fpm_misc->ooiscf_block_size &&
+ obj_info[IRDMA_HMC_IW_OOISCFFL].max_cnt)
+ return -EINVAL;
+ }
return 0;
}
@@ -3251,6 +3261,7 @@
temp = FIELD_PREP(IRDMA_CQPHC_ENABLED_VFS, cqp->ena_vf_count) |
FIELD_PREP(IRDMA_CQPHC_HMC_PROFILE, cqp->hmc_profile);
+
if (hw_rev >= IRDMA_GEN_2)
temp |= FIELD_PREP(IRDMA_CQPHC_EN_REM_ENDPOINT_TRK,
cqp->en_rem_endpoint_trk);
@@ -3399,11 +3410,13 @@
void
irdma_sc_ccq_arm(struct irdma_sc_cq *ccq)
{
+ unsigned long flags;
u64 temp_val;
u16 sw_cq_sel;
u8 arm_next_se;
u8 arm_seq_num;
+ spin_lock_irqsave(&ccq->dev->cqp_lock, flags);
get_64bit_val(ccq->cq_uk.shadow_area, IRDMA_BYTE_32, &temp_val);
sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
@@ -3414,6 +3427,7 @@
FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, 1);
set_64bit_val(ccq->cq_uk.shadow_area, IRDMA_BYTE_32, temp_val);
+ spin_unlock_irqrestore(&ccq->dev->cqp_lock, flags);
irdma_wmb(); /* make sure shadow area is updated before arming */
@@ -3436,6 +3450,7 @@
u32 error;
u8 polarity;
int ret_code = 0;
+ unsigned long flags;
if (ccq->cq_uk.avoid_mem_cflct)
cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(&ccq->cq_uk);
@@ -3484,7 +3499,9 @@
irdma_wmb(); /* make sure shadow area is updated before moving tail */
+ spin_lock_irqsave(&cqp->dev->cqp_lock, flags);
IRDMA_RING_MOVE_TAIL(cqp->sq_ring);
+ spin_unlock_irqrestore(&cqp->dev->cqp_lock, flags);
atomic64_inc(&cqp->completed_ops);
return ret_code;
@@ -3809,7 +3826,6 @@
if (ceq->reg_cq)
irdma_sc_remove_cq_ctx(ceq, ceq->dev->ccq);
-
cqp = ceq->dev->cqp;
cqp->process_cqp_sds = irdma_update_sds_noccq;
@@ -3834,7 +3850,6 @@
if (ret_code)
return ret_code;
}
-
ret_code = irdma_sc_ceq_create(ceq, scratch, true);
if (!ret_code)
return irdma_sc_cceq_create_done(ceq);
@@ -3924,7 +3939,6 @@
cq_idx = irdma_sc_find_reg_cq(ceq, cq);
spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
}
-
IRDMA_RING_MOVE_TAIL(ceq->ceq_ring);
if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring))
ceq->polarity ^= 1;
@@ -4155,6 +4169,7 @@
case IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST:
case IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
+ case IRDMA_AE_ROCE_REQ_LENGTH_ERROR:
case IRDMA_AE_INVALID_ARP_ENTRY:
case IRDMA_AE_INVALID_TCP_OPTION_RCVD:
case IRDMA_AE_STALE_ARP_ENTRY:
@@ -4707,10 +4722,11 @@
u64 sd;
int i;
- for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++)
+ for (i = IRDMA_HMC_IW_QP; i < IRDMA_HMC_IW_MAX; i++) {
if (i != IRDMA_HMC_IW_PBLE)
size += round_up(hmc_info->hmc_obj[i].cnt *
hmc_info->hmc_obj[i].size, 512);
+ }
pble_info = &hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE];
size += round_up(pble_info->cnt * pble_info->size, 512);
@@ -4720,7 +4736,7 @@
sd = size >> 21;
if (sd > 0xFFFFFFFF) {
irdma_debug(dev, IRDMA_DEBUG_HMC, "sd overflow[%ld]\n", sd);
- sd = 0xFFFFFFFF - 1;
+ sd = 0xFFFFFFFE;
}
return (u32)sd;
@@ -4776,10 +4792,9 @@
int
irdma_get_rdma_features(struct irdma_sc_dev *dev)
{
- int ret_code;
+ int ret_code, byte_idx, feat_type, feat_cnt, feat_idx;
struct irdma_dma_mem feat_buf;
u64 temp;
- u16 byte_idx, feat_type, feat_cnt, feat_idx;
feat_buf.size = IRDMA_FEATURE_BUF_SIZE;
feat_buf.va = irdma_allocate_dma_mem(dev->hw, &feat_buf, feat_buf.size,
@@ -4822,7 +4837,7 @@
irdma_debug_buf(dev, IRDMA_DEBUG_WQE, "QUERY RDMA FEATURES", feat_buf.va,
feat_cnt * 8);
- for (byte_idx = 0, feat_idx = 0; feat_idx < min(feat_cnt, (u16)IRDMA_MAX_FEATURES);
+ for (byte_idx = 0, feat_idx = 0; feat_idx < min(feat_cnt, IRDMA_MAX_FEATURES);
feat_idx++, byte_idx += 8) {
get_64bit_val(feat_buf.va, byte_idx, &temp);
feat_type = FIELD_GET(IRDMA_FEATURE_TYPE, temp);
@@ -5409,6 +5424,7 @@
irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable)
{
u32 reg_val;
+
reg_val = FIELD_PREP(IRDMA_PFINT_AEQCTL_CAUSE_ENA, enable) |
FIELD_PREP(IRDMA_PFINT_AEQCTL_MSIX_INDX, idx) |
FIELD_PREP(IRDMA_PFINT_AEQCTL_ITR_INDX, IRDMA_IDX_NOITR);
@@ -5423,14 +5439,16 @@
void
sc_vsi_update_stats(struct irdma_sc_vsi *vsi)
{
- struct irdma_gather_stats *gather_stats;
- struct irdma_gather_stats *last_gather_stats;
-
- gather_stats = vsi->pestat->gather_info.gather_stats_va;
- last_gather_stats = vsi->pestat->gather_info.last_gather_stats_va;
- irdma_update_stats(&vsi->pestat->hw_stats, gather_stats,
- last_gather_stats, vsi->dev->hw_stats_map,
- vsi->dev->hw_attrs.max_stat_idx);
+ struct irdma_dev_hw_stats *hw_stats = &vsi->pestat->hw_stats;
+ struct irdma_gather_stats *gather_stats =
+ vsi->pestat->gather_info.gather_stats_va;
+ struct irdma_gather_stats *last_gather_stats =
+ vsi->pestat->gather_info.last_gather_stats_va;
+ const struct irdma_hw_stat_map *map = vsi->dev->hw_stats_map;
+ u16 max_stat_idx = vsi->dev->hw_attrs.max_stat_idx;
+
+ irdma_update_stats(hw_stats, gather_stats, last_gather_stats,
+ map, max_stat_idx);
}
/**
@@ -5500,7 +5518,6 @@
dev->hw_attrs.max_hw_outbound_msg_size = IRDMA_MAX_OUTBOUND_MSG_SIZE;
dev->hw_attrs.max_mr_size = IRDMA_MAX_MR_SIZE;
dev->hw_attrs.max_hw_inbound_msg_size = IRDMA_MAX_INBOUND_MSG_SIZE;
- dev->hw_attrs.max_hw_device_pages = IRDMA_MAX_PUSH_PAGE_COUNT;
dev->hw_attrs.uk_attrs.max_hw_inline = IRDMA_MAX_INLINE_DATA_SIZE;
dev->hw_attrs.max_hw_wqes = IRDMA_MAX_WQ_ENTRIES;
dev->hw_attrs.max_qp_wr = IRDMA_MAX_QP_WRS(IRDMA_MAX_QUANTA_PER_WR);
@@ -5528,7 +5545,6 @@
val, db_size);
return -ENODEV;
}
- dev->db_addr = dev->hw->hw_addr + (uintptr_t)dev->hw_regs[IRDMA_DB_ADDR_OFFSET];
return ret_code;
}
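
The irdma_ctrl.c hunks above close a race: irdma_sc_ccq_arm() and the CCQ completion path both read-modify-write shared state (the shadow-area arm word at byte 32 and the CQP SQ ring tail), so the patch serializes them behind dev->cqp_lock. A minimal userspace sketch of the pattern, with a pthread mutex standing in for spin_lock_irqsave() and a purely illustrative field layout:

/* Sketch: why the arm sequence needs the lock. Two CPUs doing this
 * read-modify-write unlocked can lose an increment of the arm
 * sequence number. The mask below is hypothetical, not the hardware's. */
#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t cqp_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile uint64_t shadow_word;      /* stands in for shadow area, byte 32 */

#define ARM_SEQ_MASK 0x3ULL                /* illustrative bits [1:0] */

static void ccq_arm(void)
{
        uint64_t val, seq;

        pthread_mutex_lock(&cqp_lock);     /* spin_lock_irqsave() */
        val = shadow_word;                 /* get_64bit_val() */
        seq = (val & ARM_SEQ_MASK) + 1;
        val = (val & ~ARM_SEQ_MASK) | (seq & ARM_SEQ_MASK);
        shadow_word = val;                 /* set_64bit_val() */
        pthread_mutex_unlock(&cqp_lock);   /* spin_unlock_irqrestore() */
        /* write barrier + doorbell write would follow here */
}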
diff --git a/sys/dev/irdma/irdma_defs.h b/sys/dev/irdma/irdma_defs.h
--- a/sys/dev/irdma/irdma_defs.h
+++ b/sys/dev/irdma/irdma_defs.h
@@ -249,7 +249,6 @@
IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE = 22,
IRDMA_OP_SUSPEND = 23,
IRDMA_OP_RESUME = 24,
- IRDMA_OP_MANAGE_VCHNL_REQ_PBLE_BP = 25,
IRDMA_OP_QUERY_FPM_VAL = 26,
IRDMA_OP_COMMIT_FPM_VAL = 27,
IRDMA_OP_AH_CREATE = 28,
@@ -292,7 +291,6 @@
#define IRDMA_CQP_OP_DEALLOC_STAG 0x0d
#define IRDMA_CQP_OP_MANAGE_LOC_MAC_TABLE 0x0e
#define IRDMA_CQP_OP_MANAGE_ARP 0x0f
-#define IRDMA_CQP_OP_MANAGE_VCHNL_REQ_PBLE_BP 0x10
#define IRDMA_CQP_OP_MANAGE_PUSH_PAGES 0x11
#define IRDMA_CQP_OP_QUERY_RDMA_FEATURES 0x12
#define IRDMA_CQP_OP_UPLOAD_CONTEXT 0x13
@@ -849,7 +847,6 @@
#define IRDMA_CQPSQ_UCTX_RAWFORMAT BIT_ULL(61)
#define IRDMA_CQPSQ_UCTX_FREEZEQP_S 62
#define IRDMA_CQPSQ_UCTX_FREEZEQP BIT_ULL(62)
-
#define IRDMA_CQPSQ_MHMC_VFIDX_S 0
#define IRDMA_CQPSQ_MHMC_VFIDX GENMASK_ULL(15, 0)
#define IRDMA_CQPSQ_MHMC_FREEPMFN_S 62
diff --git a/sys/dev/irdma/irdma_hw.c b/sys/dev/irdma/irdma_hw.c
--- a/sys/dev/irdma/irdma_hw.c
+++ b/sys/dev/irdma/irdma_hw.c
@@ -276,6 +276,7 @@
switch (info->ae_id) {
struct irdma_cm_node *cm_node;
+
case IRDMA_AE_LLP_CONNECTION_ESTABLISHED:
cm_node = iwqp->cm_node;
if (cm_node->accept_pend) {
@@ -393,6 +394,7 @@
case IRDMA_AE_LLP_TOO_MANY_RETRIES:
case IRDMA_AE_LCE_QP_CATASTROPHIC:
case IRDMA_AE_LCE_FUNCTION_CATASTROPHIC:
+ case IRDMA_AE_LLP_TOO_MANY_RNRS:
case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG:
default:
irdma_dev_err(&iwdev->ibdev,
@@ -489,7 +491,7 @@
{
struct irdma_qvlist_info *iw_qvlist;
struct irdma_qv_info *iw_qvinfo;
- u32 ceq_idx;
+ u16 ceq_idx;
u32 i;
u32 size;
@@ -499,8 +501,8 @@
}
size = sizeof(struct irdma_msix_vector) * rf->msix_count;
- size += sizeof(struct irdma_qvlist_info);
- size += sizeof(struct irdma_qv_info) * rf->msix_count - 1;
+ size += sizeof(*iw_qvlist);
+ size += sizeof(*iw_qvinfo) * rf->msix_count - 1;
rf->iw_msixtbl = kzalloc(size, GFP_KERNEL);
if (!rf->iw_msixtbl)
return -ENOMEM;
@@ -599,6 +601,13 @@
dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
irdma_free_irq(rf, msix_vec);
+ if (rf == dev_id) {
+ tasklet_kill(&rf->dpc_tasklet);
+ } else {
+ struct irdma_ceq *iwceq = (struct irdma_ceq *)dev_id;
+
+ tasklet_kill(&iwceq->dpc_tasklet);
+ }
}
/**
@@ -963,13 +972,13 @@
u16 maj_err, min_err;
int i, status;
- cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests), GFP_KERNEL);
- memset(cqp->cqp_requests, 0, sqsize * sizeof(*cqp->cqp_requests));
+ cqp->cqp_requests = kcalloc(sqsize, sizeof(*cqp->cqp_requests),
+ GFP_KERNEL);
if (!cqp->cqp_requests)
return -ENOMEM;
- cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array), GFP_KERNEL);
- memset(cqp->scratch_array, 0, sqsize * sizeof(*cqp->scratch_array));
+ cqp->scratch_array = kcalloc(sqsize, sizeof(*cqp->scratch_array),
+ GFP_KERNEL);
if (!cqp->scratch_array) {
status = -ENOMEM;
goto err_scratch;
@@ -1189,7 +1198,7 @@
*/
static int
irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
- u32 ceq_id, struct irdma_msix_vector *msix_vec)
+ u16 ceq_id, struct irdma_msix_vector *msix_vec)
{
int status;
@@ -1263,7 +1272,7 @@
*/
static int
irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
- u32 ceq_id, struct irdma_sc_vsi *vsi)
+ u16 ceq_id, struct irdma_sc_vsi *vsi)
{
int status;
struct irdma_ceq_init_info info = {0};
@@ -1380,7 +1389,7 @@
irdma_setup_ceqs(struct irdma_pci_f *rf, struct irdma_sc_vsi *vsi)
{
u32 i;
- u32 ceq_id;
+ u16 ceq_id;
struct irdma_ceq *iwceq;
struct irdma_msix_vector *msix_vec;
int status;
@@ -1750,6 +1759,7 @@
irdma_rt_deinit_hw(struct irdma_device *iwdev)
{
struct irdma_sc_qp qp = {{0}};
+
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_INIT, "state = %d\n", iwdev->init_state);
switch (iwdev->init_state) {
@@ -2011,6 +2021,7 @@
{
struct irdma_sc_dev *dev = &rf->sc_dev;
int status;
+
do {
status = irdma_setup_init_state(rf);
if (status)
@@ -2209,14 +2220,19 @@
cqp_request = (struct irdma_cqp_request *)
(uintptr_t)info.scratch;
- if (info.error && irdma_cqp_crit_err(dev, cqp_request->info.cqp_cmd,
+ if (info.error && irdma_cqp_crit_err(dev,
+ cqp_request->info.cqp_cmd,
info.maj_err_code,
info.min_err_code))
- irdma_dev_err(&rf->iwdev->ibdev, "cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
- info.op_code, info.maj_err_code, info.min_err_code);
+ irdma_dev_err(&rf->iwdev->ibdev,
+ "cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n",
+ info.op_code, info.maj_err_code,
+ info.min_err_code);
if (cqp_request) {
- cqp_request->compl_info.maj_err_code = info.maj_err_code;
- cqp_request->compl_info.min_err_code = info.min_err_code;
+ cqp_request->compl_info.maj_err_code =
+ info.maj_err_code;
+ cqp_request->compl_info.min_err_code =
+ info.min_err_code;
cqp_request->compl_info.op_ret_val = info.op_ret_val;
cqp_request->compl_info.error = info.error;
irdma_complete_cqp_request(&rf->cqp, cqp_request);
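
Two of the irdma_hw.c hunks above deserve a note. The allocation change is a real bug fix, not just a style cleanup: the old code called memset() on the buffer before checking whether kcalloc() had succeeded, so an allocation failure meant a NULL dereference, and the memset was redundant anyway because kcalloc() returns zeroed memory. A plain-C sketch of both versions, with calloc() standing in for kcalloc():

/* Sketch of the allocation fix: calloc (like kcalloc) already zeroes,
 * and touching the buffer before the NULL check crashes on a failed
 * allocation. */
#include <stdlib.h>
#include <string.h>

struct req { int done; };

static struct req *alloc_requests_buggy(size_t n)
{
        struct req *r = calloc(n, sizeof(*r));

        memset(r, 0, n * sizeof(*r));  /* BUG: r may be NULL; also redundant */
        if (!r)
                return NULL;
        return r;
}

static struct req *alloc_requests_fixed(size_t n)
{
        struct req *r = calloc(n, sizeof(*r)); /* zeroed on success */

        if (!r)
                return NULL;                   /* check before any use */
        return r;
}

The added tasklet_kill() calls in the IRQ teardown path apply the same discipline: stop deferred work before freeing the structures it touches.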
diff --git a/sys/dev/irdma/irdma_kcompat.c b/sys/dev/irdma/irdma_kcompat.c
--- a/sys/dev/irdma/irdma_kcompat.c
+++ b/sys/dev/irdma/irdma_kcompat.c
@@ -96,7 +96,6 @@
return 0;
}
-#if __FreeBSD_version >= 1400026
/**
* irdma_alloc_mr - register stag for fast memory registration
* @pd: ibpd pointer
@@ -108,18 +107,6 @@
irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
u32 max_num_sg, struct ib_udata *udata)
{
-#else
-/**
- * irdma_alloc_mr - register stag for fast memory registration
- * @pd: ibpd pointer
- * @mr_type: memory for stag registrion
- * @max_num_sg: man number of pages
- */
-struct ib_mr *
-irdma_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
- u32 max_num_sg)
-{
-#endif
struct irdma_device *iwdev = to_iwdev(pd->device);
struct irdma_pble_alloc *palloc;
struct irdma_pbl *iwpbl;
@@ -174,7 +161,6 @@
#define IRDMA_ALLOC_UCTX_MIN_REQ_LEN offsetofend(struct irdma_alloc_ucontext_req, rsvd8)
#define IRDMA_ALLOC_UCTX_MIN_RESP_LEN offsetofend(struct irdma_alloc_ucontext_resp, rsvd)
-#if __FreeBSD_version >= 1400026
/**
* irdma_alloc_ucontext - Allocate the user context data structure
* @uctx: context
@@ -236,6 +222,8 @@
uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
uresp.hw_rev = uk_attrs->hw_rev;
uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR;
+ uresp.min_hw_wq_size = uk_attrs->min_hw_wq_size;
+ uresp.comp_mask |= IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE;
bar_off =
(uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
@@ -269,119 +257,8 @@
req.userspace_ver, IRDMA_ABI_VER);
return -EINVAL;
}
-#endif
-
-#if __FreeBSD_version < 1400026
-/**
- * irdma_alloc_ucontext - Allocate the user context data structure
- * @ibdev: ib device pointer
- * @udata: user data
- *
- * This keeps track of all objects associated with a particular
- * user-mode client.
- */
-struct ib_ucontext *
-irdma_alloc_ucontext(struct ib_device *ibdev, struct ib_udata *udata)
-{
- struct irdma_device *iwdev = to_iwdev(ibdev);
- struct irdma_alloc_ucontext_req req = {0};
- struct irdma_alloc_ucontext_resp uresp = {0};
- struct irdma_ucontext *ucontext;
- struct irdma_uk_attrs *uk_attrs = &iwdev->rf->sc_dev.hw_attrs.uk_attrs;
-
- if (udata->inlen < IRDMA_ALLOC_UCTX_MIN_REQ_LEN ||
- udata->outlen < IRDMA_ALLOC_UCTX_MIN_RESP_LEN)
- return ERR_PTR(-EINVAL);
-
- if (ib_copy_from_udata(&req, udata, min(sizeof(req), udata->inlen)))
- return ERR_PTR(-EINVAL);
-
- if (req.userspace_ver < 4 || req.userspace_ver > IRDMA_ABI_VER)
- goto ver_error;
-
- ucontext = kzalloc(sizeof(*ucontext), GFP_KERNEL);
- if (!ucontext)
- return ERR_PTR(-ENOMEM);
-
- ucontext->iwdev = iwdev;
- ucontext->abi_ver = req.userspace_ver;
-
- if (req.comp_mask & IRDMA_ALLOC_UCTX_USE_RAW_ATTR)
- ucontext->use_raw_attrs = true;
-
- /* GEN_1 legacy support with libi40iw */
- if (udata->outlen == IRDMA_ALLOC_UCTX_MIN_RESP_LEN) {
- if (uk_attrs->hw_rev != IRDMA_GEN_1) {
- kfree(ucontext);
- return ERR_PTR(-EOPNOTSUPP);
- }
-
- ucontext->legacy_mode = true;
- uresp.max_qps = iwdev->rf->max_qp;
- uresp.max_pds = iwdev->rf->sc_dev.hw_attrs.max_hw_pds;
- uresp.wq_size = iwdev->rf->sc_dev.hw_attrs.max_qp_wr * 2;
- uresp.kernel_ver = req.userspace_ver;
- if (ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen))) {
- kfree(ucontext);
- return ERR_PTR(-EFAULT);
- }
- } else {
- u64 bar_off;
-
- uresp.kernel_ver = IRDMA_ABI_VER;
- uresp.feature_flags = uk_attrs->feature_flags;
- uresp.max_hw_wq_frags = uk_attrs->max_hw_wq_frags;
- uresp.max_hw_read_sges = uk_attrs->max_hw_read_sges;
- uresp.max_hw_inline = uk_attrs->max_hw_inline;
- uresp.max_hw_rq_quanta = uk_attrs->max_hw_rq_quanta;
- uresp.max_hw_wq_quanta = uk_attrs->max_hw_wq_quanta;
- uresp.max_hw_sq_chunk = uk_attrs->max_hw_sq_chunk;
- uresp.max_hw_cq_size = uk_attrs->max_hw_cq_size;
- uresp.min_hw_cq_size = uk_attrs->min_hw_cq_size;
- uresp.hw_rev = uk_attrs->hw_rev;
- uresp.comp_mask |= IRDMA_ALLOC_UCTX_USE_RAW_ATTR;
-
- bar_off =
- (uintptr_t)iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET];
- spin_lock_init(&ucontext->mmap_tbl_lock);
- ucontext->db_mmap_entry =
- irdma_user_mmap_entry_add_hash(ucontext, bar_off,
- IRDMA_MMAP_IO_NC,
- &uresp.db_mmap_key);
- if (!ucontext->db_mmap_entry) {
- spin_lock_destroy(&ucontext->mmap_tbl_lock);
- kfree(ucontext);
- return ERR_PTR(-ENOMEM);
- }
- if (ib_copy_to_udata(udata, &uresp,
- min(sizeof(uresp), udata->outlen))) {
- irdma_user_mmap_entry_del_hash(ucontext->db_mmap_entry);
- spin_lock_destroy(&ucontext->mmap_tbl_lock);
- kfree(ucontext);
- return ERR_PTR(-EFAULT);
- }
- }
-
- INIT_LIST_HEAD(&ucontext->cq_reg_mem_list);
- spin_lock_init(&ucontext->cq_reg_mem_list_lock);
- INIT_LIST_HEAD(&ucontext->qp_reg_mem_list);
- spin_lock_init(&ucontext->qp_reg_mem_list_lock);
- INIT_LIST_HEAD(&ucontext->vma_list);
- mutex_init(&ucontext->vma_list_mutex);
-
- return &ucontext->ibucontext;
-
-ver_error:
- irdma_dev_err(&iwdev->ibdev,
- "Invalid userspace driver version detected. Detected version %d, should be %d\n",
- req.userspace_ver, IRDMA_ABI_VER);
- return ERR_PTR(-EINVAL);
-}
-#endif
-
-#if __FreeBSD_version >= 1400026
/**
* irdma_dealloc_ucontext - deallocate the user context data structure
* @context: user context created during alloc
@@ -395,28 +272,9 @@
return;
}
-#endif
-#if __FreeBSD_version < 1400026
-/**
- * irdma_dealloc_ucontext - deallocate the user context data structure
- * @context: user context created during alloc
- */
-int
-irdma_dealloc_ucontext(struct ib_ucontext *context)
-{
- struct irdma_ucontext *ucontext = to_ucontext(context);
-
- irdma_user_mmap_entry_del_hash(ucontext->db_mmap_entry);
- spin_lock_destroy(&ucontext->mmap_tbl_lock);
- kfree(ucontext);
-
- return 0;
-}
-#endif
#define IRDMA_ALLOC_PD_MIN_RESP_LEN offsetofend(struct irdma_alloc_pd_resp, rsvd)
-#if __FreeBSD_version >= 1400026
/**
* irdma_alloc_pd - allocate protection domain
* @pd: protection domain
@@ -469,70 +327,8 @@
return err;
}
-#endif
-#if __FreeBSD_version < 1400026
-/**
- * irdma_alloc_pd - allocate protection domain
- * @ibdev: IB device
- * @context: user context
- * @udata: user data
- */
-struct ib_pd *
-irdma_alloc_pd(struct ib_device *ibdev, struct ib_ucontext *context, struct ib_udata *udata)
-{
- struct irdma_pd *iwpd;
- struct irdma_device *iwdev = to_iwdev(ibdev);
- struct irdma_sc_dev *dev = &iwdev->rf->sc_dev;
- struct irdma_pci_f *rf = iwdev->rf;
- struct irdma_alloc_pd_resp uresp = {0};
- struct irdma_sc_pd *sc_pd;
- u32 pd_id = 0;
- int err;
-
- err = irdma_alloc_rsrc(rf, rf->allocated_pds, rf->max_pd, &pd_id,
- &rf->next_pd);
- if (err)
- return ERR_PTR(err);
- iwpd = kzalloc(sizeof(*iwpd), GFP_KERNEL);
- if (!iwpd) {
- err = -ENOMEM;
- goto free_res;
- }
-
- sc_pd = &iwpd->sc_pd;
- if (udata) {
- struct irdma_ucontext *ucontext = to_ucontext(context);
-
- irdma_sc_pd_init(dev, sc_pd, pd_id, ucontext->abi_ver);
- uresp.pd_id = pd_id;
- if (ib_copy_to_udata(udata, &uresp,
- min(sizeof(uresp), udata->outlen))) {
- err = -EFAULT;
- goto error;
- }
- } else {
- irdma_sc_pd_init(dev, sc_pd, pd_id, IRDMA_ABI_VER);
- }
-
- spin_lock_init(&iwpd->udqp_list_lock);
- INIT_LIST_HEAD(&iwpd->udqp_list);
-
- return &iwpd->ibpd;
-
-error:
- kfree(iwpd);
-free_res:
-
- irdma_free_rsrc(rf, rf->allocated_pds, pd_id);
-
- return ERR_PTR(err);
-}
-
-#endif
-
-#if __FreeBSD_version >= 1400026
void
irdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
@@ -542,20 +338,7 @@
irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id);
}
-#endif
-
-#if __FreeBSD_version < 1400026
-int
-irdma_dealloc_pd(struct ib_pd *ibpd)
-{
- struct irdma_pd *iwpd = to_iwpd(ibpd);
- struct irdma_device *iwdev = to_iwdev(ibpd->device);
- irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_pds, iwpd->sc_pd.pd_id);
- kfree(iwpd);
- return 0;
-}
-#endif
/**
* irdma_find_qp_update_qs - update QS handle for UD QPs
@@ -684,24 +467,40 @@
irdma_create_ah_wait(struct irdma_pci_f *rf,
struct irdma_sc_ah *sc_ah, bool sleep)
{
+ int ret;
+
if (!sleep) {
int cnt = rf->sc_dev.hw_attrs.max_cqp_compl_wait_time_ms *
CQP_TIMEOUT_THRESHOLD;
+ struct irdma_cqp_request *cqp_request =
+ sc_ah->ah_info.cqp_request;
do {
irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq);
mdelay(1);
- } while (!sc_ah->ah_info.ah_valid && --cnt);
+ } while (!READ_ONCE(cqp_request->request_done) && --cnt);
- if (!cnt)
- return -ETIMEDOUT;
+ if (cnt && !cqp_request->compl_info.op_ret_val) {
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+ sc_ah->ah_info.ah_valid = true;
+ } else {
+ ret = !cnt ? -ETIMEDOUT : -EINVAL;
+ irdma_dev_err(&rf->iwdev->ibdev, "CQP create AH error ret = %d op_ret_val = %d",
+ ret, cqp_request->compl_info.op_ret_val);
+ irdma_put_cqp_request(&rf->cqp, cqp_request);
+ if (!cnt && !rf->reset) {
+ rf->reset = true;
+ rf->gen_ops.request_reset(rf);
+ }
+ return ret;
+ }
}
+
return 0;
}
#define IRDMA_CREATE_AH_MIN_RESP_LEN offsetofend(struct irdma_create_ah_resp, rsvd)
-#if __FreeBSD_version >= 1400026
/**
* irdma_create_ah - create address handle
* @ib_ah: ptr to AH
@@ -787,17 +586,15 @@
goto err_gid_l2;
err = irdma_ah_cqp_op(iwdev->rf, sc_ah, IRDMA_OP_AH_CREATE,
- sleep, irdma_gsi_ud_qp_ah_cb, sc_ah);
+ sleep, NULL, sc_ah);
if (err) {
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_DEV, "CQP-OP Create AH fail");
goto err_gid_l2;
}
err = irdma_create_ah_wait(rf, sc_ah, sleep);
- if (err) {
- irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_DEV, "CQP create AH timed out");
+ if (err)
goto err_gid_l2;
- }
if (udata) {
uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
@@ -815,7 +612,6 @@
return err;
}
-#endif
void
irdma_ether_copy(u8 *dmac, struct ib_ah_attr *attr)
@@ -823,162 +619,20 @@
ether_addr_copy(dmac, attr->dmac);
}
-#if __FreeBSD_version < 1400026
-struct ib_ah *
-irdma_create_ah_stub(struct ib_pd *ibpd,
- struct ib_ah_attr *attr,
- struct ib_udata *udata)
-#else
int
irdma_create_ah_stub(struct ib_ah *ib_ah,
struct ib_ah_attr *attr, u32 flags,
struct ib_udata *udata)
-#endif
{
-#if __FreeBSD_version >= 1400026
return -ENOSYS;
-#else
- return ERR_PTR(-ENOSYS);
-#endif
}
-#if __FreeBSD_version >= 1400026
void
irdma_destroy_ah_stub(struct ib_ah *ibah, u32 flags)
{
return;
}
-#else
-int
-irdma_destroy_ah_stub(struct ib_ah *ibah)
-{
- return -ENOSYS;
-}
-#endif
-#if __FreeBSD_version < 1400026
-/**
- * irdma_create_ah - create address handle
- * @ibpd: ptr to pd
- * @attr: address handle attributes
- * @udata: user data
- *
- * returns a pointer to an address handle
- */
-struct ib_ah *
-irdma_create_ah(struct ib_pd *ibpd,
- struct ib_ah_attr *attr,
- struct ib_udata *udata)
-{
- struct irdma_pd *pd = to_iwpd(ibpd);
- struct irdma_device *iwdev = to_iwdev(ibpd->device);
- struct irdma_ah *ah;
- union ib_gid sgid;
- struct ib_gid_attr sgid_attr;
- struct irdma_pci_f *rf = iwdev->rf;
- struct irdma_sc_ah *sc_ah;
- u32 ah_id = 0;
- struct irdma_ah_info *ah_info;
- struct irdma_create_ah_resp uresp = {};
- union irdma_sockaddr sgid_addr, dgid_addr;
- int err;
- u8 dmac[ETHER_ADDR_LEN];
- bool sleep = udata ? true : false;
-
- if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
- return ERR_PTR(-EINVAL);
-
- err = irdma_alloc_rsrc(rf, rf->allocated_ahs,
- rf->max_ah, &ah_id, &rf->next_ah);
-
- if (err)
- return ERR_PTR(err);
-
- ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
- if (!ah) {
- irdma_free_rsrc(rf, rf->allocated_ahs, ah_id);
- return ERR_PTR(-ENOMEM);
- }
-
- ah->pd = pd;
- sc_ah = &ah->sc_ah;
- sc_ah->ah_info.ah_idx = ah_id;
- sc_ah->ah_info.vsi = &iwdev->vsi;
- irdma_sc_init_ah(&rf->sc_dev, sc_ah);
- ah->sgid_index = attr->grh.sgid_index;
- memcpy(&ah->dgid, &attr->grh.dgid, sizeof(ah->dgid));
- rcu_read_lock();
- err = ib_get_cached_gid(&iwdev->ibdev, attr->port_num,
- attr->grh.sgid_index, &sgid, &sgid_attr);
- rcu_read_unlock();
- if (err) {
- irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
- "GID lookup at idx=%d with port=%d failed\n",
- attr->grh.sgid_index, attr->port_num);
- err = -EINVAL;
- goto err_gid_l2;
- }
- rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid);
- rdma_gid2ip((struct sockaddr *)&dgid_addr, &attr->grh.dgid);
- ah->av.attrs = *attr;
- ah->av.net_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
-
- if (sgid_attr.ndev)
- dev_put(sgid_attr.ndev);
-
- ah_info = &sc_ah->ah_info;
- ah_info->ah_idx = ah_id;
- ah_info->pd_idx = pd->sc_pd.pd_id;
-
- ether_addr_copy(ah_info->mac_addr, if_getlladdr(iwdev->netdev));
- if (attr->ah_flags & IB_AH_GRH) {
- ah_info->flow_label = attr->grh.flow_label;
- ah_info->hop_ttl = attr->grh.hop_limit;
- ah_info->tc_tos = attr->grh.traffic_class;
- }
-
- if (udata)
- ib_resolve_eth_dmac(ibpd->device, attr);
- irdma_ether_copy(dmac, attr);
-
- irdma_fill_ah_info(if_getvnet(iwdev->netdev), ah_info, &sgid_attr, &sgid_addr, &dgid_addr,
- dmac, ah->av.net_type);
-
- err = irdma_create_ah_vlan_tag(iwdev, pd, ah_info, &sgid_attr, dmac);
- if (err)
- goto err_gid_l2;
-
- err = irdma_ah_cqp_op(iwdev->rf, sc_ah, IRDMA_OP_AH_CREATE,
- sleep, irdma_gsi_ud_qp_ah_cb, sc_ah);
- if (err) {
- irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "CQP-OP Create AH fail");
- goto err_gid_l2;
- }
-
- err = irdma_create_ah_wait(rf, sc_ah, sleep);
- if (err) {
- irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_DEV, "CQP create AH timed out");
- goto err_gid_l2;
- }
-
- if (udata) {
- uresp.ah_id = ah->sc_ah.ah_info.ah_idx;
- err = ib_copy_to_udata(udata, &uresp, min(sizeof(uresp), udata->outlen));
- if (err) {
- irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah,
- IRDMA_OP_AH_DESTROY, false, NULL, ah);
- goto err_gid_l2;
- }
- }
-
- return &ah->ibah;
-err_gid_l2:
- kfree(ah);
- irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs, ah_id);
-
- return ERR_PTR(err);
-}
-#endif
/**
* irdma_free_qp_rsrc - free up memory resources for qp
@@ -992,7 +646,7 @@
u32 qp_num = iwqp->ibqp.qp_num;
irdma_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
- irdma_dealloc_push_page(rf, &iwqp->sc_qp);
+ irdma_dealloc_push_page(rf, iwqp);
if (iwqp->sc_qp.vsi) {
irdma_qp_rem_qos(&iwqp->sc_qp);
iwqp->sc_qp.dev->ws_remove(iwqp->sc_qp.vsi,
@@ -1186,12 +840,17 @@
if (udata) {
/* GEN_1 legacy support with libi40iw does not have expanded uresp struct */
- if (udata->outlen < sizeof(uresp)) {
+ if (udata->outlen == IRDMA_CREATE_QP_MIN_RESP_LEN) {
uresp.lsmm = 1;
uresp.push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX_GEN_1;
} else {
- if (rdma_protocol_iwarp(&iwdev->ibdev, 1))
+ if (rdma_protocol_iwarp(&iwdev->ibdev, 1)) {
uresp.lsmm = 1;
+ if (qp->qp_uk.start_wqe_idx) {
+ uresp.comp_mask |= IRDMA_CREATE_QP_USE_START_WQE_IDX;
+ uresp.start_wqe_idx = qp->qp_uk.start_wqe_idx;
+ }
+ }
}
uresp.actual_sq_size = init_info.qp_uk_init_info.sq_size;
uresp.actual_rq_size = init_info.qp_uk_init_info.rq_size;
@@ -1202,7 +861,7 @@
min(sizeof(uresp), udata->outlen));
if (err_code) {
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS, "copy_to_udata failed\n");
- kc_irdma_destroy_qp(&iwqp->ibqp, udata);
+ irdma_destroy_qp(&iwqp->ibqp, udata);
return ERR_PTR(err_code);
}
}
@@ -1221,13 +880,8 @@
* @ibqp: qp's ib pointer also to get to device's qp address
* @udata: user data
*/
-#if __FreeBSD_version >= 1400026
int
irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
-#else
-int
-irdma_destroy_qp(struct ib_qp *ibqp)
-#endif
{
struct irdma_qp *iwqp = to_iwqp(ibqp);
struct irdma_device *iwdev = iwqp->iwdev;
@@ -1270,31 +924,17 @@
* @attr: attributes for cq
* @udata: user data
*/
-#if __FreeBSD_version >= 1400026
int
irdma_create_cq(struct ib_cq *ibcq,
const struct ib_cq_init_attr *attr,
struct ib_udata *udata)
-#else
-struct ib_cq *
-irdma_create_cq(struct ib_device *ibdev,
- const struct ib_cq_init_attr *attr,
- struct ib_ucontext *context,
- struct ib_udata *udata)
-#endif
{
#define IRDMA_CREATE_CQ_MIN_REQ_LEN offsetofend(struct irdma_create_cq_req, user_cq_buf)
#define IRDMA_CREATE_CQ_MIN_RESP_LEN offsetofend(struct irdma_create_cq_resp, cq_size)
-#if __FreeBSD_version >= 1400026
struct ib_device *ibdev = ibcq->device;
-#endif
struct irdma_device *iwdev = to_iwdev(ibdev);
struct irdma_pci_f *rf = iwdev->rf;
-#if __FreeBSD_version >= 1400026
struct irdma_cq *iwcq = to_iwcq(ibcq);
-#else
- struct irdma_cq *iwcq;
-#endif
u32 cq_num = 0;
struct irdma_sc_cq *cq;
struct irdma_sc_dev *dev = &rf->sc_dev;
@@ -1308,7 +948,6 @@
int entries = attr->cqe;
bool cqe_64byte_ena;
-#if __FreeBSD_version >= 1400026
err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
if (err_code)
return err_code;
@@ -1316,27 +955,10 @@
if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN ||
udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN))
return -EINVAL;
-#else
- err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
- if (err_code)
- return ERR_PTR(err_code);
-
- if (udata && (udata->inlen < IRDMA_CREATE_CQ_MIN_REQ_LEN ||
- udata->outlen < IRDMA_CREATE_CQ_MIN_RESP_LEN))
- return ERR_PTR(-EINVAL);
-
- iwcq = kzalloc(sizeof(*iwcq), GFP_KERNEL);
- if (!iwcq)
- return ERR_PTR(-ENOMEM);
-#endif
err_code = irdma_alloc_rsrc(rf, rf->allocated_cqs, rf->max_cq, &cq_num,
&rf->next_cq);
if (err_code)
-#if __FreeBSD_version >= 1400026
return err_code;
-#else
- goto error;
-#endif
cq = &iwcq->sc_cq;
cq->back_cq = iwcq;
atomic_set(&iwcq->refcnt, 1);
@@ -1366,11 +988,7 @@
struct irdma_cq_mr *cqmr_shadow;
iwcq->user_mode = true;
-#if __FreeBSD_version >= 1400026
ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
-#else
- ucontext = to_ucontext(context);
-#endif
if (ib_copy_from_udata(&req, udata,
min(sizeof(req), udata->inlen))) {
@@ -1499,11 +1117,7 @@
rf->cq_table[cq_num] = iwcq;
init_completion(&iwcq->free_cq);
-#if __FreeBSD_version >= 1400026
return 0;
-#else
- return &iwcq->ibcq;
-#endif
cq_destroy:
irdma_cq_wq_destroy(rf, cq);
cq_kmem_free:
@@ -1513,13 +1127,7 @@
}
cq_free_rsrc:
irdma_free_rsrc(rf, rf->allocated_cqs, cq_num);
-#if __FreeBSD_version >= 1400026
return err_code;
-#else
-error:
- kfree(iwcq);
- return ERR_PTR(err_code);
-#endif
}
/**
@@ -1569,7 +1177,6 @@
* @ah_flags: destroy flags
*/
-#if __FreeBSD_version >= 1400026
void
irdma_destroy_ah(struct ib_ah *ibah, u32 ah_flags)
{
@@ -1582,33 +1189,10 @@
irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs,
ah->sc_ah.ah_info.ah_idx);
}
-#endif
-#if __FreeBSD_version < 1400026
-int
-irdma_destroy_ah(struct ib_ah *ibah)
-{
- struct irdma_device *iwdev = to_iwdev(ibah->device);
- struct irdma_ah *ah = to_iwah(ibah);
-
- irdma_ah_cqp_op(iwdev->rf, &ah->sc_ah, IRDMA_OP_AH_DESTROY,
- false, NULL, ah);
-
- irdma_free_rsrc(iwdev->rf, iwdev->rf->allocated_ahs,
- ah->sc_ah.ah_info.ah_idx);
- kfree(ah);
- return 0;
-}
-#endif
-
-#if __FreeBSD_version >= 1400026
int
irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
-#else
-int
-irdma_dereg_mr(struct ib_mr *ib_mr)
-#endif
{
struct irdma_mr *iwmr = to_iwmr(ib_mr);
struct irdma_device *iwdev = to_iwdev(ib_mr->device);
@@ -1618,15 +1202,9 @@
if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) {
if (iwmr->region) {
struct irdma_ucontext *ucontext;
-#if __FreeBSD_version >= 1400026
ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
-#else
- struct ib_pd *ibpd = ib_mr->pd;
-
- ucontext = to_ucontext(ibpd->uobject->context);
-#endif
irdma_del_memlist(iwmr, ucontext);
}
goto done;
@@ -1734,7 +1312,6 @@
return 0;
}
-#if __FreeBSD_version >= 1400026
/**
* irdma_destroy_cq - destroy cq
* @ib_cq: cq pointer
@@ -1769,90 +1346,6 @@
irdma_cq_free_rsrc(iwdev->rf, iwcq);
}
-#endif
-#if __FreeBSD_version < 1400026
-/**
- * irdma_destroy_cq - destroy cq
- * @ib_cq: cq pointer
- */
-int
-irdma_destroy_cq(struct ib_cq *ib_cq)
-{
- struct irdma_device *iwdev = to_iwdev(ib_cq->device);
- struct irdma_cq *iwcq = to_iwcq(ib_cq);
- struct irdma_sc_cq *cq = &iwcq->sc_cq;
- struct irdma_sc_dev *dev = cq->dev;
- struct irdma_sc_ceq *ceq = dev->ceq[cq->ceq_id];
- struct irdma_ceq *iwceq = container_of(ceq, struct irdma_ceq, sc_ceq);
- unsigned long flags;
-
- spin_lock_irqsave(&iwcq->lock, flags);
- if (!list_empty(&iwcq->cmpl_generated))
- irdma_remove_cmpls_list(iwcq);
- if (!list_empty(&iwcq->resize_list))
- irdma_process_resize_list(iwcq, iwdev, NULL);
- spin_unlock_irqrestore(&iwcq->lock, flags);
-
- irdma_cq_rem_ref(ib_cq);
- wait_for_completion(&iwcq->free_cq);
-
- irdma_cq_wq_destroy(iwdev->rf, cq);
-
- spin_lock_irqsave(&iwceq->ce_lock, flags);
- irdma_sc_cleanup_ceqes(cq, ceq);
- spin_unlock_irqrestore(&iwceq->ce_lock, flags);
-
- irdma_cq_free_rsrc(iwdev->rf, iwcq);
- kfree(iwcq);
-
- return 0;
-}
-
-#endif
-/**
- * irdma_alloc_mw - Allocate memory window
- * @pd: Protection domain
- * @type: Window type
- * @udata: user data pointer
- */
-struct ib_mw *
-irdma_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
- struct ib_udata *udata)
-{
- struct irdma_device *iwdev = to_iwdev(pd->device);
- struct irdma_mr *iwmr;
- int err_code;
- u32 stag;
-
- if (type != IB_MW_TYPE_1 && type != IB_MW_TYPE_2)
- return ERR_PTR(-EINVAL);
-
- iwmr = kzalloc(sizeof(*iwmr), GFP_KERNEL);
- if (!iwmr)
- return ERR_PTR(-ENOMEM);
-
- stag = irdma_create_stag(iwdev);
- if (!stag) {
- kfree(iwmr);
- return ERR_PTR(-ENOMEM);
- }
-
- iwmr->stag = stag;
- iwmr->ibmw.rkey = stag;
- iwmr->ibmw.pd = pd;
- iwmr->ibmw.type = type;
- iwmr->ibmw.device = pd->device;
-
- err_code = irdma_hw_alloc_mw(iwdev, iwmr);
- if (err_code) {
- irdma_free_stag(iwdev, stag);
- kfree(iwmr);
- return ERR_PTR(err_code);
- }
-
- return &iwmr->ibmw;
-}
-
/**
* kc_set_loc_seq_num_mss - Set local seq number and mss
* @cm_node: cm node info
@@ -1875,113 +1368,6 @@
(cm_node->iwdev->vsi.mtu - IRDMA_MTU_TO_MSS_IPV6);
}
-#if __FreeBSD_version < 1400026
-struct irdma_vma_data {
- struct list_head list;
- struct vm_area_struct *vma;
- struct mutex *vma_list_mutex; /* protect the vma_list */
-};
-
-/**
- * irdma_vma_open -
- * @vma: User VMA
- */
-static void
-irdma_vma_open(struct vm_area_struct *vma)
-{
- vma->vm_ops = NULL;
-}
-
-/**
- * irdma_vma_close - Remove vma data from vma list
- * @vma: User VMA
- */
-static void
-irdma_vma_close(struct vm_area_struct *vma)
-{
- struct irdma_vma_data *vma_data;
-
- vma_data = vma->vm_private_data;
- vma->vm_private_data = NULL;
- vma_data->vma = NULL;
- mutex_lock(vma_data->vma_list_mutex);
- list_del(&vma_data->list);
- mutex_unlock(vma_data->vma_list_mutex);
- kfree(vma_data);
-}
-
-static const struct vm_operations_struct irdma_vm_ops = {
- .open = irdma_vma_open,
- .close = irdma_vma_close
-};
-
-/**
- * irdma_set_vma_data - Save vma data in context list
- * @vma: User VMA
- * @context: ib user context
- */
-static int
-irdma_set_vma_data(struct vm_area_struct *vma,
- struct irdma_ucontext *context)
-{
- struct list_head *vma_head = &context->vma_list;
- struct irdma_vma_data *vma_entry;
-
- vma_entry = kzalloc(sizeof(*vma_entry), GFP_KERNEL);
- if (!vma_entry)
- return -ENOMEM;
-
- vma->vm_private_data = vma_entry;
- vma->vm_ops = &irdma_vm_ops;
-
- vma_entry->vma = vma;
- vma_entry->vma_list_mutex = &context->vma_list_mutex;
-
- mutex_lock(&context->vma_list_mutex);
- list_add(&vma_entry->list, vma_head);
- mutex_unlock(&context->vma_list_mutex);
-
- return 0;
-}
-
-/**
- * irdma_disassociate_ucontext - Disassociate user context
- * @context: ib user context
- */
-void
-irdma_disassociate_ucontext(struct ib_ucontext *context)
-{
- struct irdma_ucontext *ucontext = to_ucontext(context);
-
- struct irdma_vma_data *vma_data, *n;
- struct vm_area_struct *vma;
-
- mutex_lock(&ucontext->vma_list_mutex);
- list_for_each_entry_safe(vma_data, n, &ucontext->vma_list, list) {
- vma = vma_data->vma;
- zap_vma_ptes(vma, vma->vm_start, PAGE_SIZE);
-
- vma->vm_ops = NULL;
- list_del(&vma_data->list);
- kfree(vma_data);
- }
- mutex_unlock(&ucontext->vma_list_mutex);
-}
-
-int
-rdma_user_mmap_io(struct ib_ucontext *context, struct vm_area_struct *vma,
- unsigned long pfn, unsigned long size, pgprot_t prot)
-{
- if (io_remap_pfn_range(vma,
- vma->vm_start,
- pfn,
- size,
- prot))
- return -EAGAIN;
-
- return irdma_set_vma_data(vma, to_ucontext(context));
-}
-#else
/**
* irdma_disassociate_ucontext - Disassociate user context
* @context: ib user context
@@ -1990,7 +1376,6 @@
irdma_disassociate_ucontext(struct ib_ucontext *context)
{
}
-#endif
struct ib_device *
ib_device_get_by_netdev(if_t netdev, int driver_id)
@@ -2032,7 +1417,7 @@
{
int ret;
- ret = rdma_query_gid(ibdev, port, index, gid);
+ ret = ib_get_cached_gid(ibdev, port, index, gid, NULL);
if (ret == -EAGAIN) {
memcpy(gid, &zgid, sizeof(*gid));
return 0;
@@ -2144,7 +1529,7 @@
if (rdma_protocol_roce(ibdev, 1)) {
props->gid_tbl_len = 32;
- props->port_cap_flags |= IB_PORT_IP_BASED_GIDS;
+ props->port_cap_flags |= IB_PORT_IP_BASED_GIDS;
props->pkey_tbl_len = IRDMA_PKEY_TBL_SZ;
} else {
props->gid_tbl_len = 1;
@@ -2325,9 +1710,6 @@
BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP) |
BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ) |
BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP) |
- BIT_ULL(IB_USER_VERBS_CMD_ALLOC_MW) |
- BIT_ULL(IB_USER_VERBS_CMD_BIND_MW) |
- BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_MW) |
BIT_ULL(IB_USER_VERBS_CMD_POST_RECV) |
BIT_ULL(IB_USER_VERBS_CMD_POST_SEND);
iwdev->ibdev.uverbs_ex_cmd_mask =
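
The reworked irdma_create_ah_wait() above replaces the old spin on ah_valid with a bounded poll on the request's own completion flag, then maps the outcome: a timeout becomes -ETIMEDOUT (and escalates to a function reset), while a completed-but-failed CQP op becomes -EINVAL. A compilable sketch of the control flow, with a C11 atomic standing in for READ_ONCE() and usleep() for mdelay():

/* Sketch of the bounded completion poll used for non-sleeping AH
 * creation. Names and the budget parameter are illustrative. */
#include <errno.h>
#include <stdatomic.h>
#include <unistd.h>

static atomic_uchar request_done;  /* u8 request_done in the driver */
static int op_ret_val;             /* completion status from the CQP */

static int create_ah_wait(int budget_ms)
{
        int cnt = budget_ms;

        do {
                /* irdma_cqp_ce_handler() would drain completions here */
                usleep(1000);                       /* mdelay(1) */
        } while (!atomic_load(&request_done) && --cnt);

        if (!cnt)
                return -ETIMEDOUT;  /* caller escalates to a reset */
        return op_ret_val ? -EINVAL : 0;
}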
diff --git a/sys/dev/irdma/irdma_main.h b/sys/dev/irdma/irdma_main.h
--- a/sys/dev/irdma/irdma_main.h
+++ b/sys/dev/irdma/irdma_main.h
@@ -44,9 +44,7 @@
#include <netinet/if_ether.h>
#include <linux/slab.h>
#include <linux/rculist.h>
-#if __FreeBSD_version >= 1400000
#include <rdma/uverbs_ioctl.h>
-#endif
#include <rdma/ib_smi.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
@@ -185,7 +183,7 @@
void (*callback_fcn)(struct irdma_cqp_request *cqp_request);
void *param;
struct irdma_cqp_compl_info compl_info;
- bool request_done; /* READ/WRITE_ONCE macros operate on it */
+ u8 request_done; /* READ/WRITE_ONCE macros operate on it */
bool waiting:1;
bool dynamic:1;
};
@@ -236,7 +234,7 @@
u32 idx;
u32 irq;
u32 cpu_affinity;
- u32 ceq_id;
+ u16 ceq_id;
char name[IRDMA_IRQ_NAME_STR_LEN];
struct resource *res;
void *tag;
@@ -378,7 +376,6 @@
u32 roce_ackcreds;
u32 vendor_id;
u32 vendor_part_id;
- u32 push_mode;
u32 rcv_wnd;
u16 mac_ip_table_idx;
u16 vsi_num;
@@ -392,6 +389,7 @@
bool override_ooo:1;
bool override_rd_fence_rate:1;
bool override_rtomin:1;
+ bool push_mode:1;
bool roce_mode:1;
bool roce_dcqcn_en:1;
bool dcb_vlan_mode:1;
@@ -419,7 +417,6 @@
return container_of(ibucontext, struct irdma_ucontext, ibucontext);
}
-#if __FreeBSD_version >= 1400026
static inline struct irdma_user_mmap_entry *
to_irdma_mmap_entry(struct rdma_user_mmap_entry *rdma_entry)
{
@@ -427,7 +424,6 @@
rdma_entry);
}
-#endif
static inline struct irdma_pd *to_iwpd(struct ib_pd *ibpd)
{
return container_of(ibpd, struct irdma_pd, ibpd);
@@ -586,8 +582,8 @@
void irdma_copy_ip_ntohl(u32 *dst, __be32 *src);
void irdma_copy_ip_htonl(__be32 *dst, u32 *src);
u16 irdma_get_vlan_ipv4(struct iw_cm_id *cm_id, u32 *addr);
-if_t irdma_netdev_vlan_ipv6(struct iw_cm_id *cm_id, u32 *addr, u16 *vlan_id,
- u8 *mac);
+void irdma_get_vlan_mac_ipv6(struct iw_cm_id *cm_id, u32 *addr, u16 *vlan_id,
+ u8 *mac);
struct ib_mr *irdma_reg_phys_mr(struct ib_pd *ib_pd, u64 addr, u64 size,
int acc, u64 *iova_start);
int irdma_upload_qp_context(struct irdma_qp *iwqp, bool freeze, bool raw);
@@ -599,7 +595,6 @@
bool wait,
void (*callback_fcn)(struct irdma_cqp_request *cqp_request),
void *cb_param);
-void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request);
void irdma_udqp_qs_worker(struct work_struct *work);
bool irdma_cq_empty(struct irdma_cq *iwcq);
int irdma_netdevice_event(struct notifier_block *notifier, unsigned long event,
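
In irdma_main.h, request_done changes from bool to u8 while the neighboring flags stay bool:1 bitfields. The distinction matters for the lock-free accesses: a bitfield member has no address, so it cannot be handed to READ_ONCE()/WRITE_ONCE() at all, and keeping the polled flag as its own plain byte gives it a well-defined single-byte access width. A sketch; the WRITE_ONCE_U8 macro is a stand-in, and the exact rationale for u8 over bool is inferred from the comment in the hunk:

/* Sketch: a lock-free flag must be an addressable scalar. */
#include <stdint.h>

#define WRITE_ONCE_U8(p, v) (*(volatile uint8_t *)(p) = (v))

struct cqp_request_sketch {
        uint8_t request_done;   /* polled via READ_ONCE/WRITE_ONCE */
        _Bool   waiting:1;      /* only touched under the lock */
        _Bool   dynamic:1;
};

static void mark_done(struct cqp_request_sketch *req)
{
        WRITE_ONCE_U8(&req->request_done, 1);
        /* &req->waiting would not compile: bitfields have no address */
}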
diff --git a/sys/dev/irdma/irdma_puda.h b/sys/dev/irdma/irdma_puda.h
--- a/sys/dev/irdma/irdma_puda.h
+++ b/sys/dev/irdma/irdma_puda.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2023 Intel Corporation
+ * Copyright (c) 2015 - 2022 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/sys/dev/irdma/irdma_puda.c b/sys/dev/irdma/irdma_puda.c
--- a/sys/dev/irdma/irdma_puda.c
+++ b/sys/dev/irdma/irdma_puda.c
@@ -47,6 +47,7 @@
static void
irdma_ilq_putback_rcvbuf(struct irdma_sc_qp *qp,
struct irdma_puda_buf *buf, u32 wqe_idx);
+
/**
* irdma_puda_get_listbuf - get buffer from puda list
* @list: list to use for buffers (ILQ or IEQ)
@@ -181,7 +182,7 @@
struct irdma_puda_buf *buf;
struct irdma_virt_mem buf_mem;
- buf_mem.size = sizeof(struct irdma_puda_buf);
+ buf_mem.size = sizeof(*buf);
buf_mem.va = kzalloc(buf_mem.size, GFP_KERNEL);
if (!buf_mem.va)
return NULL;
@@ -935,6 +936,7 @@
struct irdma_sc_ceq *ceq;
ceq = vsi->dev->ceq[0];
+
switch (type) {
case IRDMA_PUDA_RSRC_TYPE_ILQ:
rsrc = vsi->ilq;
@@ -1008,7 +1010,7 @@
bool virtdma = false;
unsigned long flags;
- buf_mem.size = count * sizeof(struct irdma_puda_buf);
+ buf_mem.size = count * sizeof(*buf);
buf_mem.va = kzalloc(buf_mem.size, GFP_KERNEL);
if (!buf_mem.va) {
irdma_debug(rsrc->dev, IRDMA_DEBUG_PUDA,
@@ -1103,7 +1105,7 @@
struct irdma_virt_mem *vmem;
info->count = 1;
- pudasize = sizeof(struct irdma_puda_rsrc);
+ pudasize = sizeof(*rsrc);
sqwridsize = info->sq_size * sizeof(struct irdma_sq_uk_wr_trk_info);
rqwridsize = info->rq_size * 8;
switch (info->type) {
@@ -1703,6 +1705,7 @@
struct irdma_pfpdu *pfpdu = &qp->pfpdu;
u32 *hw_host_ctx = (u32 *)qp->hw_host_ctx;
u32 rcv_wnd = hw_host_ctx[23];
+
/* first partial seq # in q2 */
u32 fps = *(u32 *)(qp->q2_buf + Q2_FPSN_OFFSET);
struct list_head *rxlist = &pfpdu->rxlist;
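
The irdma_puda.c sizing changes all move from sizeof(struct foo) to sizeof(*ptr). The idiom ties the allocation size to the pointer's declared type, so retyping the variable later cannot leave a stale size expression behind. A minimal illustration:

/* Sketch of the sizeof(*ptr) idiom: the size tracks the pointer's
 * type automatically. */
#include <stdlib.h>

struct puda_buf_sketch {
        void *mem;
        unsigned long size;
};

static struct puda_buf_sketch *alloc_buf(void)
{
        /* not sizeof(struct puda_buf_sketch): survives a retype of buf */
        struct puda_buf_sketch *buf = calloc(1, sizeof(*buf));

        return buf;
}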
diff --git a/sys/dev/irdma/irdma_type.h b/sys/dev/irdma/irdma_type.h
--- a/sys/dev/irdma/irdma_type.h
+++ b/sys/dev/irdma/irdma_type.h
@@ -42,6 +42,8 @@
#include "irdma_hmc.h"
#include "irdma_uda.h"
#include "irdma_ws.h"
+#include "irdma_pble.h"
+
enum irdma_debug_flag {
IRDMA_DEBUG_NONE = 0x00000000,
IRDMA_DEBUG_ERR = 0x00000001,
@@ -70,6 +72,8 @@
IRDMA_DEBUG_ALL = 0xFFFFFFFF,
};
+#define RSVD_OFFSET 0xFFFFFFFF
+
enum irdma_page_size {
IRDMA_PAGE_SIZE_4K = 0,
IRDMA_PAGE_SIZE_2M,
@@ -472,7 +476,7 @@
bool virtual_map:1;
bool check_overflow:1;
bool ceq_id_valid:1;
- bool tph_en;
+ bool tph_en:1;
};
struct irdma_sc_qp {
@@ -520,9 +524,9 @@
};
struct irdma_stats_inst_info {
- bool use_hmc_fcn_index;
u16 hmc_fn_id;
u16 stats_idx;
+ bool use_hmc_fcn_index:1;
};
struct irdma_up_info {
@@ -570,7 +574,7 @@
u8 traffic_class;
u8 rel_bw;
u8 prio_type;
- bool valid;
+ bool valid:1;
};
struct irdma_config_check {
@@ -623,7 +627,6 @@
__le64 *fpm_query_buf;
__le64 *fpm_commit_buf;
struct irdma_hw *hw;
- u8 IOMEM *db_addr;
u32 IOMEM *wqe_alloc_db;
u32 IOMEM *cq_arm_db;
u32 IOMEM *aeq_alloc_db;
@@ -649,8 +652,6 @@
u32 debug_mask;
u16 num_vfs;
u16 hmc_fn_id;
- u8 vf_id;
- bool vchnl_up:1;
bool ceq_valid:1;
u8 pci_rev;
int (*ws_add)(struct irdma_sc_vsi *vsi, u8 user_pri);
@@ -666,7 +667,7 @@
u8 pbl_chunk_size;
u32 first_pm_pbl_idx;
bool virtual_map:1;
- bool check_overflow;
+ bool check_overflow:1;
bool cq_resize:1;
};
@@ -676,7 +677,7 @@
bool cq_num_valid:1;
bool arp_cache_idx_valid:1;
bool mac_valid:1;
- bool force_lpb;
+ bool force_lpb:1;
u8 next_iwarp_state;
};
@@ -709,7 +710,7 @@
u16 maj_err_code;
u16 min_err_code;
u8 op_code;
- bool error;
+ bool error:1;
};
struct irdma_qos_tc_info {
@@ -751,7 +752,7 @@
struct irdma_vsi_stats_info {
struct irdma_vsi_pestat *pestat;
u8 fcn_id;
- bool alloc_stats_inst;
+ bool alloc_stats_inst:1;
};
struct irdma_device_init_info {
@@ -789,7 +790,7 @@
u32 *aeqe_base;
void *pbl_list;
u32 elem_cnt;
- bool virtual_map;
+ bool virtual_map:1;
u8 pbl_chunk_size;
u32 first_pm_pbl_idx;
u32 msix_idx;
@@ -856,7 +857,6 @@
bool dcqcn_en:1;
bool rcv_no_icrc:1;
bool wr_rdresp_en:1;
- bool bind_en:1;
bool fast_reg_en:1;
bool priv_mode_en:1;
bool rd_en:1;
@@ -888,7 +888,6 @@
bool snd_mark_en:1;
bool rcv_mark_en:1;
bool wr_rdresp_en:1;
- bool bind_en:1;
bool fast_reg_en:1;
bool priv_mode_en:1;
bool rd_en:1;
@@ -1134,12 +1133,12 @@
u8 mac_addr[ETHER_ADDR_LEN];
u32 reach_max;
u16 arp_index;
- bool permanent;
+ bool permanent:1;
};
struct irdma_apbvt_info {
u16 port;
- bool add;
+ bool add:1;
};
struct irdma_qhash_table_info {
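
The irdma_type.h hunks convert stray bool members (tph_en, check_overflow, force_lpb, and friends) to bool:1 so they join the adjacent bitfield runs; four flags then share one storage unit instead of occupying four separate bytes. The trade-off, as the request_done change above shows, is that a bitfield member cannot be used with READ_ONCE()/WRITE_ONCE(). A self-contained sketch of the size effect:

/* Sketch: packing effect of the bool -> bool:1 conversions. */
#include <stdbool.h>
#include <stdio.h>

struct flags_unpacked {
        bool virtual_map;
        bool check_overflow;
        bool ceq_id_valid;
        bool tph_en;
};

struct flags_packed {
        bool virtual_map:1;
        bool check_overflow:1;
        bool ceq_id_valid:1;
        bool tph_en:1;
};

int main(void)
{
        printf("unpacked %zu, packed %zu\n",
               sizeof(struct flags_unpacked), sizeof(struct flags_packed));
        return 0;
}

On a typical ABI this prints "unpacked 4, packed 1".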
diff --git a/sys/dev/irdma/irdma_uda.h b/sys/dev/irdma/irdma_uda.h
--- a/sys/dev/irdma/irdma_uda.h
+++ b/sys/dev/irdma/irdma_uda.h
@@ -43,6 +43,7 @@
struct irdma_ah_info {
struct irdma_sc_vsi *vsi;
+ struct irdma_cqp_request *cqp_request;
u32 pd_idx;
u32 dst_arpindex;
u32 dest_ip_addr[4];
diff --git a/sys/dev/irdma/irdma_uk.c b/sys/dev/irdma/irdma_uk.c
--- a/sys/dev/irdma/irdma_uk.c
+++ b/sys/dev/irdma/irdma_uk.c
@@ -45,16 +45,16 @@
* @valid: The wqe valid
*/
static void
-irdma_set_fragment(__le64 * wqe, u32 offset, struct irdma_sge *sge,
+irdma_set_fragment(__le64 * wqe, u32 offset, struct ib_sge *sge,
u8 valid)
{
if (sge) {
set_64bit_val(wqe, offset,
- FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
set_64bit_val(wqe, offset + IRDMA_BYTE_8,
FIELD_PREP(IRDMAQPSQ_VALID, valid) |
- FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->len) |
- FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->stag));
+ FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->length) |
+ FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->lkey));
} else {
set_64bit_val(wqe, offset, 0);
set_64bit_val(wqe, offset + IRDMA_BYTE_8,
@@ -71,14 +71,14 @@
*/
static void
irdma_set_fragment_gen_1(__le64 * wqe, u32 offset,
- struct irdma_sge *sge, u8 valid)
+ struct ib_sge *sge, u8 valid)
{
if (sge) {
set_64bit_val(wqe, offset,
- FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
set_64bit_val(wqe, offset + IRDMA_BYTE_8,
- FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->len) |
- FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->stag));
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->length) |
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->lkey));
} else {
set_64bit_val(wqe, offset, 0);
set_64bit_val(wqe, offset + IRDMA_BYTE_8, 0);
@@ -209,8 +209,7 @@
if (IRDMA_RING_CURRENT_HEAD(qp->initial_ring) !=
IRDMA_RING_CURRENT_TAIL(qp->sq_ring) &&
!qp->push_mode) {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
+ irdma_uk_qp_post_wr(qp);
} else {
push = (__le64 *) ((uintptr_t)qp->push_wqe +
(wqe_idx & 0x7) * 0x20);
@@ -338,7 +337,7 @@
return -EINVAL;
for (i = 0; i < op_info->num_lo_sges; i++)
- total_size += op_info->lo_sg_list[i].len;
+ total_size += op_info->lo_sg_list[i].length;
read_fence |= info->read_fence;
@@ -357,7 +356,7 @@
qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
set_64bit_val(wqe, IRDMA_BYTE_16,
- FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
if (info->imm_data_valid) {
set_64bit_val(wqe, IRDMA_BYTE_0,
@@ -386,7 +385,7 @@
++addl_frag_cnt;
}
- hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
@@ -437,7 +436,7 @@
return -EINVAL;
for (i = 0; i < op_info->num_lo_sges; i++)
- total_size += op_info->lo_sg_list[i].len;
+ total_size += op_info->lo_sg_list[i].length;
ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
if (ret_code)
@@ -475,8 +474,8 @@
++addl_frag_cnt;
}
set_64bit_val(wqe, IRDMA_BYTE_16,
- FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
- hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
FIELD_PREP(IRDMAQPSQ_OPCODE,
@@ -525,7 +524,7 @@
return -EINVAL;
for (i = 0; i < op_info->num_sges; i++)
- total_size += op_info->sg_list[i].len;
+ total_size += op_info->sg_list[i].length;
if (info->imm_data_valid)
frag_cnt = op_info->num_sges + 1;
@@ -604,15 +603,15 @@
* @polarity: compatibility parameter
*/
static void
-irdma_copy_inline_data_gen_1(u8 *wqe, struct irdma_sge *sge_list,
+irdma_copy_inline_data_gen_1(u8 *wqe, struct ib_sge *sge_list,
u32 num_sges, u8 polarity)
{
u32 quanta_bytes_remaining = 16;
u32 i;
for (i = 0; i < num_sges; i++) {
- u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].tag_off;
- u32 sge_len = sge_list[i].len;
+ u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
+ u32 sge_len = sge_list[i].length;
while (sge_len) {
u32 bytes_copied;
@@ -651,7 +650,7 @@
* @polarity: polarity of wqe valid bit
*/
static void
-irdma_copy_inline_data(u8 *wqe, struct irdma_sge *sge_list,
+irdma_copy_inline_data(u8 *wqe, struct ib_sge *sge_list,
u32 num_sges, u8 polarity)
{
u8 inline_valid = polarity << IRDMA_INLINE_VALID_S;
@@ -662,8 +661,8 @@
wqe += 8;
for (i = 0; i < num_sges; i++) {
- u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].tag_off;
- u32 sge_len = sge_list[i].len;
+ u8 *cur_sge = (u8 *)(uintptr_t)sge_list[i].addr;
+ u32 sge_len = sge_list[i].length;
while (sge_len) {
u32 bytes_copied;
@@ -743,7 +742,7 @@
return -EINVAL;
for (i = 0; i < op_info->num_lo_sges; i++)
- total_size += op_info->lo_sg_list[i].len;
+ total_size += op_info->lo_sg_list[i].length;
if (unlikely(total_size > qp->max_inline_data))
return -EINVAL;
@@ -756,9 +755,9 @@
qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
read_fence |= info->read_fence;
set_64bit_val(wqe, IRDMA_BYTE_16,
- FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
- hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, total_size) |
FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
@@ -814,7 +813,7 @@
return -EINVAL;
for (i = 0; i < op_info->num_sges; i++)
- total_size += op_info->sg_list[i].len;
+ total_size += op_info->sg_list[i].length;
if (unlikely(total_size > qp->max_inline_data))
return -EINVAL;
@@ -879,7 +878,7 @@
u64 hdr;
u32 wqe_idx;
bool local_fence = false;
- struct irdma_sge sge = {0};
+ struct ib_sge sge = {0};
u16 quanta = IRDMA_QP_WQE_MIN_QUANTA;
info->push_wqe = qp->push_db ? true : false;
@@ -890,7 +889,7 @@
if (!wqe)
return -ENOSPC;
- sge.stag = op_info->target_stag;
+ sge.lkey = op_info->target_stag;
qp->wqe_ops.iw_set_fragment(wqe, IRDMA_BYTE_0, &sge, 0);
set_64bit_val(wqe, IRDMA_BYTE_16, 0);
@@ -1327,8 +1326,7 @@
IRDMA_RING_MOVE_TAIL(cq->cq_ring);
set_64bit_val(cq->shadow_area, IRDMA_BYTE_0,
IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
- memset(info, 0,
- sizeof(struct irdma_cq_poll_info));
+ memset(info, 0, sizeof(*info));
return irdma_uk_cq_poll_cmpl(cq, info);
}
}
@@ -1484,10 +1482,12 @@
int
irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, u32 *sqdepth)
{
+ u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
+
*sqdepth = irdma_round_up_wq((sq_size << shift) + IRDMA_SQ_RSVD);
- if (*sqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
- *sqdepth = uk_attrs->min_hw_wq_size << shift;
+ if (*sqdepth < min_size)
+ *sqdepth = min_size;
else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
return -EINVAL;
@@ -1501,10 +1501,12 @@
int
irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift, u32 *rqdepth)
{
+ u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
+
*rqdepth = irdma_round_up_wq((rq_size << shift) + IRDMA_RQ_RSVD);
- if (*rqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
- *rqdepth = uk_attrs->min_hw_wq_size << shift;
+ if (*rqdepth < min_size)
+ *rqdepth = min_size;
else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
return -EINVAL;
@@ -1535,9 +1537,10 @@
{
u16 move_cnt = 1;
- if (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE)
+ if (info->start_wqe_idx)
+ move_cnt = info->start_wqe_idx;
+ else if (qp->uk_attrs->feature_flags & IRDMA_FEATURE_RTS_AE)
move_cnt = 3;
-
qp->conn_wqes = move_cnt;
IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
@@ -1582,6 +1585,7 @@
{
bool imm_support = ukinfo->uk_attrs->hw_rev >= IRDMA_GEN_2 ? true : false;
int status;
+
irdma_get_wqe_shift(ukinfo->uk_attrs,
imm_support ? ukinfo->max_sq_frag_cnt + 1 :
ukinfo->max_sq_frag_cnt,
@@ -1676,6 +1680,8 @@
qp->wqe_ops = iw_wqe_uk_ops_gen_1;
else
qp->wqe_ops = iw_wqe_uk_ops;
+ qp->start_wqe_idx = info->start_wqe_idx;
+
return ret_code;
}
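
The irdma_get_sqdepth()/irdma_get_rqdepth() hunks hoist the hardware minimum (min_hw_wq_size scaled by the WQE shift) into a local before the clamp, which ties in with the min_hw_wq_size value newly reported to userspace through the ABI. A sketch of the computation; the power-of-two round-up is an assumption standing in for irdma_round_up_wq(), and SQ_RSVD is illustrative rather than the driver's constant:

/* Sketch of the WQ depth clamp: round the requested size up, enforce
 * the scaled hardware minimum, reject anything past the maximum. */
#include <errno.h>
#include <stdint.h>

#define SQ_RSVD 8  /* illustrative reserve */

static uint32_t round_up_pow2(uint32_t v)
{
        uint32_t r = 1;

        while (r < v)
                r <<= 1;
        return r;
}

static int get_sqdepth(uint32_t min_hw_wq_size, uint32_t max_quanta,
                       uint32_t sq_size, uint8_t shift, uint32_t *sqdepth)
{
        uint32_t min_size = min_hw_wq_size << shift;

        *sqdepth = round_up_pow2((sq_size << shift) + SQ_RSVD);
        if (*sqdepth < min_size)
                *sqdepth = min_size;
        else if (*sqdepth > max_quanta)
                return -EINVAL;
        return 0;
}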
diff --git a/sys/dev/irdma/irdma_user.h b/sys/dev/irdma/irdma_user.h
--- a/sys/dev/irdma/irdma_user.h
+++ b/sys/dev/irdma/irdma_user.h
@@ -35,6 +35,8 @@
#ifndef IRDMA_USER_H
#define IRDMA_USER_H
+#include <rdma/ib_verbs.h>
+
#define irdma_handle void *
#define irdma_adapter_handle irdma_handle
#define irdma_qp_handle irdma_handle
@@ -48,7 +50,7 @@
#define irdma_access_privileges u32
#define irdma_physical_fragment u64
#define irdma_address_list u64 *
-#define irdma_sgl struct irdma_sge *
+#define irdma_sgl struct ib_sge *
#define IRDMA_MAX_MR_SIZE 0x200000000000ULL
@@ -78,8 +80,6 @@
#define IRDMA_OP_TYPE_REC_IMM 0x3f
#define IRDMA_FLUSH_MAJOR_ERR 1
-#define IRDMA_SRQFLUSH_RSVD_MAJOR_ERR 0xfffe
-
/* Async Events codes */
#define IRDMA_AE_AMP_UNALLOCATED_STAG 0x0102
#define IRDMA_AE_AMP_INVALID_STAG 0x0103
@@ -140,6 +140,7 @@
#define IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST 0x0313
#define IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP 0x0314
#define IRDMA_AE_ROCE_RSP_LENGTH_ERROR 0x0316
+#define IRDMA_AE_ROCE_REQ_LENGTH_ERROR 0x0318
#define IRDMA_AE_ROCE_EMPTY_MCG 0x0380
#define IRDMA_AE_ROCE_BAD_MC_IP_ADDR 0x0381
#define IRDMA_AE_ROCE_BAD_MC_QPID 0x0382
@@ -160,6 +161,7 @@
#define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b
#define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c
#define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e
+#define IRDMA_AE_LLP_TOO_MANY_RNRS 0x050f
#define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520
#define IRDMA_AE_RESET_SENT 0x0601
#define IRDMA_AE_TERMINATE_SENT 0x0602
@@ -199,8 +201,7 @@
IRDMA_MAX_OUTBOUND_MSG_SIZE = 65537,
/* 64K +1 */
IRDMA_MAX_INBOUND_MSG_SIZE = 65537,
- IRDMA_MAX_PUSH_PAGE_COUNT = 1024,
- IRDMA_MAX_PE_ENA_VF_COUNT = 32,
+ IRDMA_MAX_PE_ENA_VF_COUNT = 32,
IRDMA_MAX_VF_FPM_ID = 47,
IRDMA_MAX_SQ_PAYLOAD_SIZE = 2145386496,
IRDMA_MAX_INLINE_DATA_SIZE = 101,
@@ -227,6 +228,7 @@
FLUSH_RETRY_EXC_ERR,
FLUSH_MW_BIND_ERR,
FLUSH_REM_INV_REQ_ERR,
+ FLUSH_RNR_RETRY_EXC_ERR,
};
enum irdma_qp_event_type {
@@ -280,12 +282,6 @@
struct irdma_qp_uk_init_info;
struct irdma_cq_uk_init_info;
-struct irdma_sge {
- irdma_tagged_offset tag_off;
- u32 len;
- irdma_stag stag;
-};
-
struct irdma_ring {
volatile u32 head;
volatile u32 tail; /* effective tail */
@@ -317,13 +313,13 @@
struct irdma_rdma_write {
irdma_sgl lo_sg_list;
u32 num_lo_sges;
- struct irdma_sge rem_addr;
+ struct ib_sge rem_addr;
};
struct irdma_rdma_read {
irdma_sgl lo_sg_list;
u32 num_lo_sges;
- struct irdma_sge rem_addr;
+ struct ib_sge rem_addr;
};
struct irdma_bind_window {
@@ -422,9 +418,9 @@
bool post_sq);
struct irdma_wqe_uk_ops {
- void (*iw_copy_inline_data)(u8 *dest, struct irdma_sge *sge_list, u32 num_sges, u8 polarity);
+ void (*iw_copy_inline_data)(u8 *dest, struct ib_sge *sge_list, u32 num_sges, u8 polarity);
u16 (*iw_inline_data_size_to_quanta)(u32 data_size);
- void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct irdma_sge *sge,
+ void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct ib_sge *sge,
u8 valid);
void (*iw_set_mw_bind_wqe)(__le64 *wqe,
struct irdma_bind_window *op_info);
@@ -490,6 +486,7 @@
u8 rwqe_polarity;
u8 rq_wqe_size;
u8 rq_wqe_size_multiplier;
+ u8 start_wqe_idx;
bool deferred_flag:1;
bool push_mode:1; /* whether the last post wqe was pushed */
bool push_dropped:1;
@@ -537,6 +534,7 @@
u32 sq_depth;
u32 rq_depth;
u8 first_sq_wq;
+ u8 start_wqe_idx;
u8 type;
u8 sq_shift;
u8 rq_shift;
@@ -625,10 +623,15 @@
case IRDMA_AE_LLP_SEGMENT_TOO_SMALL:
case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
case IRDMA_AE_ROCE_RSP_LENGTH_ERROR:
+ case IRDMA_AE_ROCE_REQ_LENGTH_ERROR:
case IRDMA_AE_IB_REMOTE_OP_ERROR:
qp_err.flush_code = FLUSH_REM_OP_ERR;
qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
break;
+ case IRDMA_AE_LLP_TOO_MANY_RNRS:
+ qp_err.flush_code = FLUSH_RNR_RETRY_EXC_ERR;
+ qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
+ break;
case IRDMA_AE_LCE_QP_CATASTROPHIC:
qp_err.flush_code = FLUSH_FATAL_ERR;
qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC;
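
irdma_user.h drops the driver-private struct irdma_sge in favor of the stock struct ib_sge, and every irdma_uk.c hunk above applies the same field mapping: tag_off becomes addr, len becomes length, stag becomes lkey. Laid out side by side (the old struct is copied from the deleted block; the new one follows the standard verbs definition):

/* Field mapping behind the irdma_sge -> ib_sge conversion. */
#include <stdint.h>

struct irdma_sge_old {          /* removed by this patch */
        uint64_t tag_off;       /* -> ib_sge.addr   */
        uint32_t len;           /* -> ib_sge.length */
        uint32_t stag;          /* -> ib_sge.lkey   */
};

struct ib_sge_sketch {          /* shape of struct ib_sge */
        uint64_t addr;
        uint32_t length;
        uint32_t lkey;
};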
diff --git a/sys/dev/irdma/irdma_utils.c b/sys/dev/irdma/irdma_utils.c
--- a/sys/dev/irdma/irdma_utils.c
+++ b/sys/dev/irdma/irdma_utils.c
@@ -164,6 +164,7 @@
"Connection error: Doubt reachability (usually occurs after the max number of retries has been reached)"},
{IRDMA_AE_LLP_CONNECTION_ESTABLISHED,
"iWARP event: Connection established"},
+ {IRDMA_AE_LLP_TOO_MANY_RNRS, "RoCEv2: Too many RNR NACKs"},
{IRDMA_AE_RESOURCE_EXHAUSTION,
"QP error: Resource exhaustion"},
{IRDMA_AE_RESET_SENT,
@@ -437,11 +438,11 @@
irdma_free_pending_cqp_request(struct irdma_cqp *cqp,
struct irdma_cqp_request *cqp_request)
{
- if (cqp_request->waiting) {
- cqp_request->compl_info.error = true;
- WRITE_ONCE(cqp_request->request_done, true);
+ cqp_request->compl_info.error = true;
+ WRITE_ONCE(cqp_request->request_done, true);
+
+ if (cqp_request->waiting)
wake_up(&cqp_request->waitq);
- }
wait_event_timeout(cqp->remove_wq,
atomic_read(&cqp_request->refcnt) == 1, 1000);
irdma_put_cqp_request(cqp, cqp_request);
@@ -558,8 +559,6 @@
[IRDMA_OP_MANAGE_HMC_PM_FUNC_TABLE] = "Manage HMC PM Function Table Cmd",
[IRDMA_OP_SUSPEND] = "Suspend QP Cmd",
[IRDMA_OP_RESUME] = "Resume QP Cmd",
- [IRDMA_OP_MANAGE_VCHNL_REQ_PBLE_BP] =
- "Manage Virtual Channel Requester Function PBLE Backing Pages Cmd",
[IRDMA_OP_QUERY_FPM_VAL] = "Query FPM Values Cmd",
[IRDMA_OP_COMMIT_FPM_VAL] = "Commit FPM Values Cmd",
[IRDMA_OP_AH_CREATE] = "Create Address Handle Cmd",
@@ -591,7 +590,7 @@
{0xffff, 0x8007, "Modify QP Bad Close"},
{0xffff, 0x8009, "LLP Closed"},
{0xffff, 0x800a, "Reset Not Sent"},
- {0xffff, 0x200, "Failover Pending"}
+ {0xffff, 0x0200, "Failover Pending"},
};
/**
@@ -1055,15 +1054,16 @@
/**
* irdma_dealloc_push_page - free a push page for qp
* @rf: RDMA PCI function
- * @qp: hardware control qp
+ * @iwqp: QP pointer
*/
void
irdma_dealloc_push_page(struct irdma_pci_f *rf,
- struct irdma_sc_qp *qp)
+ struct irdma_qp *iwqp)
{
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
int status;
+ struct irdma_sc_qp *qp = &iwqp->sc_qp;
if (qp->push_idx == IRDMA_INVALID_PUSH_PAGE_INDEX)
return;
@@ -1564,16 +1564,6 @@
del_timer_sync(&devstat->stats_timer);
}
-/**
- * irdma_process_stats - Checking for wrap and update stats
- * @pestat: stats structure pointer
- */
-static inline void
-irdma_process_stats(struct irdma_vsi_pestat *pestat)
-{
- sc_vsi_update_stats(pestat->vsi);
-}
-
/**
* irdma_process_cqp_stats - Checking for wrap and update stats
* @cqp_request: cqp_request structure pointer
@@ -1583,7 +1573,7 @@
{
struct irdma_vsi_pestat *pestat = cqp_request->param;
- irdma_process_stats(pestat);
+ sc_vsi_update_stats(pestat->vsi);
}
/**
@@ -1619,7 +1609,7 @@
cqp_request->callback_fcn = irdma_process_cqp_stats;
status = irdma_handle_cqp_op(rf, cqp_request);
if (wait)
- irdma_process_stats(pestat);
+ sc_vsi_update_stats(pestat->vsi);
irdma_put_cqp_request(&rf->cqp, cqp_request);
return status;
@@ -1814,6 +1804,10 @@
cqp_info->cqp_cmd = cmd;
cqp_info->post_sq = 1;
if (cmd == IRDMA_OP_AH_CREATE) {
+ if (!wait)
+ irdma_get_cqp_request(cqp_request);
+ sc_ah->ah_info.cqp_request = cqp_request;
+
cqp_info->in.u.ah_create.info = sc_ah->ah_info;
cqp_info->in.u.ah_create.scratch = (uintptr_t)cqp_request;
cqp_info->in.u.ah_create.cqp = &rf->cqp.sc_cqp;
@@ -1948,21 +1942,6 @@
kfree(ah);
}
-/**
- * irdma_gsi_ud_qp_ah_cb - callback after creation of AH for GSI/ID QP
- * @cqp_request: pointer to cqp_request of create AH
- */
-void
-irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request)
-{
- struct irdma_sc_ah *sc_ah = cqp_request->param;
-
- if (!cqp_request->compl_info.op_ret_val)
- sc_ah->ah_info.ah_valid = true;
- else
- sc_ah->ah_info.ah_valid = false;
-}
-
/**
* irdma_prm_add_pble_mem - add memory to pble resources
* @pprm: pble resource manager
@@ -2010,6 +1989,7 @@
struct list_head *chunk_entry = (&pprm->clist)->next;
u32 offset;
unsigned long flags;
+
*vaddr = NULL;
*fpm_addr = 0;
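
irdma_free_pending_cqp_request() now publishes the error and request_done state for every pending request, not only those with a sleeping waiter. That is required because AH creation (see the irdma_create_ah_wait() rework above) polls request_done without ever sleeping, and the extra irdma_get_cqp_request() taken for asynchronous AH creates keeps ah_info.cqp_request valid for that poller. A sketch of the cancellation ordering:

/* Sketch of the cancellation fix: completion state is published for
 * every pending request (pollers rely on it); only the wake-up stays
 * conditional on a sleeping waiter. */
#include <stdatomic.h>
#include <stdbool.h>

struct req_sketch {
        atomic_uchar request_done;
        bool error;
        bool waiting;
};

static void wake_sleepers(struct req_sketch *r)
{
        (void)r;  /* stands in for wake_up(&cqp_request->waitq) */
}

static void free_pending(struct req_sketch *r)
{
        /* publish state first: pollers check request_done, not waitq */
        r->error = true;
        atomic_store(&r->request_done, 1);  /* WRITE_ONCE(..., true) */

        if (r->waiting)                     /* only sleepers need a wake-up */
                wake_sleepers(r);
        /* the driver then waits for the refcount to drop and puts the
         * request */
}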
diff --git a/sys/dev/irdma/irdma_verbs.h b/sys/dev/irdma/irdma_verbs.h
--- a/sys/dev/irdma/irdma_verbs.h
+++ b/sys/dev/irdma/irdma_verbs.h
@@ -48,13 +48,7 @@
struct irdma_ucontext {
struct ib_ucontext ibucontext;
struct irdma_device *iwdev;
-#if __FreeBSD_version >= 1400026
struct rdma_user_mmap_entry *db_mmap_entry;
-#else
- struct irdma_user_mmap_entry *db_mmap_entry;
- DECLARE_HASHTABLE(mmap_hash_tbl, 6);
- spinlock_t mmap_tbl_lock; /* protect mmap hash table entries */
-#endif
struct list_head cq_reg_mem_list;
spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */
struct list_head qp_reg_mem_list;
@@ -215,13 +209,8 @@
struct irdma_cq *iwscq;
struct irdma_cq *iwrcq;
struct irdma_pd *iwpd;
-#if __FreeBSD_version >= 1400026
struct rdma_user_mmap_entry *push_wqe_mmap_entry;
struct rdma_user_mmap_entry *push_db_mmap_entry;
-#else
- struct irdma_user_mmap_entry *push_wqe_mmap_entry;
- struct irdma_user_mmap_entry *push_db_mmap_entry;
-#endif
struct irdma_qp_host_ctx_info ctx_info;
union {
struct irdma_iwarp_offload_info iwarp_info;
@@ -263,7 +252,7 @@
struct irdma_dma_mem host_ctx;
struct timer_list terminate_timer;
struct irdma_pbl *iwpbl;
- struct irdma_sge *sg_list;
+ struct ib_sge *sg_list;
struct irdma_dma_mem q2_ctx_mem;
struct irdma_dma_mem ietf_mem;
struct completion free_qp;
@@ -292,13 +281,7 @@
};
struct irdma_user_mmap_entry {
-#if __FreeBSD_version >= 1400026
struct rdma_user_mmap_entry rdma_entry;
-#else
- struct irdma_ucontext *ucontext;
- struct hlist_node hlist;
- u64 pgoff_key; /* Used to compute offset (in bytes) returned to user libc's mmap */
-#endif
u64 bar_offset;
u8 mmap_flag;
};
@@ -399,16 +382,9 @@
ether_addr_copy(mac, mac6);
}
-#if __FreeBSD_version >= 1400026
struct rdma_user_mmap_entry*
irdma_user_mmap_entry_insert(struct irdma_ucontext *ucontext, u64 bar_offset,
enum irdma_mmap_flag mmap_flag, u64 *mmap_offset);
-#else
-struct irdma_user_mmap_entry *
-irdma_user_mmap_entry_add_hash(struct irdma_ucontext *ucontext, u64 bar_offset,
- enum irdma_mmap_flag mmap_flag, u64 *mmap_offset);
-void irdma_user_mmap_entry_del_hash(struct irdma_user_mmap_entry *entry);
-#endif
int irdma_ib_register_device(struct irdma_device *iwdev);
void irdma_ib_unregister_device(struct irdma_device *iwdev);
void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event);
diff --git a/sys/dev/irdma/irdma_verbs.c b/sys/dev/irdma/irdma_verbs.c
--- a/sys/dev/irdma/irdma_verbs.c
+++ b/sys/dev/irdma/irdma_verbs.c
@@ -72,7 +72,6 @@
props->max_cq = rf->max_cq - rf->used_cqs;
props->max_cqe = rf->max_cqe - 1;
props->max_mr = rf->max_mr - rf->used_mrs;
- props->max_mw = props->max_mr;
props->max_pd = rf->max_pd - rf->used_pds;
props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
props->max_qp_rd_atom = hw_attrs->max_hw_ird;
@@ -107,16 +106,10 @@
pfn = ((uintptr_t)ucontext->iwdev->rf->sc_dev.hw_regs[IRDMA_DB_ADDR_OFFSET] +
pci_resource_start(ucontext->iwdev->rf->pcidev, 0)) >> PAGE_SHIFT;
-#if __FreeBSD_version >= 1400026
return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE,
pgprot_noncached(vma->vm_page_prot), NULL);
-#else
- return rdma_user_mmap_io(&ucontext->ibucontext, vma, pfn, PAGE_SIZE,
- pgprot_noncached(vma->vm_page_prot));
-#endif
}
-#if __FreeBSD_version >= 1400026
static void
irdma_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
@@ -149,104 +142,6 @@
return &entry->rdma_entry;
}
-#else
-static inline bool
-find_key_in_mmap_tbl(struct irdma_ucontext *ucontext, u64 key)
-{
- struct irdma_user_mmap_entry *entry;
-
- HASH_FOR_EACH_POSSIBLE(ucontext->mmap_hash_tbl, entry, hlist, key) {
- if (entry->pgoff_key == key)
- return true;
- }
-
- return false;
-}
-
-struct irdma_user_mmap_entry *
-irdma_user_mmap_entry_add_hash(struct irdma_ucontext *ucontext, u64 bar_offset,
- enum irdma_mmap_flag mmap_flag, u64 *mmap_offset)
-{
- struct irdma_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- unsigned long flags;
- int retry_cnt = 0;
-
- if (!entry)
- return NULL;
-
- entry->bar_offset = bar_offset;
- entry->mmap_flag = mmap_flag;
- entry->ucontext = ucontext;
- do {
- get_random_bytes(&entry->pgoff_key, sizeof(entry->pgoff_key));
-
- /* The key is a page offset */
- entry->pgoff_key >>= PAGE_SHIFT;
-
- /* In the event of a collision in the hash table, retry a new key */
- spin_lock_irqsave(&ucontext->mmap_tbl_lock, flags);
- if (!find_key_in_mmap_tbl(ucontext, entry->pgoff_key)) {
- HASH_ADD(ucontext->mmap_hash_tbl, &entry->hlist, entry->pgoff_key);
- spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);
- goto hash_add_done;
- }
- spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);
- } while (retry_cnt++ < 10);
-
- irdma_debug(&ucontext->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
- "mmap table add failed: Cannot find a unique key\n");
- kfree(entry);
- return NULL;
-
-hash_add_done:
- /* libc mmap uses a byte offset */
- *mmap_offset = entry->pgoff_key << PAGE_SHIFT;
-
- return entry;
-}
-
-static struct irdma_user_mmap_entry *
-irdma_find_user_mmap_entry(struct irdma_ucontext *ucontext,
- struct vm_area_struct *vma)
-{
- struct irdma_user_mmap_entry *entry;
- unsigned long flags;
-
- if (vma->vm_end - vma->vm_start != PAGE_SIZE)
- return NULL;
-
- spin_lock_irqsave(&ucontext->mmap_tbl_lock, flags);
- HASH_FOR_EACH_POSSIBLE(ucontext->mmap_hash_tbl, entry, hlist, vma->vm_pgoff) {
- if (entry->pgoff_key == vma->vm_pgoff) {
- spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);
- return entry;
- }
- }
-
- spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);
-
- return NULL;
-}
-
-void
-irdma_user_mmap_entry_del_hash(struct irdma_user_mmap_entry *entry)
-{
- struct irdma_ucontext *ucontext;
- unsigned long flags;
-
- if (!entry)
- return;
-
- ucontext = entry->ucontext;
-
- spin_lock_irqsave(&ucontext->mmap_tbl_lock, flags);
- HASH_DEL(ucontext->mmap_hash_tbl, &entry->hlist);
- spin_unlock_irqrestore(&ucontext->mmap_tbl_lock, flags);
-
- kfree(entry);
-}
-
-#endif
/**
* irdma_mmap - user memory map
* @context: context created during alloc
@@ -255,9 +150,7 @@
static int
irdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
-#if __FreeBSD_version >= 1400026
struct rdma_user_mmap_entry *rdma_entry;
-#endif
struct irdma_user_mmap_entry *entry;
struct irdma_ucontext *ucontext;
u64 pfn;
@@ -269,7 +162,6 @@
if (ucontext->legacy_mode)
return irdma_mmap_legacy(ucontext, vma);
-#if __FreeBSD_version >= 1400026
rdma_entry = rdma_user_mmap_entry_get(&ucontext->ibucontext, vma);
if (!rdma_entry) {
irdma_debug(&ucontext->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
@@ -279,15 +171,6 @@
}
entry = to_irdma_mmap_entry(rdma_entry);
-#else
- entry = irdma_find_user_mmap_entry(ucontext, vma);
- if (!entry) {
- irdma_debug(&ucontext->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
- "pgoff[0x%lx] does not have valid entry\n",
- vma->vm_pgoff);
- return -EINVAL;
- }
-#endif
irdma_debug(&ucontext->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
"bar_offset [0x%lx] mmap_flag [%d]\n", entry->bar_offset,
entry->mmap_flag);
@@ -297,24 +180,14 @@
switch (entry->mmap_flag) {
case IRDMA_MMAP_IO_NC:
-#if __FreeBSD_version >= 1400026
ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
pgprot_noncached(vma->vm_page_prot),
rdma_entry);
-#else
- ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
- pgprot_noncached(vma->vm_page_prot));
-#endif
break;
case IRDMA_MMAP_IO_WC:
-#if __FreeBSD_version >= 1400026
ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
pgprot_writecombine(vma->vm_page_prot),
rdma_entry);
-#else
- ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
- pgprot_writecombine(vma->vm_page_prot));
-#endif
break;
default:
ret = -EINVAL;
@@ -324,9 +197,7 @@
irdma_debug(&ucontext->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
"bar_offset [0x%lx] mmap_flag[%d] err[%d]\n",
entry->bar_offset, entry->mmap_flag, ret);
-#if __FreeBSD_version >= 1400026
rdma_user_mmap_entry_put(rdma_entry);
-#endif
return ret;
}
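
[Editor's note: with the pre-1400026 compatibility code removed, every mmap path above now goes through the ib_core rdma_user_mmap_entry API unconditionally. Below is a minimal sketch of the lifecycle this diff converges on, using only the rdma_user_mmap_* calls visible in the hunks; the entry struct, the function name, and the BAR-offset-to-pfn helper are illustrative assumptions, not driver code.]

/* Sketch, assuming the ib_core mmap-entry API as used in this diff. */
struct example_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;	/* ib_core bookkeeping */
	u64 bar_offset;
	u8 mmap_flag;
};

static int
example_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct rdma_user_mmap_entry *rdma_entry;
	struct example_mmap_entry *entry;
	u64 pfn;
	int ret;

	/* ib_core validates vma->vm_pgoff against entries registered
	 * earlier via rdma_user_mmap_entry_insert(). */
	rdma_entry = rdma_user_mmap_entry_get(context, vma);
	if (!rdma_entry)
		return -EINVAL;
	entry = container_of(rdma_entry, struct example_mmap_entry,
			     rdma_entry);

	pfn = example_bar_offset_to_pfn(entry->bar_offset); /* hypothetical helper */
	ret = rdma_user_mmap_io(context, vma, pfn, PAGE_SIZE,
				pgprot_noncached(vma->vm_page_prot),
				rdma_entry);

	/* Drop the lookup reference; ib_core holds its own until the
	 * mapping is torn down and mmap_free() runs. */
	rdma_user_mmap_entry_put(rdma_entry);
	return ret;
}
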
@@ -428,19 +299,11 @@
irdma_remove_push_mmap_entries(struct irdma_qp *iwqp)
{
if (iwqp->push_db_mmap_entry) {
-#if __FreeBSD_version >= 1400026
rdma_user_mmap_entry_remove(iwqp->push_db_mmap_entry);
-#else
- irdma_user_mmap_entry_del_hash(iwqp->push_db_mmap_entry);
-#endif
iwqp->push_db_mmap_entry = NULL;
}
if (iwqp->push_wqe_mmap_entry) {
-#if __FreeBSD_version >= 1400026
rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
-#else
- irdma_user_mmap_entry_del_hash(iwqp->push_wqe_mmap_entry);
-#endif
iwqp->push_wqe_mmap_entry = NULL;
}
}
@@ -458,36 +321,19 @@
bar_off = irdma_compute_push_wqe_offset(iwdev, iwqp->sc_qp.push_idx);
-#if __FreeBSD_version >= 1400026
iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
bar_off, IRDMA_MMAP_IO_WC,
push_wqe_mmap_key);
-#else
- iwqp->push_wqe_mmap_entry = irdma_user_mmap_entry_add_hash(ucontext, bar_off,
- IRDMA_MMAP_IO_WC,
- push_wqe_mmap_key);
-#endif
if (!iwqp->push_wqe_mmap_entry)
return -ENOMEM;
/* push doorbell page */
bar_off += IRDMA_HW_PAGE_SIZE;
-#if __FreeBSD_version >= 1400026
iwqp->push_db_mmap_entry = irdma_user_mmap_entry_insert(ucontext,
bar_off, IRDMA_MMAP_IO_NC,
push_db_mmap_key);
-#else
-
- iwqp->push_db_mmap_entry = irdma_user_mmap_entry_add_hash(ucontext, bar_off,
- IRDMA_MMAP_IO_NC,
- push_db_mmap_key);
-#endif
if (!iwqp->push_db_mmap_entry) {
-#if __FreeBSD_version >= 1400026
rdma_user_mmap_entry_remove(iwqp->push_wqe_mmap_entry);
-#else
- irdma_user_mmap_entry_del_hash(iwqp->push_wqe_mmap_entry);
-#endif
return -ENOMEM;
}
@@ -535,11 +381,7 @@
struct irdma_qp_init_info *info,
struct ib_qp_init_attr *init_attr)
{
-#if __FreeBSD_version >= 1400026
struct irdma_ucontext *ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
-#else
- struct irdma_ucontext *ucontext = to_ucontext(iwqp->iwpd->ibpd.uobject->context);
-#endif
struct irdma_qp_uk_init_info *ukinfo = &info->qp_uk_init_info;
struct irdma_create_qp_req req = {0};
unsigned long flags;
@@ -598,6 +440,9 @@
ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
}
+ if (req.comp_mask & IRDMA_CREATE_QP_USE_START_WQE_IDX &&
+ iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_RTS_AE)
+ ukinfo->start_wqe_idx = 4;
irdma_setup_virt_qp(iwdev, iwqp, info);
return 0;
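
[Editor's note: the start_wqe_idx hunk above is one instance of the comp_mask idiom this update applies throughout the ABI: new fields are appended to a request struct and guarded by a bit, so a zero-initialized request from an older library is interpreted exactly as before, and the kernel acts on the new field only when user space opted in and the hardware advertises the feature (IRDMA_FEATURE_RTS_AE here). A toy, compilable illustration of the pattern follows; all names are illustrative, not the driver's.]

#include <stdint.h>
#include <stdio.h>

#define USE_START_WQE_IDX	(1u << 0)

struct create_qp_req {
	uint64_t wqe_bufs;
	uint64_t compl_ctx;
	uint64_t comp_mask;	/* appended field; 0 from old user space */
};

static unsigned int
pick_start_wqe_idx(const struct create_qp_req *req, int hw_has_rts_ae)
{
	/* Honor the new field only if both sides understand it. */
	if ((req->comp_mask & USE_START_WQE_IDX) && hw_has_rts_ae)
		return 4;
	return 0;
}

int
main(void)
{
	struct create_qp_req old_req = {0};
	struct create_qp_req new_req = { .comp_mask = USE_START_WQE_IDX };

	printf("old lib: %u, new lib: %u\n",
	    pick_start_wqe_idx(&old_req, 1), pick_start_wqe_idx(&new_req, 1));
	return 0;
}
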
@@ -740,7 +585,6 @@
roce_info->rd_en = true;
roce_info->wr_rdresp_en = true;
- roce_info->bind_en = true;
roce_info->dcqcn_en = false;
roce_info->rtomin = iwdev->roce_rtomin;
@@ -772,7 +616,6 @@
ether_addr_copy(iwarp_info->mac_addr, if_getlladdr(iwdev->netdev));
iwarp_info->rd_en = true;
iwarp_info->wr_rdresp_en = true;
- iwarp_info->bind_en = true;
iwarp_info->ecn_en = true;
iwarp_info->rtomin = 5;
@@ -803,6 +646,8 @@
if (init_attr->cap.max_inline_data > uk_attrs->max_hw_inline ||
init_attr->cap.max_send_sge > uk_attrs->max_hw_wq_frags ||
+ init_attr->cap.max_send_wr > uk_attrs->max_hw_wq_quanta ||
+ init_attr->cap.max_recv_wr > uk_attrs->max_hw_rq_quanta ||
init_attr->cap.max_recv_sge > uk_attrs->max_hw_wq_frags)
return -EINVAL;
@@ -857,8 +702,6 @@
}
if (iwqp->roce_info.rd_en)
acc_flags |= IB_ACCESS_REMOTE_READ;
- if (iwqp->roce_info.bind_en)
- acc_flags |= IB_ACCESS_MW_BIND;
} else {
if (iwqp->iwarp_info.wr_rdresp_en) {
acc_flags |= IB_ACCESS_LOCAL_WRITE;
@@ -866,8 +709,6 @@
}
if (iwqp->iwarp_info.rd_en)
acc_flags |= IB_ACCESS_REMOTE_READ;
- if (iwqp->iwarp_info.bind_en)
- acc_flags |= IB_ACCESS_MW_BIND;
}
return acc_flags;
}
@@ -1267,11 +1108,7 @@
if (udata && udata->outlen && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
struct irdma_ucontext *ucontext;
-#if __FreeBSD_version >= 1400026
ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
-#else
- ucontext = to_ucontext(ibqp->uobject->context);
-#endif
if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
!iwqp->push_wqe_mmap_entry &&
!irdma_setup_push_mmap_entries(ucontext, iwqp,
@@ -1284,7 +1121,8 @@
udata->outlen));
if (ret) {
irdma_remove_push_mmap_entries(iwqp);
- irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
+ irdma_debug(&iwdev->rf->sc_dev,
+ IRDMA_DEBUG_VERBS,
"copy_to_udata failed\n");
return ret;
}
@@ -1517,11 +1355,7 @@
dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) {
struct irdma_ucontext *ucontext;
-#if __FreeBSD_version >= 1400026
ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
-#else
- ucontext = to_ucontext(ibqp->uobject->context);
-#endif
if (iwqp->sc_qp.push_idx != IRDMA_INVALID_PUSH_PAGE_INDEX &&
!iwqp->push_wqe_mmap_entry &&
!irdma_setup_push_mmap_entries(ucontext, iwqp,
@@ -1660,11 +1494,7 @@
if (udata) {
struct irdma_resize_cq_req req = {};
struct irdma_ucontext *ucontext =
-#if __FreeBSD_version >= 1400026
rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
-#else
- to_ucontext(ibcq->uobject->context);
-#endif
/* CQ resize not supported with legacy GEN_1 libi40iw */
if (ucontext->legacy_mode)
@@ -1758,8 +1588,10 @@
/**
* irdma_get_mr_access - get hw MR access permissions from IB access flags
* @access: IB access flags
+ * @hw_rev: Hardware version
*/
-static inline u16 irdma_get_mr_access(int access){
+static inline u16 irdma_get_mr_access(int access, u8 hw_rev)
+{
u16 hw_access = 0;
hw_access |= (access & IB_ACCESS_LOCAL_WRITE) ?
@@ -1768,8 +1600,6 @@
IRDMA_ACCESS_FLAGS_REMOTEWRITE : 0;
hw_access |= (access & IB_ACCESS_REMOTE_READ) ?
IRDMA_ACCESS_FLAGS_REMOTEREAD : 0;
- hw_access |= (access & IB_ACCESS_MW_BIND) ?
- IRDMA_ACCESS_FLAGS_BIND_WINDOW : 0;
hw_access |= (access & IB_ZERO_BASED) ?
IRDMA_ACCESS_FLAGS_ZERO_BASED : 0;
hw_access |= IRDMA_ACCESS_FLAGS_LOCALREAD;
@@ -2004,81 +1834,6 @@
return err;
}
-/**
- * irdma_hw_alloc_mw - create the hw memory window
- * @iwdev: irdma device
- * @iwmr: pointer to memory window info
- */
-int
-irdma_hw_alloc_mw(struct irdma_device *iwdev, struct irdma_mr *iwmr)
-{
- struct irdma_mw_alloc_info *info;
- struct irdma_pd *iwpd = to_iwpd(iwmr->ibmr.pd);
- struct irdma_cqp_request *cqp_request;
- struct cqp_cmds_info *cqp_info;
- int status;
-
- cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
- if (!cqp_request)
- return -ENOMEM;
-
- cqp_info = &cqp_request->info;
- info = &cqp_info->in.u.mw_alloc.info;
- memset(info, 0, sizeof(*info));
- if (iwmr->ibmw.type == IB_MW_TYPE_1)
- info->mw_wide = true;
-
- info->page_size = PAGE_SIZE;
- info->mw_stag_index = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
- info->pd_id = iwpd->sc_pd.pd_id;
- info->remote_access = true;
- cqp_info->cqp_cmd = IRDMA_OP_MW_ALLOC;
- cqp_info->post_sq = 1;
- cqp_info->in.u.mw_alloc.dev = &iwdev->rf->sc_dev;
- cqp_info->in.u.mw_alloc.scratch = (uintptr_t)cqp_request;
- status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
- irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
-
- return status;
-}
-
-/**
- * irdma_dealloc_mw - Dealloc memory window
- * @ibmw: memory window structure.
- */
-static int
-irdma_dealloc_mw(struct ib_mw *ibmw)
-{
- struct ib_pd *ibpd = ibmw->pd;
- struct irdma_pd *iwpd = to_iwpd(ibpd);
- struct irdma_mr *iwmr = to_iwmr((struct ib_mr *)ibmw);
- struct irdma_device *iwdev = to_iwdev(ibmw->device);
- struct irdma_cqp_request *cqp_request;
- struct cqp_cmds_info *cqp_info;
- struct irdma_dealloc_stag_info *info;
-
- cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, true);
- if (!cqp_request)
- return -ENOMEM;
-
- cqp_info = &cqp_request->info;
- info = &cqp_info->in.u.dealloc_stag.info;
- memset(info, 0, sizeof(*info));
- info->pd_id = iwpd->sc_pd.pd_id;
- info->stag_idx = RS_64_1(ibmw->rkey, IRDMA_CQPSQ_STAG_IDX_S);
- info->mr = false;
- cqp_info->cqp_cmd = IRDMA_OP_DEALLOC_STAG;
- cqp_info->post_sq = 1;
- cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
- cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
- irdma_handle_cqp_op(iwdev->rf, cqp_request);
- irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
- irdma_free_stag(iwdev, iwmr->stag);
- kfree(iwmr);
-
- return 0;
-}
-
/**
* irdma_hw_alloc_stag - cqp command to allocate stag
* @iwdev: irdma device
@@ -2199,7 +1954,8 @@
stag_info->stag_key = (u8)iwmr->stag;
stag_info->total_len = iwmr->len;
stag_info->all_memory = (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) ? true : false;
- stag_info->access_rights = irdma_get_mr_access(access);
+ stag_info->access_rights = irdma_get_mr_access(access,
+ iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev);
stag_info->pd_id = iwpd->sc_pd.pd_id;
if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED)
stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED;
@@ -2273,14 +2029,18 @@
}
/*
- * irdma_reg_user_mr_type_mem - Handle memory registration @iwmr - irdma mr @access - access rights
+ * irdma_reg_user_mr_type_mem - Handle memory registration
+ * @iwmr - irdma mr
+ * @access - access rights
+ * @create_stag - flag to create stag or not
*/
static int
-irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access)
+irdma_reg_user_mr_type_mem(struct irdma_mr *iwmr, int access,
+ bool create_stag)
{
struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
struct irdma_pbl *iwpbl = &iwmr->iwpbl;
- u32 stag;
+ u32 stag = 0;
int err;
u8 lvl;
@@ -2299,15 +2059,17 @@
}
}
- stag = irdma_create_stag(iwdev);
- if (!stag) {
- err = -ENOMEM;
- goto free_pble;
- }
+ if (create_stag) {
+ stag = irdma_create_stag(iwdev);
+ if (!stag) {
+ err = -ENOMEM;
+ goto free_pble;
+ }
- iwmr->stag = stag;
- iwmr->ibmr.rkey = stag;
- iwmr->ibmr.lkey = stag;
+ iwmr->stag = stag;
+ iwmr->ibmr.rkey = stag;
+ iwmr->ibmr.lkey = stag;
+ }
iwmr->access = access;
err = irdma_hwreg_mr(iwdev, iwmr, access);
if (err)
@@ -2316,7 +2078,8 @@
return 0;
err_hwreg:
- irdma_free_stag(iwdev, stag);
+ if (stag)
+ irdma_free_stag(iwdev, stag);
free_pble:
if (iwpbl->pble_alloc.level != PBLE_LEVEL_0 && iwpbl->pbl_allocated)
@@ -2351,11 +2114,7 @@
if (err)
return err;
-#if __FreeBSD_version >= 1400026
ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
-#else
- ucontext = to_ucontext(iwmr->ibpd.pd->uobject->context);
-#endif
spin_lock_irqsave(&ucontext->qp_reg_mem_list_lock, flags);
list_add_tail(&iwpbl->list, &ucontext->qp_reg_mem_list);
iwpbl->on_list = true;
@@ -2390,11 +2149,7 @@
if (err)
return err;
-#if __FreeBSD_version >= 1400026
ucontext = rdma_udata_to_drv_context(udata, struct irdma_ucontext, ibucontext);
-#else
- ucontext = to_ucontext(iwmr->ibmr.pd->uobject->context);
-#endif
spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags);
list_add_tail(&iwpbl->list, &ucontext->cq_reg_mem_list);
iwpbl->on_list = true;
@@ -2463,7 +2218,7 @@
break;
case IRDMA_MEMREG_TYPE_MEM:
- err = irdma_reg_user_mr_type_mem(iwmr, access);
+ err = irdma_reg_user_mr_type_mem(iwmr, access, true);
if (err)
goto error;
@@ -2540,10 +2295,8 @@
{
struct irdma_device *iwdev = to_iwdev(iwmr->ibmr.device);
struct irdma_pbl *iwpbl = &iwmr->iwpbl;
- struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
struct ib_pd *pd = iwmr->ibmr.pd;
struct ib_umem *region;
- u8 lvl;
int err;
region = ib_umem_get(pd->uobject->context, start, len, iwmr->access, 0);
@@ -2564,35 +2317,14 @@
iwmr->page_cnt = irdma_ib_umem_num_dma_blocks(region, iwmr->page_size,
virt);
- lvl = iwmr->page_cnt != 1 ? PBLE_LEVEL_1 | PBLE_LEVEL_2 : PBLE_LEVEL_0;
-
- err = irdma_setup_pbles(iwdev->rf, iwmr, lvl);
- if (err)
- goto error;
-
- if (lvl) {
- err = irdma_check_mr_contiguous(palloc,
- iwmr->page_size);
- if (err) {
- irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
- iwpbl->pbl_allocated = false;
- }
- }
-
- err = irdma_hwreg_mr(iwdev, iwmr, iwmr->access);
+ err = irdma_reg_user_mr_type_mem(iwmr, iwmr->access, false);
if (err)
- goto error;
+ goto err;
return &iwmr->ibmr;
-error:
- if (palloc->level != PBLE_LEVEL_0 && iwpbl->pbl_allocated) {
- irdma_free_pble(iwdev->rf->pble_rsrc, palloc);
- iwpbl->pbl_allocated = false;
- }
+err:
ib_umem_release(region);
- iwmr->region = NULL;
-
return ERR_PTR(err);
}
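
[Editor's note: the re-registration path above now funnels into irdma_reg_user_mr_type_mem() with create_stag = false, so PBLE setup and hardware registration are shared while the MR keeps its existing STag. A toy sketch of the ownership rule behind the stag = 0 initialization and the `if (stag)` unwind in that function; names are illustrative, not driver code.]

#include <errno.h>
#include <stdbool.h>

/* Toy model: the helper frees a key on failure only if it allocated
 * one itself, which is why stag starts at 0 in the real function. */
static unsigned int
alloc_stag(void) { static unsigned int next = 1; return next++; }

static void
free_stag(unsigned int stag) { (void)stag; /* return key to pool */ }

static int
hw_register(void) { return -EIO; /* pretend HW registration failed */ }

static int
reg_mr_type_mem(unsigned int *mr_stag, bool create_stag)
{
	unsigned int stag = 0;
	int err;

	if (create_stag) {
		stag = alloc_stag();
		if (!stag)
			return -ENOMEM;
		*mr_stag = stag;
	}
	err = hw_register();
	if (err && stag)	/* only unwind what we allocated */
		free_stag(stag);
	return err;
}

int
main(void)
{
	unsigned int stag = 0;

	return reg_mr_type_mem(&stag, true) == -EIO ? 0 : 1;
}
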
@@ -2699,25 +2431,6 @@
}
}
-/**
- * irdma_copy_sg_list - copy sg list for qp
- * @sg_list: copied into sg_list
- * @sgl: copy from sgl
- * @num_sges: count of sg entries
- */
-static void
-irdma_copy_sg_list(struct irdma_sge *sg_list, struct ib_sge *sgl,
- int num_sges)
-{
- unsigned int i;
-
- for (i = 0; i < num_sges; i++) {
- sg_list[i].tag_off = sgl[i].addr;
- sg_list[i].len = sgl[i].length;
- sg_list[i].stag = sgl[i].lkey;
- }
-}
-
/**
* irdma_post_send - kernel application wr
* @ibqp: qp ptr for wr
@@ -2778,7 +2491,7 @@
}
info.op.send.num_sges = ib_wr->num_sge;
- info.op.send.sg_list = (struct irdma_sge *)ib_wr->sg_list;
+ info.op.send.sg_list = ib_wr->sg_list;
if (iwqp->ibqp.qp_type == IB_QPT_UD ||
iwqp->ibqp.qp_type == IB_QPT_GSI) {
ah = to_iwah(ud_wr(ib_wr)->ah);
@@ -2809,8 +2522,8 @@
info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
- info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
- info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
+ info.op.rdma_write.rem_addr.addr = rdma_wr(ib_wr)->remote_addr;
+ info.op.rdma_write.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
if (ib_wr->send_flags & IB_SEND_INLINE)
err = irdma_uk_inline_rdma_write(ukqp, &info, false);
else
@@ -2826,8 +2539,8 @@
break;
}
info.op_type = IRDMA_OP_TYPE_RDMA_READ;
- info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
- info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
+ info.op.rdma_read.rem_addr.addr = rdma_wr(ib_wr)->remote_addr;
+ info.op.rdma_read.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list;
info.op.rdma_read.num_lo_sges = ib_wr->num_sge;
err = irdma_uk_rdma_read(ukqp, &info, inv_stag, false);
@@ -2845,7 +2558,9 @@
stag_info.signaled = info.signaled;
stag_info.read_fence = info.read_fence;
- stag_info.access_rights = irdma_get_mr_access(reg_wr(ib_wr)->access);
+ stag_info.access_rights =
+ irdma_get_mr_access(reg_wr(ib_wr)->access,
+ dev->hw_attrs.uk_attrs.hw_rev);
stag_info.stag_key = reg_wr(ib_wr)->key & 0xff;
stag_info.stag_idx = reg_wr(ib_wr)->key >> 8;
stag_info.page_size = reg_wr(ib_wr)->mr->page_size;
@@ -2907,7 +2622,6 @@
struct irdma_qp *iwqp = to_iwqp(ibqp);
struct irdma_qp_uk *ukqp = &iwqp->sc_qp.qp_uk;
struct irdma_post_rq_info post_recv = {0};
- struct irdma_sge *sg_list = iwqp->sg_list;
unsigned long flags;
int err = 0;
@@ -2920,8 +2634,7 @@
}
post_recv.num_sges = ib_wr->num_sge;
post_recv.wr_id = ib_wr->wr_id;
- irdma_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
- post_recv.sg_list = sg_list;
+ post_recv.sg_list = ib_wr->sg_list;
err = irdma_uk_post_receive(ukqp, &post_recv);
if (err) {
irdma_debug(&iwqp->iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
@@ -3336,7 +3049,7 @@
if (!ipv6_addr_v4mapped((struct in6_addr *)ibgid)) {
irdma_copy_ip_ntohl(ip_addr,
sgid_addr.saddr_in6.sin6_addr.__u6_addr.__u6_addr32);
- irdma_netdev_vlan_ipv6(iwqp->cm_id, ip_addr, &vlan_id, NULL);
+ irdma_get_vlan_mac_ipv6(iwqp->cm_id, ip_addr, &vlan_id, NULL);
ipv4 = false;
irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_VERBS,
"qp_id=%d, IP6address=%x:%x:%x:%x\n", ibqp->qp_num,
@@ -3539,9 +3252,7 @@
return 0;
}
-static if_t
-irdma_get_netdev(struct ib_device *ibdev, u8 port_num)
-{
+static if_t irdma_get_netdev(struct ib_device *ibdev, u8 port_num){
struct irdma_device *iwdev = to_iwdev(ibdev);
if (iwdev->netdev) {
@@ -3557,7 +3268,6 @@
{
struct ib_device *dev_ops = ibdev;
-#if __FreeBSD_version >= 1400000
dev_ops->ops.driver_id = RDMA_DRIVER_I40IW;
dev_ops->ops.size_ib_ah = IRDMA_SET_RDMA_OBJ_SIZE(ib_ah, irdma_ah, ibah);
dev_ops->ops.size_ib_cq = IRDMA_SET_RDMA_OBJ_SIZE(ib_cq, irdma_cq, ibcq);
@@ -3566,15 +3276,12 @@
irdma_ucontext,
ibucontext);
-#endif /* __FreeBSD_version >= 1400000 */
dev_ops->alloc_hw_stats = irdma_alloc_hw_stats;
dev_ops->alloc_mr = irdma_alloc_mr;
- dev_ops->alloc_mw = irdma_alloc_mw;
dev_ops->alloc_pd = irdma_alloc_pd;
dev_ops->alloc_ucontext = irdma_alloc_ucontext;
dev_ops->create_cq = irdma_create_cq;
dev_ops->create_qp = irdma_create_qp;
- dev_ops->dealloc_mw = irdma_dealloc_mw;
dev_ops->dealloc_pd = irdma_dealloc_pd;
dev_ops->dealloc_ucontext = irdma_dealloc_ucontext;
dev_ops->dereg_mr = irdma_dereg_mr;
@@ -3587,9 +3294,7 @@
dev_ops->get_netdev = irdma_get_netdev;
dev_ops->map_mr_sg = irdma_map_mr_sg;
dev_ops->mmap = irdma_mmap;
-#if __FreeBSD_version >= 1400026
dev_ops->mmap_free = irdma_mmap_free;
-#endif
dev_ops->poll_cq = irdma_poll_cq;
dev_ops->post_recv = irdma_post_recv;
dev_ops->post_send = irdma_post_send;
@@ -3607,6 +3312,7 @@
irdma_set_device_mcast_ops(struct ib_device *ibdev)
{
struct ib_device *dev_ops = ibdev;
+
dev_ops->attach_mcast = irdma_attach_mcast;
dev_ops->detach_mcast = irdma_detach_mcast;
}
@@ -3615,6 +3321,7 @@
irdma_set_device_roce_ops(struct ib_device *ibdev)
{
struct ib_device *dev_ops = ibdev;
+
dev_ops->create_ah = irdma_create_ah;
dev_ops->destroy_ah = irdma_destroy_ah;
dev_ops->get_link_layer = irdma_get_link_layer;
diff --git a/sys/dev/irdma/irdma_ws.h b/sys/dev/irdma/irdma_ws.h
--- a/sys/dev/irdma/irdma_ws.h
+++ b/sys/dev/irdma/irdma_ws.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2022 Intel Corporation
+ * Copyright (c) 2015 - 2023 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/sys/dev/irdma/irdma_ws.c b/sys/dev/irdma/irdma_ws.c
--- a/sys/dev/irdma/irdma_ws.c
+++ b/sys/dev/irdma/irdma_ws.c
@@ -57,7 +57,7 @@
struct irdma_ws_node *node;
u16 node_index = 0;
- ws_mem.size = sizeof(struct irdma_ws_node);
+ ws_mem.size = sizeof(*node);
ws_mem.va = kzalloc(ws_mem.size, GFP_KERNEL);
if (!ws_mem.va)
return NULL;
@@ -109,7 +109,7 @@
irdma_free_ws_node_id(vsi->dev, node->index);
ws_mem.va = node;
- ws_mem.size = sizeof(struct irdma_ws_node);
+ ws_mem.size = sizeof(*node);
kfree(ws_mem.va);
}
diff --git a/sys/dev/irdma/osdep.h b/sys/dev/irdma/osdep.h
--- a/sys/dev/irdma/osdep.h
+++ b/sys/dev/irdma/osdep.h
@@ -85,7 +85,7 @@
#define STATS_TIMER_DELAY 60000
/* a couple of linux size defines */
-#define SZ_128 128
+#define SZ_128 128
#define SPEED_1000 1000
#define SPEED_10000 10000
#define SPEED_20000 20000
@@ -95,17 +95,11 @@
#define irdma_mb() mb()
#define irdma_wmb() wmb()
-#ifndef smp_mb
-#define smp_mb() mb()
-#endif
#define irdma_get_virt_to_phy vtophys
#define __aligned_u64 uint64_t __aligned(8)
#define VLAN_PRIO_SHIFT 13
-#if __FreeBSD_version < 1400000
-#define IB_USER_VERBS_EX_CMD_MODIFY_QP IB_USER_VERBS_CMD_MODIFY_QP
-#endif
/*
* debug definition section