Commit 8218c4d

libxscale: Add support for qp management
This patch adds support for the following qp management verbs:

1. create_qp
2. query_qp
3. modify_qp
4. destroy_qp

Signed-off-by: Tian Xin <tianx@yunsilicon.com>
Signed-off-by: Wei Honggang <weihg@yunsilicon.com>
Signed-off-by: Zhao Qianwei <zhaoqw@yunsilicon.com>
Signed-off-by: Li Qiang <liq@yunsilicon.com>
Signed-off-by: Yan Lei <jacky@yunsilicon.com>
Parent: 27e1833

5 files changed, 831 insertions(+), 10 deletions(-)
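For context, the four verbs added here are reached through the standard libibverbs entry points. Below is a minimal, illustrative sketch of the lifecycle this commit enables. It assumes a PD and CQ already exist, shows only the RESET-to-INIT transition (INIT-to-RTR-to-RTS needs remote QP details), and trims error handling; it is not part of the commit.

/* Sketch only: exercise create_qp/modify_qp/query_qp/destroy_qp
 * through libibverbs.  'pd' and 'cq' are assumed to exist. */
#include <infiniband/verbs.h>

static int qp_lifecycle_demo(struct ibv_pd *pd, struct ibv_cq *cq)
{
        struct ibv_qp_init_attr init_attr = {
                .send_cq = cq,
                .recv_cq = cq,
                .cap = { .max_send_wr = 16, .max_recv_wr = 16,
                         .max_send_sge = 1, .max_recv_sge = 1 },
                .qp_type = IBV_QPT_RC,
        };
        struct ibv_qp_attr attr = {
                .qp_state = IBV_QPS_INIT,
                .pkey_index = 0,
                .port_num = 1,
        };
        struct ibv_qp_init_attr query_init;
        struct ibv_qp *qp;

        qp = ibv_create_qp(pd, &init_attr);             /* provider create_qp */
        if (!qp)
                return -1;

        if (ibv_modify_qp(qp, &attr,                    /* provider modify_qp */
                          IBV_QP_STATE | IBV_QP_PKEY_INDEX |
                          IBV_QP_PORT | IBV_QP_ACCESS_FLAGS))
                goto err;

        if (ibv_query_qp(qp, &attr, IBV_QP_STATE, &query_init))
                goto err;                               /* provider query_qp */

        return ibv_destroy_qp(qp);                      /* provider destroy_qp */
err:
        ibv_destroy_qp(qp);
        return -1;
}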

providers/xscale/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
@@ -2,6 +2,7 @@ rdma_provider(xscale
   xscale.c
   verbs.c
   cq.c
+  qp.c
   xsc_hsi.c
   buf.c
 )

providers/xscale/cq.c

Lines changed: 90 additions & 10 deletions
@@ -76,16 +76,6 @@ static inline u8 xsc_get_cqe_opcode(struct xsc_context *ctx,
        return xsc_msg_opcode[msg_opcode][cqe->type][cqe->with_immdt];
 }
 
-struct xsc_qp *xsc_find_qp(struct xsc_context *ctx, u32 qpn)
-{
-       int tind = qpn >> XSC_QP_TABLE_SHIFT;
-
-       if (ctx->qp_table[tind].refcnt)
-               return ctx->qp_table[tind].table[qpn & XSC_QP_TABLE_MASK];
-       else
-               return NULL;
-}
-
 static inline int get_qp_ctx(struct xsc_context *xctx,
                              struct xsc_resource **cur_rsc,
                              u32 qpn) ALWAYS_INLINE;
@@ -529,3 +519,93 @@ int xsc_free_cq_buf(struct xsc_context *ctx, struct xsc_buf *buf)
 {
        return xsc_free_actual_buf(ctx, buf);
 }
+
+static int is_equal_rsn(struct xsc_cqe64 *cqe64, uint32_t rsn)
+{
+       return rsn == (be32toh(cqe64->sop_drop_qpn) & 0xffffff);
+}
+
+static inline int is_equal_uidx(struct xsc_cqe64 *cqe64, uint32_t uidx)
+{
+       return uidx == (be32toh(cqe64->srqn_uidx) & 0xffffff);
+}
+
+static inline int free_res_cqe(struct xsc_cqe64 *cqe64, uint32_t rsn,
+                              int cqe_version)
+{
+       if (cqe_version) {
+               if (is_equal_uidx(cqe64, rsn))
+                       return 1;
+       } else {
+               if (is_equal_rsn(cqe64, rsn))
+                       return 1;
+       }
+
+       return 0;
+}
+
+void __xsc_cq_clean(struct xsc_cq *cq, u32 rsn)
+{
+       u32 prod_index;
+       int nfreed = 0;
+       struct xsc_cqe64 *cqe64, *dest64;
+       void *cqe, *dest;
+       u8 owner_bit;
+       int cqe_version;
+
+       if (!cq || cq->flags & XSC_CQ_FLAGS_DV_OWNED)
+               return;
+       xsc_dbg(to_xctx(cq->verbs_cq.cq_ex.context)->dbg_fp, XSC_DBG_CQ, "\n");
+
+       /*
+        * First we need to find the current producer index, so we
+        * know where to start cleaning from. It doesn't matter if HW
+        * adds new entries after this loop -- the QP we're worried
+        * about is already in RESET, so the new entries won't come
+        * from our QP and therefore don't need to be checked.
+        */
+       for (prod_index = cq->cons_index; get_sw_cqe(cq, prod_index);
+            ++prod_index)
+               if (prod_index == cq->cons_index + cq->verbs_cq.cq_ex.cqe)
+                       break;
+
+       /*
+        * Now sweep backwards through the CQ, removing CQ entries
+        * that match our QP by copying older entries on top of them.
+        */
+       cqe_version = (to_xctx(cq->verbs_cq.cq_ex.context))->cqe_version;
+       while ((int)(--prod_index) - (int)cq->cons_index >= 0) {
+               cqe = get_cqe(cq, prod_index & (cq->verbs_cq.cq_ex.cqe - 1));
+               cqe64 = (cq->cqe_sz == 64) ? cqe : cqe + 64;
+               if (free_res_cqe(cqe64, rsn, cqe_version)) {
+                       ++nfreed;
+               } else if (nfreed) {
+                       dest = get_cqe(cq,
+                                      (prod_index + nfreed) &
+                                      (cq->verbs_cq.cq_ex.cqe - 1));
+                       dest64 = (cq->cqe_sz == 64) ? dest : dest + 64;
+                       owner_bit = dest64->op_own & XSC_CQE_OWNER_MASK;
+                       memcpy(dest, cqe, cq->cqe_sz);
+                       dest64->op_own = owner_bit |
+                               (dest64->op_own & ~XSC_CQE_OWNER_MASK);
+               }
+       }
+
+       if (nfreed) {
+               cq->cons_index += nfreed;
+               /*
+                * Make sure update of buffer contents is done before
+                * updating consumer index.
+                */
+               udma_to_device_barrier();
+               update_cons_index(cq);
+       }
+}
+
+void xsc_cq_clean(struct xsc_cq *cq, uint32_t qpn)
+{
+       xsc_spin_lock(&cq->lock);
+       __xsc_cq_clean(cq, qpn);
+       xsc_spin_unlock(&cq->lock);
+}
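The backward sweep in __xsc_cq_clean is the classic CQ-compaction pattern seen in other rdma-core providers such as mlx5: count the CQEs that belong to the reset QP and copy each surviving entry forward by the running count, preserving the owner bit of the slot it lands in. The following self-contained sketch shows the same compaction on a plain array; fake_cqe and clean_fake_cq are invented for the demo, and the ring wrap-around mask and owner-bit handling are deliberately left out.

/* Demo only: compact [cons_index, prod_index) the way
 * __xsc_cq_clean compacts the CQ ring. */
#include <stdio.h>

struct fake_cqe {
        unsigned int qpn;       /* stands in for sop_drop_qpn & 0xffffff */
        int data;
};

static int clean_fake_cq(struct fake_cqe *ring, unsigned int cons_index,
                         unsigned int prod_index, unsigned int rsn)
{
        int nfreed = 0;

        while ((int)(--prod_index) - (int)cons_index >= 0) {
                struct fake_cqe *cqe = &ring[prod_index];

                if (cqe->qpn == rsn)
                        ++nfreed;               /* belongs to the dead QP */
                else if (nfreed)
                        ring[prod_index + nfreed] = *cqe; /* copy survivor up */
        }
        return nfreed;
}

int main(void)
{
        struct fake_cqe ring[8] = {
                { 1, 10 }, { 2, 11 }, { 1, 12 }, { 3, 13 }, { 2, 14 },
        };
        unsigned int cons = 0, prod = 5;

        cons += clean_fake_cq(ring, cons, prod, 2); /* cq->cons_index += nfreed */
        for (unsigned int i = cons; i < prod; ++i)  /* prints data 10, 12, 13 */
                printf("qpn=%u data=%d\n", ring[i].qpn, ring[i].data);
        return 0;
}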

providers/xscale/qp.c

Lines changed: 110 additions & 0 deletions
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 - 2022, Shanghai Yunsilicon Technology Co., Ltd.
+ * All rights reserved.
+ */
+
+#include <config.h>
+
+#include <stdlib.h>
+#include <pthread.h>
+#include <string.h>
+#include <errno.h>
+#include <stdio.h>
+#include <util/compiler.h>
+
+#include "xscale.h"
+#include "xsc_hsi.h"
+
+struct xsc_qp *xsc_find_qp(struct xsc_context *ctx, uint32_t qpn)
+{
+       int tind = qpn >> XSC_QP_TABLE_SHIFT;
+
+       if (ctx->qp_table[tind].refcnt)
+               return ctx->qp_table[tind].table[qpn & XSC_QP_TABLE_MASK];
+       else
+               return NULL;
+}
+
+int xsc_store_qp(struct xsc_context *ctx, uint32_t qpn, struct xsc_qp *qp)
+{
+       int tind = qpn >> XSC_QP_TABLE_SHIFT;
+
+       if (!ctx->qp_table[tind].refcnt) {
+               ctx->qp_table[tind].table =
+                       calloc(XSC_QP_TABLE_MASK + 1, sizeof(struct xsc_qp *));
+               if (!ctx->qp_table[tind].table)
+                       return -1;
+       }
+
+       ++ctx->qp_table[tind].refcnt;
+       ctx->qp_table[tind].table[qpn & XSC_QP_TABLE_MASK] = qp;
+       return 0;
+}
+
+void xsc_clear_qp(struct xsc_context *ctx, uint32_t qpn)
+{
+       int tind = qpn >> XSC_QP_TABLE_SHIFT;
+
+       if (!--ctx->qp_table[tind].refcnt)
+               free(ctx->qp_table[tind].table);
+       else
+               ctx->qp_table[tind].table[qpn & XSC_QP_TABLE_MASK] = NULL;
+}
+
+int xsc_err_state_qp(struct ibv_qp *qp, enum ibv_qp_state cur_state,
+                    enum ibv_qp_state state)
+{
+       struct xsc_err_state_qp_node *tmp, *err_rq_node, *err_sq_node;
+       struct xsc_qp *xqp = to_xqp(qp);
+       int ret = 0;
+
+       xsc_dbg(to_xctx(qp->context)->dbg_fp, XSC_DBG_QP,
+               "modify qp: qpid %d, cur_qp_state %d, qp_state %d\n",
+               xqp->rsc.rsn, cur_state, state);
+       if (cur_state == IBV_QPS_ERR && state != IBV_QPS_ERR) {
+               if (qp->recv_cq) {
+                       list_for_each_safe(&to_xcq(qp->recv_cq)->err_state_qp_list,
+                                          err_rq_node, tmp, entry) {
+                               if (err_rq_node->qp_id == xqp->rsc.rsn) {
+                                       list_del(&err_rq_node->entry);
+                                       free(err_rq_node);
+                               }
+                       }
+               }
+
+               if (qp->send_cq) {
+                       list_for_each_safe(&to_xcq(qp->send_cq)->err_state_qp_list,
+                                          err_sq_node, tmp, entry) {
+                               if (err_sq_node->qp_id == xqp->rsc.rsn) {
+                                       list_del(&err_sq_node->entry);
+                                       free(err_sq_node);
+                               }
+                       }
+               }
+               return ret;
+       }
+
+       if (cur_state != IBV_QPS_ERR && state == IBV_QPS_ERR) {
+               if (qp->recv_cq) {
+                       err_rq_node = calloc(1, sizeof(*err_rq_node));
+                       if (!err_rq_node)
+                               return ENOMEM;
+                       err_rq_node->qp_id = xqp->rsc.rsn;
+                       err_rq_node->is_sq = false;
+                       list_add_tail(&to_xcq(qp->recv_cq)->err_state_qp_list,
+                                     &err_rq_node->entry);
+               }
+
+               if (qp->send_cq) {
+                       err_sq_node = calloc(1, sizeof(*err_sq_node));
+                       if (!err_sq_node)
+                               return ENOMEM;
+                       err_sq_node->qp_id = xqp->rsc.rsn;
+                       err_sq_node->is_sq = true;
+                       list_add_tail(&to_xcq(qp->send_cq)->err_state_qp_list,
+                                     &err_sq_node->entry);
+               }
+       }
+       return ret;
+}
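qp.c now owns the qpn-to-xsc_qp lookup table used by the CQ poll path (xsc_find_qp moved here from cq.c). It is a two-level, reference-counted table: qpn >> XSC_QP_TABLE_SHIFT selects a second-level table that xsc_store_qp allocates lazily on first use, and xsc_clear_qp frees an entire second level once its refcount drops to zero, a scheme shared with other rdma-core providers. Below is a standalone sketch of the idea; SHIFT, MASK, and TOP are demo values, not the driver's constants.

/* Demo only: two-level, refcounted lookup table as in qp.c. */
#include <stdio.h>
#include <stdlib.h>

#define SHIFT 8
#define MASK  ((1u << SHIFT) - 1)
#define TOP   16

struct level { int refcnt; void **slots; };
static struct level tbl[TOP];

static int store(unsigned int qpn, void *obj)
{
        struct level *t = &tbl[qpn >> SHIFT];

        if (!t->refcnt) {                       /* lazily allocate second level */
                t->slots = calloc(MASK + 1, sizeof(void *));
                if (!t->slots)
                        return -1;
        }
        ++t->refcnt;
        t->slots[qpn & MASK] = obj;
        return 0;
}

static void *find(unsigned int qpn)
{
        struct level *t = &tbl[qpn >> SHIFT];

        return t->refcnt ? t->slots[qpn & MASK] : NULL;
}

static void clear(unsigned int qpn)
{
        struct level *t = &tbl[qpn >> SHIFT];

        if (!--t->refcnt)                       /* last entry frees the level */
                free(t->slots);
        else
                t->slots[qpn & MASK] = NULL;
}

int main(void)
{
        int dummy;

        store(0x123, &dummy);
        printf("find(0x123) = %p\n", find(0x123));      /* address of dummy */
        clear(0x123);
        printf("after clear  = %p\n", find(0x123));     /* NULL */
        return 0;
}

xsc_err_state_qp, for its part, keeps per-CQ lists of QP numbers that have entered IBV_QPS_ERR, adding a node on the transition into ERR and deleting it on the way out, presumably so the poll path can generate flush completions for those QPs.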
