Commit cfef452

Konstantin Taranov authored and rleon committed
RDMA/mana_ib: polling of CQs for GSI/UD
Add polling for the kernel CQs.
Process completion events for UD/GSI QPs.

Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
Link: https://patch.msgid.link/1737394039-28772-13-git-send-email-kotaranov@linux.microsoft.com
Reviewed-by: Shiraz Saleem <shirazsaleem@microsoft.com>
Reviewed-by: Long Li <longli@microsoft.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
1 parent 8001e92 commit cfef452

File tree

5 files changed (+202, -0 lines)

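With .poll_cq implemented, kernel ULPs can reap completions from mana_ib CQs through the standard verbs entry point. Below is a minimal consumer sketch, not part of this patch: the CQ setup and batch size are illustrative, while ib_poll_cq() and struct ib_wc are the stock kernel verbs API.

#include <rdma/ib_verbs.h>

/* Hedged sketch: drain pending work completions from a kernel CQ.
 * "cq" is assumed to come from ib_create_cq(); names are illustrative.
 */
static void example_drain_cq(struct ib_cq *cq)
{
	struct ib_wc wc[8];	/* arbitrary batch size */
	int n, i;

	/* ib_poll_cq() dispatches to mana_ib_poll_cq() for mana devices */
	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0) {
		for (i = 0; i < n; i++) {
			if (wc[i].status != IB_WC_SUCCESS)
				pr_warn("wr_id %llu failed with status %d\n",
					wc[i].wr_id, wc[i].status);
			/* complete the request identified by wc[i].wr_id */
		}
	}
}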

drivers/infiniband/hw/mana/cq.c
Lines changed: 135 additions & 0 deletions

@@ -90,6 +90,10 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		}
 	}
 
+	spin_lock_init(&cq->cq_lock);
+	INIT_LIST_HEAD(&cq->list_send_qp);
+	INIT_LIST_HEAD(&cq->list_recv_qp);
+
 	return 0;
 
 err_remove_cq_cb:
@@ -180,3 +184,134 @@ int mana_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 	mana_gd_ring_cq(gdma_cq, SET_ARM_BIT);
 	return 0;
 }
+
+static inline void handle_ud_sq_cqe(struct mana_ib_qp *qp, struct gdma_comp *cqe)
+{
+	struct mana_rdma_cqe *rdma_cqe = (struct mana_rdma_cqe *)cqe->cqe_data;
+	struct gdma_queue *wq = qp->ud_qp.queues[MANA_UD_SEND_QUEUE].kmem;
+	struct ud_sq_shadow_wqe *shadow_wqe;
+
+	shadow_wqe = shadow_queue_get_next_to_complete(&qp->shadow_sq);
+	if (!shadow_wqe)
+		return;
+
+	shadow_wqe->header.error_code = rdma_cqe->ud_send.vendor_error;
+
+	wq->tail += shadow_wqe->header.posted_wqe_size;
+	shadow_queue_advance_next_to_complete(&qp->shadow_sq);
+}
+
+static inline void handle_ud_rq_cqe(struct mana_ib_qp *qp, struct gdma_comp *cqe)
+{
+	struct mana_rdma_cqe *rdma_cqe = (struct mana_rdma_cqe *)cqe->cqe_data;
+	struct gdma_queue *wq = qp->ud_qp.queues[MANA_UD_RECV_QUEUE].kmem;
+	struct ud_rq_shadow_wqe *shadow_wqe;
+
+	shadow_wqe = shadow_queue_get_next_to_complete(&qp->shadow_rq);
+	if (!shadow_wqe)
+		return;
+
+	shadow_wqe->byte_len = rdma_cqe->ud_recv.msg_len;
+	shadow_wqe->src_qpn = rdma_cqe->ud_recv.src_qpn;
+	shadow_wqe->header.error_code = IB_WC_SUCCESS;
+
+	wq->tail += shadow_wqe->header.posted_wqe_size;
+	shadow_queue_advance_next_to_complete(&qp->shadow_rq);
+}
+
+static void mana_handle_cqe(struct mana_ib_dev *mdev, struct gdma_comp *cqe)
+{
+	struct mana_ib_qp *qp = mana_get_qp_ref(mdev, cqe->wq_num, cqe->is_sq);
+
+	if (!qp)
+		return;
+
+	if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_UD) {
+		if (cqe->is_sq)
+			handle_ud_sq_cqe(qp, cqe);
+		else
+			handle_ud_rq_cqe(qp, cqe);
+	}
+
+	mana_put_qp_ref(qp);
+}
+
+static void fill_verbs_from_shadow_wqe(struct mana_ib_qp *qp, struct ib_wc *wc,
+				       const struct shadow_wqe_header *shadow_wqe)
+{
+	const struct ud_rq_shadow_wqe *ud_wqe = (const struct ud_rq_shadow_wqe *)shadow_wqe;
+
+	wc->wr_id = shadow_wqe->wr_id;
+	wc->status = shadow_wqe->error_code;
+	wc->opcode = shadow_wqe->opcode;
+	wc->vendor_err = shadow_wqe->error_code;
+	wc->wc_flags = 0;
+	wc->qp = &qp->ibqp;
+	wc->pkey_index = 0;
+
+	if (shadow_wqe->opcode == IB_WC_RECV) {
+		wc->byte_len = ud_wqe->byte_len;
+		wc->src_qp = ud_wqe->src_qpn;
+		wc->wc_flags |= IB_WC_GRH;
+	}
+}
+
+static int mana_process_completions(struct mana_ib_cq *cq, int nwc, struct ib_wc *wc)
+{
+	struct shadow_wqe_header *shadow_wqe;
+	struct mana_ib_qp *qp;
+	int wc_index = 0;
+
+	/* process send shadow queue completions */
+	list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
+		while ((shadow_wqe = shadow_queue_get_next_to_consume(&qp->shadow_sq))
+		       != NULL) {
+			if (wc_index >= nwc)
+				goto out;
+
+			fill_verbs_from_shadow_wqe(qp, &wc[wc_index], shadow_wqe);
+			shadow_queue_advance_consumer(&qp->shadow_sq);
+			wc_index++;
+		}
+	}
+
+	/* process recv shadow queue completions */
+	list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
+		while ((shadow_wqe = shadow_queue_get_next_to_consume(&qp->shadow_rq))
+		       != NULL) {
+			if (wc_index >= nwc)
+				goto out;
+
+			fill_verbs_from_shadow_wqe(qp, &wc[wc_index], shadow_wqe);
+			shadow_queue_advance_consumer(&qp->shadow_rq);
+			wc_index++;
+		}
+	}
+
+out:
+	return wc_index;
+}
+
+int mana_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+	struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
+	struct mana_ib_dev *mdev = container_of(ibcq->device, struct mana_ib_dev, ib_dev);
+	struct gdma_queue *queue = cq->queue.kmem;
+	struct gdma_comp gdma_cqe;
+	unsigned long flags;
+	int num_polled = 0;
+	int comp_read, i;
+
+	spin_lock_irqsave(&cq->cq_lock, flags);
+	for (i = 0; i < num_entries; i++) {
+		comp_read = mana_gd_poll_cq(queue, &gdma_cqe, 1);
+		if (comp_read < 1)
+			break;
+		mana_handle_cqe(mdev, &gdma_cqe);
+	}
+
+	num_polled = mana_process_completions(cq, num_entries, wc);
+	spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+	return num_polled;
+}
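The poll path above is two-phase: hardware CQEs are first drained into per-QP shadow queues (mana_handle_cqe), then work completions are emitted in posting order (mana_process_completions). The sketch below illustrates the ring-cursor discipline this implies for struct shadow_queue; the field names and helpers mirror the calls used above but are an assumption about their semantics, not the driver's actual implementation.

/* Hedged sketch of the assumed shadow-queue cursor discipline.
 * Three cursors advance monotonically through a ring of shadow WQEs:
 *   producer         - advanced at post_send/post_recv time
 *   next_to_complete - advanced when a hardware CQE arrives (phase 1)
 *   consumer         - advanced when an ib_wc is handed out (phase 2)
 */
struct example_shadow_queue {
	u64 producer;
	u64 next_to_complete;
	u64 consumer;
	u64 size;		/* ring capacity in entries */
	u32 stride;		/* bytes per shadow WQE */
	void *buffer;
};

/* Phase 1: oldest posted-but-uncompleted WQE, or NULL if none pending. */
static void *example_get_next_to_complete(struct example_shadow_queue *q)
{
	if (q->next_to_complete == q->producer)
		return NULL;
	return (u8 *)q->buffer + (q->next_to_complete % q->size) * q->stride;
}

/* Phase 2: oldest completed-but-unreported WQE, or NULL if none ready. */
static void *example_get_next_to_consume(struct example_shadow_queue *q)
{
	if (q->consumer == q->next_to_complete)
		return NULL;
	return (u8 *)q->buffer + (q->consumer % q->size) * q->stride;
}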

drivers/infiniband/hw/mana/device.c
Lines changed: 1 addition & 0 deletions

@@ -40,6 +40,7 @@ static const struct ib_device_ops mana_ib_dev_ops = {
 	.mmap = mana_ib_mmap,
 	.modify_qp = mana_ib_modify_qp,
 	.modify_wq = mana_ib_modify_wq,
+	.poll_cq = mana_ib_poll_cq,
 	.post_recv = mana_ib_post_recv,
 	.post_send = mana_ib_post_send,
 	.query_device = mana_ib_query_device,
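Registering .poll_cq is what makes the core ib_poll_cq() work for this device: the verbs header is a thin dispatch through the ops table, roughly as below (simplified from include/rdma/ib_verbs.h).

/* Simplified view of the core entry point: after this patch, a call on
 * a mana_ib CQ lands in mana_ib_poll_cq().
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->ops.poll_cq(cq, num_entries, wc);
}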

drivers/infiniband/hw/mana/mana_ib.h
Lines changed: 32 additions & 0 deletions

@@ -127,6 +127,10 @@ struct mana_ib_mr {
 struct mana_ib_cq {
 	struct ib_cq ibcq;
 	struct mana_ib_queue queue;
+	/* protects CQ polling */
+	spinlock_t cq_lock;
+	struct list_head list_send_qp;
+	struct list_head list_recv_qp;
 	int cqe;
 	u32 comp_vector;
 	mana_handle_t cq_handle;
@@ -169,6 +173,8 @@ struct mana_ib_qp {
 	/* The port on the IB device, starting with 1 */
 	u32 port;
 
+	struct list_head cq_send_list;
+	struct list_head cq_recv_list;
 	struct shadow_queue shadow_rq;
 	struct shadow_queue shadow_sq;
 
@@ -435,6 +441,31 @@ struct rdma_send_oob {
 	};
 }; /* HW DATA */
 
+struct mana_rdma_cqe {
+	union {
+		struct {
+			u8 cqe_type;
+			u8 data[GDMA_COMP_DATA_SIZE - 1];
+		};
+		struct {
+			u32 cqe_type : 8;
+			u32 vendor_error : 9;
+			u32 reserved1 : 15;
+			u32 sge_offset : 5;
+			u32 tx_wqe_offset : 27;
+		} ud_send;
+		struct {
+			u32 cqe_type : 8;
+			u32 reserved1 : 24;
+			u32 msg_len;
+			u32 src_qpn : 24;
+			u32 reserved2 : 8;
+			u32 imm_data;
+			u32 rx_wqe_offset;
+		} ud_recv;
+	};
+}; /* HW DATA */
+
 static inline struct gdma_context *mdev_to_gc(struct mana_ib_dev *mdev)
 {
 	return mdev->gdma_dev->gdma_context;
@@ -602,5 +633,6 @@ int mana_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 int mana_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 		      const struct ib_send_wr **bad_wr);
 
+int mana_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
 int mana_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
 #endif
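struct mana_rdma_cqe overlays three views of the same completion payload: a generic byte view (cqe_type plus opaque data) and the decoded UD send/receive layouts. The bitfields pack to whole 32-bit words: 8 + 9 + 15 = 32 for the first ud_send word, 5 + 27 = 32 for the second, and 8 + 24 = 32 for the first ud_recv word. A hedged compile-time sanity check, illustrative only and not part of the patch:

/* The struct is cast from cqe->cqe_data in mana_handle_cqe(), so it
 * must not be larger than the raw GDMA completion payload;
 * GDMA_COMP_DATA_SIZE is the existing GDMA constant.
 */
static_assert(sizeof(struct mana_rdma_cqe) <= GDMA_COMP_DATA_SIZE,
	      "mana_rdma_cqe must fit in gdma_comp::cqe_data");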

drivers/infiniband/hw/mana/qp.c
Lines changed: 33 additions & 0 deletions

@@ -600,6 +600,36 @@ static int mana_ib_create_rc_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
 	return err;
 }
 
+static void mana_add_qp_to_cqs(struct mana_ib_qp *qp)
+{
+	struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);
+	struct mana_ib_cq *recv_cq = container_of(qp->ibqp.recv_cq, struct mana_ib_cq, ibcq);
+	unsigned long flags;
+
+	spin_lock_irqsave(&send_cq->cq_lock, flags);
+	list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
+	spin_unlock_irqrestore(&send_cq->cq_lock, flags);
+
+	spin_lock_irqsave(&recv_cq->cq_lock, flags);
+	list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
+	spin_unlock_irqrestore(&recv_cq->cq_lock, flags);
+}
+
+static void mana_remove_qp_from_cqs(struct mana_ib_qp *qp)
+{
+	struct mana_ib_cq *send_cq = container_of(qp->ibqp.send_cq, struct mana_ib_cq, ibcq);
+	struct mana_ib_cq *recv_cq = container_of(qp->ibqp.recv_cq, struct mana_ib_cq, ibcq);
+	unsigned long flags;
+
+	spin_lock_irqsave(&send_cq->cq_lock, flags);
+	list_del(&qp->cq_send_list);
+	spin_unlock_irqrestore(&send_cq->cq_lock, flags);
+
+	spin_lock_irqsave(&recv_cq->cq_lock, flags);
+	list_del(&qp->cq_recv_list);
+	spin_unlock_irqrestore(&recv_cq->cq_lock, flags);
+}
+
 static int mana_ib_create_ud_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
 				struct ib_qp_init_attr *attr, struct ib_udata *udata)
 {
@@ -654,6 +684,8 @@ static int mana_ib_create_ud_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
 	if (err)
 		goto destroy_qp;
 
+	mana_add_qp_to_cqs(qp);
+
 	return 0;
 
 destroy_qp:
@@ -840,6 +872,7 @@ static int mana_ib_destroy_ud_qp(struct mana_ib_qp *qp, struct ib_udata *udata)
 		container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
 	int i;
 
+	mana_remove_qp_from_cqs(qp);
 	mana_table_remove_qp(mdev, qp);
 
 	destroy_shadow_queue(&qp->shadow_rq);

drivers/net/ethernet/microsoft/mana/gdma_main.c
Lines changed: 1 addition & 0 deletions

@@ -1222,6 +1222,7 @@ int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe)
 
 	return cqe_idx;
 }
+EXPORT_SYMBOL_NS(mana_gd_poll_cq, "NET_MANA");
 
 static irqreturn_t mana_gd_intr(int irq, void *arg)
 {
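mana_gd_poll_cq() is defined in the Ethernet driver, so this export is what lets the separately built RDMA module call it; EXPORT_SYMBOL_NS() places the symbol in the "NET_MANA" namespace rather than the global one. A consuming module must declare the import, roughly as below (the mana_ib module already imports this namespace; shown only to illustrate the mechanism).

#include <linux/module.h>

/* Without this declaration, modpost rejects references to symbols
 * exported into the "NET_MANA" namespace.
 */
MODULE_IMPORT_NS("NET_MANA");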
