
Commit 8001e92

Konstantin Taranov authored and rleon committed
RDMA/mana_ib: extend mana QP table
Enable mana QP table to store UD/GSI QPs. For send queues, set the most
significant bit to one, as send and receive WQs can have the same ID in mana.

Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
Link: https://patch.msgid.link/1737394039-28772-12-git-send-email-kotaranov@linux.microsoft.com
Reviewed-by: Shiraz Saleem <shirazsaleem@microsoft.com>
Reviewed-by: Long Li <longli@microsoft.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
1 parent 40ebdac commit 8001e92
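The core of the patch is the keying trick described in the commit message: send and receive WQs can share the same numeric ID in mana, so send-queue keys get their most significant bit set before going into the shared QP table. A minimal userspace sketch of that keying (plain C; qp_table_key() is an illustrative helper, not a driver function, and it assumes hardware WQ IDs never occupy bit 31):

#include <stdio.h>
#include <stdint.h>

/* Mirrors MANA_SENDQ_MASK from mana_ib.h: BIT(31) tags send-queue keys. */
#define MANA_SENDQ_MASK (1u << 31)

/* Illustrative helper: derive the table key the way mana_get_qp_ref()
 * does after this patch. Assumes WQ IDs stay below bit 31, so a tagged
 * send-queue key can never collide with a real receive-queue ID.
 */
static uint32_t qp_table_key(uint32_t qid, int is_sq)
{
	return is_sq ? (qid | MANA_SENDQ_MASK) : qid;
}

int main(void)
{
	uint32_t wq_id = 42;	/* same hardware ID reused by an SQ and an RQ */

	printf("recv key: 0x%08x\n", qp_table_key(wq_id, 0));	/* 0x0000002a */
	printf("send key: 0x%08x\n", qp_table_key(wq_id, 1));	/* 0x8000002a */
	return 0;
}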

3 files changed: +83, -5 lines changed

drivers/infiniband/hw/mana/main.c

Lines changed: 1 addition & 1 deletion
@@ -704,7 +704,7 @@ mana_ib_event_handler(void *ctx, struct gdma_queue *q, struct gdma_event *event)
 	switch (event->type) {
 	case GDMA_EQE_RNIC_QP_FATAL:
 		qpn = event->details[0];
-		qp = mana_get_qp_ref(mdev, qpn);
+		qp = mana_get_qp_ref(mdev, qpn, false);
 		if (!qp)
 			break;
 		if (qp->ibqp.event_handler) {

drivers/infiniband/hw/mana/mana_ib.h

Lines changed: 7 additions & 1 deletion
@@ -23,6 +23,9 @@
 /* MANA doesn't have any limit for MR size */
 #define MANA_IB_MAX_MR_SIZE	U64_MAX
 
+/* Send queue ID mask */
+#define MANA_SENDQ_MASK	BIT(31)
+
 /*
  * The hardware limit of number of MRs is greater than maximum number of MRs
  * that can possibly represent in 24 bits
@@ -438,11 +441,14 @@ static inline struct gdma_context *mdev_to_gc(struct mana_ib_dev *mdev)
 }
 
 static inline struct mana_ib_qp *mana_get_qp_ref(struct mana_ib_dev *mdev,
-						 uint32_t qid)
+						 u32 qid, bool is_sq)
 {
 	struct mana_ib_qp *qp;
 	unsigned long flag;
 
+	if (is_sq)
+		qid |= MANA_SENDQ_MASK;
+
 	xa_lock_irqsave(&mdev->qp_table_wq, flag);
 	qp = xa_load(&mdev->qp_table_wq, qid);
 	if (qp)

drivers/infiniband/hw/mana/qp.c

Lines changed: 75 additions & 3 deletions
@@ -444,18 +444,82 @@ static enum gdma_queue_type mana_ib_queue_type(struct ib_qp_init_attr *attr, u32
 	return type;
 }
 
+static int mana_table_store_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
+{
+	return xa_insert_irq(&mdev->qp_table_wq, qp->ibqp.qp_num, qp,
+			     GFP_KERNEL);
+}
+
+static void mana_table_remove_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
+{
+	xa_erase_irq(&mdev->qp_table_wq, qp->ibqp.qp_num);
+}
+
+static int mana_table_store_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
+{
+	u32 qids = qp->ud_qp.queues[MANA_UD_SEND_QUEUE].id | MANA_SENDQ_MASK;
+	u32 qidr = qp->ud_qp.queues[MANA_UD_RECV_QUEUE].id;
+	int err;
+
+	err = xa_insert_irq(&mdev->qp_table_wq, qids, qp, GFP_KERNEL);
+	if (err)
+		return err;
+
+	err = xa_insert_irq(&mdev->qp_table_wq, qidr, qp, GFP_KERNEL);
+	if (err)
+		goto remove_sq;
+
+	return 0;
+
+remove_sq:
+	xa_erase_irq(&mdev->qp_table_wq, qids);
+	return err;
+}
+
+static void mana_table_remove_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
+{
+	u32 qids = qp->ud_qp.queues[MANA_UD_SEND_QUEUE].id | MANA_SENDQ_MASK;
+	u32 qidr = qp->ud_qp.queues[MANA_UD_RECV_QUEUE].id;
+
+	xa_erase_irq(&mdev->qp_table_wq, qids);
+	xa_erase_irq(&mdev->qp_table_wq, qidr);
+}
+
 static int mana_table_store_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
 {
 	refcount_set(&qp->refcount, 1);
 	init_completion(&qp->free);
-	return xa_insert_irq(&mdev->qp_table_wq, qp->ibqp.qp_num, qp,
-			     GFP_KERNEL);
+
+	switch (qp->ibqp.qp_type) {
+	case IB_QPT_RC:
+		return mana_table_store_rc_qp(mdev, qp);
+	case IB_QPT_UD:
+	case IB_QPT_GSI:
+		return mana_table_store_ud_qp(mdev, qp);
+	default:
+		ibdev_dbg(&mdev->ib_dev, "Unknown QP type for storing in mana table, %d\n",
+			  qp->ibqp.qp_type);
+	}
+
+	return -EINVAL;
 }
 
 static void mana_table_remove_qp(struct mana_ib_dev *mdev,
 				 struct mana_ib_qp *qp)
 {
-	xa_erase_irq(&mdev->qp_table_wq, qp->ibqp.qp_num);
+	switch (qp->ibqp.qp_type) {
+	case IB_QPT_RC:
+		mana_table_remove_rc_qp(mdev, qp);
+		break;
+	case IB_QPT_UD:
+	case IB_QPT_GSI:
+		mana_table_remove_ud_qp(mdev, qp);
+		break;
+	default:
+		ibdev_dbg(&mdev->ib_dev, "Unknown QP type for removing from mana table, %d\n",
+			  qp->ibqp.qp_type);
+		return;
+	}
 	mana_put_qp_ref(qp);
 	wait_for_completion(&qp->free);
 }
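Note the unwind in mana_table_store_ud_qp() above: a UD/GSI QP occupies two keys, so if the second xa_insert_irq() fails the first entry is erased before returning, and the table never holds a half-registered QP. A self-contained sketch of that two-key insert-with-rollback pattern (plain C; the toy fixed-size map stands in for the kernel xarray, and all names are illustrative):

#include <stdio.h>
#include <stdint.h>

#define MANA_SENDQ_MASK (1u << 31)

/* Toy stand-in for the kernel xarray: a tiny map keyed by u32. */
#define TBL_SIZE 8
struct entry { uint32_t key; void *val; int used; };
static struct entry tbl[TBL_SIZE];

static int tbl_insert(uint32_t key, void *val)
{
	for (int i = 0; i < TBL_SIZE; i++)
		if (tbl[i].used && tbl[i].key == key)
			return -1;	/* like -EBUSY: key already present */
	for (int i = 0; i < TBL_SIZE; i++)
		if (!tbl[i].used) {
			tbl[i] = (struct entry){ key, val, 1 };
			return 0;
		}
	return -1;			/* table full */
}

static void tbl_erase(uint32_t key)
{
	for (int i = 0; i < TBL_SIZE; i++)
		if (tbl[i].used && tbl[i].key == key)
			tbl[i].used = 0;
}

/* Mirrors the shape of mana_table_store_ud_qp(): two inserts, and on
 * failure of the second the first is rolled back.
 */
static int store_ud_qp(uint32_t sq_id, uint32_t rq_id, void *qp)
{
	int err = tbl_insert(sq_id | MANA_SENDQ_MASK, qp);
	if (err)
		return err;
	err = tbl_insert(rq_id, qp);
	if (err)
		tbl_erase(sq_id | MANA_SENDQ_MASK);	/* unwind first insert */
	return err;
}

int main(void)
{
	int qp = 1;
	printf("store: %d\n", store_ud_qp(42, 42, &qp));	/* 0: same ID is fine */
	printf("store again: %d\n", store_ud_qp(42, 42, &qp));	/* -1: keys busy */
	return 0;
}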
@@ -586,8 +650,14 @@ static int mana_ib_create_ud_qp(struct ib_qp *ibqp, struct ib_pd *ibpd,
 	for (i = 0; i < MANA_UD_QUEUE_TYPE_MAX; ++i)
 		qp->ud_qp.queues[i].kmem->id = qp->ud_qp.queues[i].id;
 
+	err = mana_table_store_qp(mdev, qp);
+	if (err)
+		goto destroy_qp;
+
 	return 0;
 
+destroy_qp:
+	mana_ib_gd_destroy_ud_qp(mdev, qp);
 destroy_shadow_queues:
 	destroy_shadow_queue(&qp->shadow_rq);
 	destroy_shadow_queue(&qp->shadow_sq);
@@ -770,6 +840,8 @@ static int mana_ib_destroy_ud_qp(struct mana_ib_qp *qp, struct ib_udata *udata)
 		container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
 	int i;
 
+	mana_table_remove_qp(mdev, qp);
+
 	destroy_shadow_queue(&qp->shadow_rq);
 	destroy_shadow_queue(&qp->shadow_sq);
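In the destroy path above, mana_table_remove_qp() runs before the shadow queues are torn down; per the earlier hunk it drops the table's reference and then blocks in wait_for_completion(&qp->free) until every concurrent mana_get_qp_ref() user has dropped theirs. A hedged userspace sketch of that quiescence pattern (a pthreads condition variable stands in for struct completion; toy_qp, put_ref and remove_qp are illustrative names, not driver functions):

#include <pthread.h>
#include <stdio.h>
#include <stdatomic.h>

struct toy_qp {
	atomic_int refcount;
	pthread_mutex_t lock;
	pthread_cond_t free;	/* stands in for struct completion */
	int freed;
};

/* Like mana_put_qp_ref(): the last reference signals the completion. */
static void put_ref(struct toy_qp *qp)
{
	if (atomic_fetch_sub(&qp->refcount, 1) == 1) {
		pthread_mutex_lock(&qp->lock);
		qp->freed = 1;			/* complete(&qp->free) */
		pthread_cond_signal(&qp->free);
		pthread_mutex_unlock(&qp->lock);
	}
}

/* Like mana_table_remove_qp(): drop the table's reference, then wait
 * until all in-flight lookups have dropped theirs.
 */
static void remove_qp(struct toy_qp *qp)
{
	put_ref(qp);
	pthread_mutex_lock(&qp->lock);		/* wait_for_completion() */
	while (!qp->freed)
		pthread_cond_wait(&qp->free, &qp->lock);
	pthread_mutex_unlock(&qp->lock);
}

int main(void)
{
	struct toy_qp qp = {
		.refcount = 2,	/* table ref + one in-flight lookup */
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.free = PTHREAD_COND_INITIALIZER,
	};

	put_ref(&qp);	/* the in-flight lookup finishes */
	remove_qp(&qp);	/* returns once the QP is quiescent */
	printf("safe to free the QP\n");
	return 0;
}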
