@@ -90,6 +90,10 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
 		}
 	}
 
+	spin_lock_init(&cq->cq_lock);
+	INIT_LIST_HEAD(&cq->list_send_qp);
+	INIT_LIST_HEAD(&cq->list_recv_qp);
+
 	return 0;
 
 err_remove_cq_cb:
@@ -180,3 +184,134 @@ int mana_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 	mana_gd_ring_cq(gdma_cq, SET_ARM_BIT);
 	return 0;
 }
+
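+/*
+ * Complete the next posted send WQE of a UD/GSI QP: record the vendor
+ * error code reported by the CQE and return the WQE's space to the
+ * hardware send queue by advancing its tail.
+ */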
+static inline void handle_ud_sq_cqe(struct mana_ib_qp *qp, struct gdma_comp *cqe)
+{
+	struct mana_rdma_cqe *rdma_cqe = (struct mana_rdma_cqe *)cqe->cqe_data;
+	struct gdma_queue *wq = qp->ud_qp.queues[MANA_UD_SEND_QUEUE].kmem;
+	struct ud_sq_shadow_wqe *shadow_wqe;
+
+	shadow_wqe = shadow_queue_get_next_to_complete(&qp->shadow_sq);
+	if (!shadow_wqe)
+		return;
+
+	shadow_wqe->header.error_code = rdma_cqe->ud_send.vendor_error;
+
+	wq->tail += shadow_wqe->header.posted_wqe_size;
+	shadow_queue_advance_next_to_complete(&qp->shadow_sq);
+}
+
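+/*
+ * Complete the next posted receive WQE of a UD/GSI QP: capture the message
+ * length and source QPN from the CQE and return the WQE's space to the
+ * hardware receive queue by advancing its tail.
+ */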
+static inline void handle_ud_rq_cqe(struct mana_ib_qp *qp, struct gdma_comp *cqe)
+{
+	struct mana_rdma_cqe *rdma_cqe = (struct mana_rdma_cqe *)cqe->cqe_data;
+	struct gdma_queue *wq = qp->ud_qp.queues[MANA_UD_RECV_QUEUE].kmem;
+	struct ud_rq_shadow_wqe *shadow_wqe;
+
+	shadow_wqe = shadow_queue_get_next_to_complete(&qp->shadow_rq);
+	if (!shadow_wqe)
+		return;
+
+	shadow_wqe->byte_len = rdma_cqe->ud_recv.msg_len;
+	shadow_wqe->src_qpn = rdma_cqe->ud_recv.src_qpn;
+	shadow_wqe->header.error_code = IB_WC_SUCCESS;
+
+	wq->tail += shadow_wqe->header.posted_wqe_size;
+	shadow_queue_advance_next_to_complete(&qp->shadow_rq);
+}
+
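+/*
+ * Dispatch a polled CQE to the owning QP's send or receive shadow queue.
+ * The QP reference taken here keeps the QP alive while its CQE is handled.
+ */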
+static void mana_handle_cqe(struct mana_ib_dev *mdev, struct gdma_comp *cqe)
+{
+	struct mana_ib_qp *qp = mana_get_qp_ref(mdev, cqe->wq_num, cqe->is_sq);
+
+	if (!qp)
+		return;
+
+	if (qp->ibqp.qp_type == IB_QPT_GSI || qp->ibqp.qp_type == IB_QPT_UD) {
+		if (cqe->is_sq)
+			handle_ud_sq_cqe(qp, cqe);
+		else
+			handle_ud_rq_cqe(qp, cqe);
+	}
+
+	mana_put_qp_ref(qp);
+}
+
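+/* Translate a completed shadow WQE into an ib_wc work completion entry. */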
+static void fill_verbs_from_shadow_wqe(struct mana_ib_qp *qp, struct ib_wc *wc,
+				       const struct shadow_wqe_header *shadow_wqe)
+{
+	const struct ud_rq_shadow_wqe *ud_wqe = (const struct ud_rq_shadow_wqe *)shadow_wqe;
+
+	wc->wr_id = shadow_wqe->wr_id;
+	wc->status = shadow_wqe->error_code;
+	wc->opcode = shadow_wqe->opcode;
+	wc->vendor_err = shadow_wqe->error_code;
+	wc->wc_flags = 0;
+	wc->qp = &qp->ibqp;
+	wc->pkey_index = 0;
+
+	if (shadow_wqe->opcode == IB_WC_RECV) {
+		wc->byte_len = ud_wqe->byte_len;
+		wc->src_qp = ud_wqe->src_qpn;
+		wc->wc_flags |= IB_WC_GRH;
+	}
+}
+
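+/*
+ * Drain completed shadow WQEs from every QP attached to this CQ, filling in
+ * at most @nwc work completions. Send queues are drained before receive
+ * queues. Returns the number of work completions written to @wc.
+ */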
+static int mana_process_completions(struct mana_ib_cq *cq, int nwc, struct ib_wc *wc)
+{
+	struct shadow_wqe_header *shadow_wqe;
+	struct mana_ib_qp *qp;
+	int wc_index = 0;
+
+	/* process send shadow queue completions */
+	list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
+		while ((shadow_wqe = shadow_queue_get_next_to_consume(&qp->shadow_sq))
+				!= NULL) {
+			if (wc_index >= nwc)
+				goto out;
+
+			fill_verbs_from_shadow_wqe(qp, &wc[wc_index], shadow_wqe);
+			shadow_queue_advance_consumer(&qp->shadow_sq);
+			wc_index++;
+		}
+	}
+
+	/* process recv shadow queue completions */
+	list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
+		while ((shadow_wqe = shadow_queue_get_next_to_consume(&qp->shadow_rq))
+				!= NULL) {
+			if (wc_index >= nwc)
+				goto out;
+
+			fill_verbs_from_shadow_wqe(qp, &wc[wc_index], shadow_wqe);
+			shadow_queue_advance_consumer(&qp->shadow_rq);
+			wc_index++;
+		}
+	}
+
+out:
+	return wc_index;
+}
+
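+/*
+ * Poll in two phases under the CQ lock: first consume up to @num_entries
+ * CQEs from the hardware completion queue, marking the matching shadow
+ * WQEs complete, then convert completed shadow WQEs into work completions
+ * for the caller.
+ */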
+int mana_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+{
+	struct mana_ib_cq *cq = container_of(ibcq, struct mana_ib_cq, ibcq);
+	struct mana_ib_dev *mdev = container_of(ibcq->device, struct mana_ib_dev, ib_dev);
+	struct gdma_queue *queue = cq->queue.kmem;
+	struct gdma_comp gdma_cqe;
+	unsigned long flags;
+	int num_polled = 0;
+	int comp_read, i;
+
+	spin_lock_irqsave(&cq->cq_lock, flags);
+	for (i = 0; i < num_entries; i++) {
+		comp_read = mana_gd_poll_cq(queue, &gdma_cqe, 1);
+		if (comp_read < 1)
+			break;
+		mana_handle_cqe(mdev, &gdma_cqe);
+	}
+
+	num_polled = mana_process_completions(cq, num_entries, wc);
+	spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+	return num_polled;
+}