@@ -169,12 +169,12 @@ static struct svc_rdma_op_ctxt *alloc_ctxt(struct svcxprt_rdma *xprt,
 
 static bool svc_rdma_prealloc_ctxts(struct svcxprt_rdma *xprt)
 {
-	int i;
+	unsigned int i;
 
 	/* Each RPC/RDMA credit can consume a number of send
 	 * and receive WQEs. One ctxt is allocated for each.
 	 */
-	i = xprt->sc_sq_depth + xprt->sc_max_requests;
+	i = xprt->sc_sq_depth + xprt->sc_rq_depth;
 
 	while (i--) {
 		struct svc_rdma_op_ctxt *ctxt;
@@ -285,7 +285,7 @@ static struct svc_rdma_req_map *alloc_req_map(gfp_t flags)
 
 static bool svc_rdma_prealloc_maps(struct svcxprt_rdma *xprt)
 {
-	int i;
+	unsigned int i;
 
 	/* One for each receive buffer on this connection. */
 	i = xprt->sc_max_requests;
@@ -1016,8 +1016,8 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	struct ib_device *dev;
 	int uninitialized_var(dma_mr_acc);
 	int need_dma_mr = 0;
+	unsigned int i;
 	int ret = 0;
-	int i;
 
 	listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
 	clear_bit(XPT_CONN, &xprt->xpt_flags);
@@ -1046,9 +1046,13 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	newxprt->sc_max_sge_rd = min_t(size_t, dev->attrs.max_sge_rd,
				       RPCSVC_MAXPAGES);
 	newxprt->sc_max_req_size = svcrdma_max_req_size;
-	newxprt->sc_max_requests = min((size_t)dev->attrs.max_qp_wr,
-				       (size_t)svcrdma_max_requests);
-	newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests;
+	newxprt->sc_max_requests = min_t(u32, dev->attrs.max_qp_wr,
+					 svcrdma_max_requests);
+	newxprt->sc_max_bc_requests = min_t(u32, dev->attrs.max_qp_wr,
+					    svcrdma_max_bc_requests);
+	newxprt->sc_rq_depth = newxprt->sc_max_requests +
+			       newxprt->sc_max_bc_requests;
+	newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_rq_depth;
 
 	if (!svc_rdma_prealloc_ctxts(newxprt))
 		goto errout;
@@ -1077,7 +1081,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 		dprintk("svcrdma: error creating SQ CQ for connect request\n");
 		goto errout;
 	}
-	cq_attr.cqe = newxprt->sc_max_requests;
+	cq_attr.cqe = newxprt->sc_rq_depth;
 	newxprt->sc_rq_cq = ib_create_cq(dev,
					 rq_comp_handler,
					 cq_event_handler,
@@ -1092,7 +1096,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	qp_attr.event_handler = qp_event_handler;
 	qp_attr.qp_context = &newxprt->sc_xprt;
 	qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
-	qp_attr.cap.max_recv_wr = newxprt->sc_max_requests;
+	qp_attr.cap.max_recv_wr = newxprt->sc_rq_depth;
 	qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
 	qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
 	qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
@@ -1183,7 +1187,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	newxprt->sc_dma_lkey = dev->local_dma_lkey;
 
 	/* Post receive buffers */
-	for (i = 0; i < newxprt->sc_max_requests; i++) {
+	for (i = 0; i < newxprt->sc_rq_depth; i++) {
 		ret = svc_rdma_post_recv(newxprt, GFP_KERNEL);
 		if (ret) {
 			dprintk("svcrdma: failure posting receive buffers\n");