Commit 03fe993

chucklever authored and dledford committed
svcrdma: Define maximum number of backchannel requests
Extra resources for handling backchannel requests have to be
pre-allocated when a transport instance is created. Set up additional
fields in svcxprt_rdma to track these resources.

The max_requests fields are elements of the RPC-over-RDMA protocol, so
they should be u32. To ensure that unsigned arithmetic is used
everywhere, some other fields in the svcxprt_rdma struct are updated.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Acked-by: Bruce Fields <bfields@fieldses.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
1 parent ba986c9 commit 03fe993
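
For orientation, here is a small standalone C sketch (not kernel code) of the queue-depth arithmetic this patch moves svc_rdma_accept() to: forward and backchannel credits are each clamped to the device's max_qp_wr limit, their sum becomes the receive-queue depth, and the send-queue depth is that depth scaled by RPCRDMA_SQ_DEPTH_MULT. Only RPCRDMA_MAX_REQUESTS (32) and RPCRDMA_MAX_BC_REQUESTS (2) come from the patch itself; the multiplier and device-limit values below are illustrative stand-ins, and the min_u32() helper stands in for the kernel's min_t().

#include <stdio.h>

/* Defaults taken from the patch and svc_rdma.h. */
#define RPCRDMA_MAX_REQUESTS    32      /* forward credits */
#define RPCRDMA_MAX_BC_REQUESTS 2       /* new: backchannel credits */

/* Stand-in value for this sketch only. */
#define RPCRDMA_SQ_DEPTH_MULT   8       /* assumed multiplier */

static unsigned int min_u32(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

int main(void)
{
        unsigned int max_qp_wr = 16384;         /* example dev->attrs.max_qp_wr */

        /* Each credit pool is clamped to what the device's QP can hold. */
        unsigned int sc_max_requests    = min_u32(max_qp_wr, RPCRDMA_MAX_REQUESTS);
        unsigned int sc_max_bc_requests = min_u32(max_qp_wr, RPCRDMA_MAX_BC_REQUESTS);

        /* The RQ must now hold receives for both directions ... */
        unsigned int sc_rq_depth = sc_max_requests + sc_max_bc_requests;

        /* ... and the SQ depth is derived from the RQ depth, not from
         * sc_max_requests alone as before this patch.
         */
        unsigned int sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * sc_rq_depth;

        printf("rq_depth=%u sq_depth=%u\n", sc_rq_depth, sc_sq_depth);
        return 0;
}

With the defaults this gives an RQ depth of 34, which is the value the diff below also uses to size the RQ completion queue, the QP's max_recv_wr, and the receive-buffer posting loop in svc_rdma_transport.c.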

3 files changed: 28 additions & 15 deletions

include/linux/sunrpc/svc_rdma.h

Lines changed: 10 additions & 3 deletions
@@ -51,6 +51,7 @@
 /* RPC/RDMA parameters and stats */
 extern unsigned int svcrdma_ord;
 extern unsigned int svcrdma_max_requests;
+extern unsigned int svcrdma_max_bc_requests;
 extern unsigned int svcrdma_max_req_size;

 extern atomic_t rdma_stat_recv;
@@ -134,10 +135,11 @@ struct svcxprt_rdma {
         int sc_max_sge;
         int sc_max_sge_rd;      /* max sge for read target */

-        int sc_sq_depth;        /* Depth of SQ */
         atomic_t sc_sq_count;   /* Number of SQ WR on queue */
-
-        int sc_max_requests;    /* Depth of RQ */
+        unsigned int sc_sq_depth;       /* Depth of SQ */
+        unsigned int sc_rq_depth;       /* Depth of RQ */
+        u32 sc_max_requests;    /* Forward credits */
+        u32 sc_max_bc_requests; /* Backward credits */
         int sc_max_req_size;    /* Size of each RQ WR buf */

         struct ib_pd *sc_pd;
@@ -186,6 +188,11 @@ struct svcxprt_rdma {
 #define RPCRDMA_MAX_REQUESTS 32
 #define RPCRDMA_MAX_REQ_SIZE 4096

+/* Typical ULP usage of BC requests is NFSv4.1 backchannel. Our
+ * current NFSv4.1 implementation supports one backchannel slot.
+ */
+#define RPCRDMA_MAX_BC_REQUESTS 2
+
 #define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD

 /* svc_rdma_marshal.c */

net/sunrpc/xprtrdma/svc_rdma.c

Lines changed: 4 additions & 2 deletions
@@ -55,6 +55,7 @@ unsigned int svcrdma_ord = RPCRDMA_ORD;
 static unsigned int min_ord = 1;
 static unsigned int max_ord = 4096;
 unsigned int svcrdma_max_requests = RPCRDMA_MAX_REQUESTS;
+unsigned int svcrdma_max_bc_requests = RPCRDMA_MAX_BC_REQUESTS;
 static unsigned int min_max_requests = 4;
 static unsigned int max_max_requests = 16384;
 unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
@@ -245,9 +246,10 @@ int svc_rdma_init(void)
 {
         dprintk("SVCRDMA Module Init, register RPC RDMA transport\n");
         dprintk("\tsvcrdma_ord : %d\n", svcrdma_ord);
-        dprintk("\tmax_requests : %d\n", svcrdma_max_requests);
-        dprintk("\tsq_depth : %d\n",
+        dprintk("\tmax_requests : %u\n", svcrdma_max_requests);
+        dprintk("\tsq_depth : %u\n",
                 svcrdma_max_requests * RPCRDMA_SQ_DEPTH_MULT);
+        dprintk("\tmax_bc_requests : %u\n", svcrdma_max_bc_requests);
         dprintk("\tmax_inline : %d\n", svcrdma_max_req_size);

         svc_rdma_wq = alloc_workqueue("svc_rdma", 0, 0);

net/sunrpc/xprtrdma/svc_rdma_transport.c

Lines changed: 14 additions & 10 deletions
@@ -169,12 +169,12 @@ static struct svc_rdma_op_ctxt *alloc_ctxt(struct svcxprt_rdma *xprt,

 static bool svc_rdma_prealloc_ctxts(struct svcxprt_rdma *xprt)
 {
-        int i;
+        unsigned int i;

         /* Each RPC/RDMA credit can consume a number of send
          * and receive WQEs. One ctxt is allocated for each.
          */
-        i = xprt->sc_sq_depth + xprt->sc_max_requests;
+        i = xprt->sc_sq_depth + xprt->sc_rq_depth;

         while (i--) {
                 struct svc_rdma_op_ctxt *ctxt;
@@ -285,7 +285,7 @@ static struct svc_rdma_req_map *alloc_req_map(gfp_t flags)

 static bool svc_rdma_prealloc_maps(struct svcxprt_rdma *xprt)
 {
-        int i;
+        unsigned int i;

         /* One for each receive buffer on this connection. */
         i = xprt->sc_max_requests;
@@ -1016,8 +1016,8 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
         struct ib_device *dev;
         int uninitialized_var(dma_mr_acc);
         int need_dma_mr = 0;
+        unsigned int i;
         int ret = 0;
-        int i;

         listen_rdma = container_of(xprt, struct svcxprt_rdma, sc_xprt);
         clear_bit(XPT_CONN, &xprt->xpt_flags);
@@ -1046,9 +1046,13 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
         newxprt->sc_max_sge_rd = min_t(size_t, dev->attrs.max_sge_rd,
                                        RPCSVC_MAXPAGES);
         newxprt->sc_max_req_size = svcrdma_max_req_size;
-        newxprt->sc_max_requests = min((size_t)dev->attrs.max_qp_wr,
-                                       (size_t)svcrdma_max_requests);
-        newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_max_requests;
+        newxprt->sc_max_requests = min_t(u32, dev->attrs.max_qp_wr,
+                                         svcrdma_max_requests);
+        newxprt->sc_max_bc_requests = min_t(u32, dev->attrs.max_qp_wr,
+                                            svcrdma_max_bc_requests);
+        newxprt->sc_rq_depth = newxprt->sc_max_requests +
+                               newxprt->sc_max_bc_requests;
+        newxprt->sc_sq_depth = RPCRDMA_SQ_DEPTH_MULT * newxprt->sc_rq_depth;

         if (!svc_rdma_prealloc_ctxts(newxprt))
                 goto errout;
@@ -1077,7 +1081,7 @@
                 dprintk("svcrdma: error creating SQ CQ for connect request\n");
                 goto errout;
         }
-        cq_attr.cqe = newxprt->sc_max_requests;
+        cq_attr.cqe = newxprt->sc_rq_depth;
         newxprt->sc_rq_cq = ib_create_cq(dev,
                                          rq_comp_handler,
                                          cq_event_handler,
@@ -1092,7 +1096,7 @@
         qp_attr.event_handler = qp_event_handler;
         qp_attr.qp_context = &newxprt->sc_xprt;
         qp_attr.cap.max_send_wr = newxprt->sc_sq_depth;
-        qp_attr.cap.max_recv_wr = newxprt->sc_max_requests;
+        qp_attr.cap.max_recv_wr = newxprt->sc_rq_depth;
         qp_attr.cap.max_send_sge = newxprt->sc_max_sge;
         qp_attr.cap.max_recv_sge = newxprt->sc_max_sge;
         qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
@@ -1183,7 +1187,7 @@
         newxprt->sc_dma_lkey = dev->local_dma_lkey;

         /* Post receive buffers */
-        for (i = 0; i < newxprt->sc_max_requests; i++) {
+        for (i = 0; i < newxprt->sc_rq_depth; i++) {
                 ret = svc_rdma_post_recv(newxprt, GFP_KERNEL);
                 if (ret) {
                         dprintk("svcrdma: failure posting receive buffers\n");
