@@ -384,6 +384,7 @@ static void rnbd_softirq_done_fn(struct request *rq)
 	struct rnbd_iu *iu;
 
 	iu = blk_mq_rq_to_pdu(rq);
+	sg_free_table_chained(&iu->sgt, RNBD_INLINE_SG_CNT);
 	rnbd_put_permit(sess, iu->permit);
 	blk_mq_end_request(rq, errno_to_blk_status(iu->errno));
 }
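The free added here pairs with the sg_alloc_table_chained() call this patch adds to rnbd_queue_rq() further down: the inline-entry count passed to sg_free_table_chained() must match the one given at allocation time. A minimal sketch of the pairing, using the helpers from <linux/scatterlist.h>; INLINE_SG_CNT stands in for RNBD_INLINE_SG_CNT, whose value (defined in rnbd-clt.h, not part of this diff) is an assumption:

#include <linux/scatterlist.h>

#define INLINE_SG_CNT 2	/* stands in for RNBD_INLINE_SG_CNT (assumed) */

static int chained_pairing_example(struct sg_table *sgt,
				   struct scatterlist *inline_sgl, int nents)
{
	int err;

	/* Uses inline_sgl for up to INLINE_SG_CNT entries; larger requests
	 * get extra chunks allocated and chained onto it. */
	err = sg_alloc_table_chained(sgt, nents, inline_sgl, INLINE_SG_CNT);
	if (err)
		return err;

	/* ... map and transfer the request here ... */

	/* The inline count must match the allocation call. */
	sg_free_table_chained(sgt, INLINE_SG_CNT);
	return 0;
}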
@@ -477,7 +478,7 @@ static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait)
 	iu->buf = NULL;
 	iu->dev = dev;
 
-	sg_mark_end(&iu->sglist[0]);
+	sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
 
 	msg.hdr.type = cpu_to_le16(RNBD_MSG_CLOSE);
 	msg.device_id = cpu_to_le32(device_id);
@@ -492,6 +493,7 @@ static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait)
 		err = errno;
 	}
 
+	sg_free_table(&iu->sgt);
 	rnbd_put_iu(sess, iu);
 	return err;
 }
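send_msg_close() previously pointed the transport at a statically initialized iu->sglist; with the sg_table conversion it allocates a one-entry table up front and frees it once the message is done. The same alloc/init/free pattern is applied to send_msg_open() and send_msg_sess_info() in the hunks below. One caveat worth noting: sg_alloc_table() can return -ENOMEM, and its return value is not checked in these hunks. A hedged sketch of the single-entry pattern (buf and len are placeholders):

#include <linux/scatterlist.h>

static int one_entry_sg_example(struct sg_table *sgt, void *buf, size_t len)
{
	int err;

	err = sg_alloc_table(sgt, 1, GFP_KERNEL);	/* can fail with -ENOMEM */
	if (err)
		return err;

	sg_init_one(sgt->sgl, buf, len);	/* point the single entry at buf */
	/* ... hand sgt->sgl to the transport here ... */
	sg_free_table(sgt);
	return 0;
}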
@@ -564,15 +566,16 @@ static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
 	iu->buf = rsp;
 	iu->dev = dev;
 
-	sg_init_one(iu->sglist, rsp, sizeof(*rsp));
+	sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
+	sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
 
 	msg.hdr.type = cpu_to_le16(RNBD_MSG_OPEN);
 	msg.access_mode = dev->access_mode;
 	strlcpy(msg.dev_name, dev->pathname, sizeof(msg.dev_name));
 
 	WARN_ON(!rnbd_clt_get_dev(dev));
 	err = send_usr_msg(sess->rtrs, READ, iu,
-			   &vec, sizeof(*rsp), iu->sglist, 1,
+			   &vec, sizeof(*rsp), iu->sgt.sgl, 1,
 			   msg_open_conf, &errno, wait);
 	if (err) {
 		rnbd_clt_put_dev(dev);
@@ -582,6 +585,7 @@ static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
 		err = errno;
 	}
 
+	sg_free_table(&iu->sgt);
 	rnbd_put_iu(sess, iu);
 	return err;
 }
@@ -610,7 +614,8 @@ static int send_msg_sess_info(struct rnbd_clt_session *sess, bool wait)
 	iu->buf = rsp;
 	iu->sess = sess;
 
-	sg_init_one(iu->sglist, rsp, sizeof(*rsp));
+	sg_alloc_table(&iu->sgt, 1, GFP_KERNEL);
+	sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
 
 	msg.hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO);
 	msg.ver = RNBD_PROTO_VER_MAJOR;
@@ -626,7 +631,7 @@ static int send_msg_sess_info(struct rnbd_clt_session *sess, bool wait)
 		goto put_iu;
 	}
 	err = send_usr_msg(sess->rtrs, READ, iu,
-			   &vec, sizeof(*rsp), iu->sglist, 1,
+			   &vec, sizeof(*rsp), iu->sgt.sgl, 1,
 			   msg_sess_info_conf, &errno, wait);
 	if (err) {
 		rnbd_clt_put_sess(sess);
@@ -636,7 +641,7 @@ static int send_msg_sess_info(struct rnbd_clt_session *sess, bool wait)
 	} else {
 		err = errno;
 	}
-
+	sg_free_table(&iu->sgt);
 	rnbd_put_iu(sess, iu);
 	return err;
 }
@@ -1016,11 +1021,10 @@ static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev,
 	 * See queue limits.
 	 */
 	if (req_op(rq) != REQ_OP_DISCARD)
-		sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sglist);
+		sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sgt.sgl);
 
 	if (sg_cnt == 0)
-		/* Do not forget to mark the end */
-		sg_mark_end(&iu->sglist[0]);
+		sg_mark_end(&iu->sgt.sgl[0]);
 
 	msg.hdr.type = cpu_to_le16(RNBD_MSG_IO);
 	msg.device_id = cpu_to_le32(dev->device_id);
@@ -1029,13 +1033,13 @@ static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev,
 		.iov_base = &msg,
 		.iov_len  = sizeof(msg)
 	};
-	size = rnbd_clt_get_sg_size(iu->sglist, sg_cnt);
+	size = rnbd_clt_get_sg_size(iu->sgt.sgl, sg_cnt);
 	req_ops = (struct rtrs_clt_req_ops) {
 		.priv = iu,
 		.conf_fn = msg_io_conf,
 	};
 	err = rtrs_clt_request(rq_data_dir(rq), &req_ops, rtrs, permit,
-			       &vec, 1, size, iu->sglist, sg_cnt);
+			       &vec, 1, size, iu->sgt.sgl, sg_cnt);
 	if (unlikely(err)) {
 		rnbd_clt_err_rl(dev, "RTRS failed to transfer IO, err: %d\n",
 				err);
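rnbd_client_xfer_request() now maps the block request into the dynamically allocated table (iu->sgt.sgl) instead of the fixed per-IU array, and sums the mapped byte count via rnbd_clt_get_sg_size() before handing everything to rtrs_clt_request(). rnbd_clt_get_sg_size() is defined elsewhere in this file and not shown in this excerpt; a plausible shape, assuming it simply totals the mapped segment lengths with the standard for_each_sg() iterator:

#include <linux/scatterlist.h>

/* Assumed implementation, shown for context only. */
static size_t rnbd_clt_get_sg_size(struct scatterlist *sglist, u32 len)
{
	struct scatterlist *sg;
	size_t tsize = 0;
	int i;

	/* Walk the first 'len' mapped entries, following chain pointers. */
	for_each_sg(sglist, sg, len, i)
		tsize += sg->length;
	return tsize;
}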
@@ -1122,6 +1126,7 @@ static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx,
 	struct rnbd_clt_dev *dev = rq->rq_disk->private_data;
 	struct rnbd_iu *iu = blk_mq_rq_to_pdu(rq);
 	int err;
+	blk_status_t ret = BLK_STS_IOERR;
 
 	if (unlikely(dev->dev_state != DEV_STATE_MAPPED))
 		return BLK_STS_IOERR;
@@ -1133,32 +1138,35 @@ static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx,
 		return BLK_STS_RESOURCE;
 	}
 
+	iu->sgt.sgl = iu->first_sgl;
+	err = sg_alloc_table_chained(&iu->sgt,
+				     /* Even-if the request has no segment,
+				      * sglist must have one entry at least */
+				     blk_rq_nr_phys_segments(rq) ?: 1,
+				     iu->sgt.sgl,
+				     RNBD_INLINE_SG_CNT);
+	if (err) {
+		rnbd_clt_err_rl(dev, "sg_alloc_table_chained ret=%d\n", err);
+		rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/);
+		rnbd_put_permit(dev->sess, iu->permit);
+		return BLK_STS_RESOURCE;
+	}
+
 	blk_mq_start_request(rq);
 	err = rnbd_client_xfer_request(dev, rq, iu);
 	if (likely(err == 0))
 		return BLK_STS_OK;
 	if (unlikely(err == -EAGAIN || err == -ENOMEM)) {
 		rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/);
-		rnbd_put_permit(dev->sess, iu->permit);
-		return BLK_STS_RESOURCE;
+		ret = BLK_STS_RESOURCE;
 	}
-
+	sg_free_table_chained(&iu->sgt, RNBD_INLINE_SG_CNT);
 	rnbd_put_permit(dev->sess, iu->permit);
-	return BLK_STS_IOERR;
-}
-
-static int rnbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
-			     unsigned int hctx_idx, unsigned int numa_node)
-{
-	struct rnbd_iu *iu = blk_mq_rq_to_pdu(rq);
-
-	sg_init_table(iu->sglist, BMAX_SEGMENTS);
-	return 0;
+	return ret;
 }
 
 static struct blk_mq_ops rnbd_mq_ops = {
 	.queue_rq	= rnbd_queue_rq,
-	.init_request	= rnbd_init_request,
 	.complete	= rnbd_softirq_done_fn,
 };
 
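Two details in the rnbd_queue_rq() hunk are easy to miss. First, blk_rq_nr_phys_segments(rq) ?: 1 uses the GNU "elvis" operator, so a zero-segment request (a flush, for instance) still gets a one-entry table, matching the sg_cnt == 0 handling in rnbd_client_xfer_request() above. Second, the new ret variable lets both failure paths share the sg_free_table_chained()/rnbd_put_permit() cleanup: -EAGAIN/-ENOMEM becomes BLK_STS_RESOURCE so blk-mq retries after the 10 ms kick, and anything else falls through as BLK_STS_IOERR. On success the function returns early and the table stays allocated until completion, where rnbd_softirq_done_fn() frees it. A short sketch of the elvis expansion, since it reads oddly at first sight:

	unsigned int nents = blk_rq_nr_phys_segments(rq);

	/* "x ?: y" is GNU C for "x ? x : y", evaluating x only once. */
	nents = nents ?: 1;	/* equivalent to: if (nents == 0) nents = 1; */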
@@ -1172,7 +1180,7 @@ static int setup_mq_tags(struct rnbd_clt_session *sess)
 	tag_set->numa_node = NUMA_NO_NODE;
 	tag_set->flags = BLK_MQ_F_SHOULD_MERGE |
 			 BLK_MQ_F_TAG_QUEUE_SHARED;
-	tag_set->cmd_size = sizeof(struct rnbd_iu);
+	tag_set->cmd_size = sizeof(struct rnbd_iu) + RNBD_RDMA_SGL_SIZE;
 	tag_set->nr_hw_queues = num_online_cpus();
 
 	return blk_mq_alloc_tag_set(tag_set);
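The cmd_size extension is what makes the inline entries come for free: blk_mq_rq_to_pdu() returns the cmd_size bytes that blk-mq reserves behind every struct request, so growing cmd_size by RNBD_RDMA_SGL_SIZE leaves room for the inline scatterlist right after the IU, which is also why the old rnbd_init_request()/sg_init_table() setup could be dropped. This implies first_sgl is a flexible array member at the end of struct rnbd_iu; the header change is not part of this excerpt, so the sketch below is an assumption based on the hunks above:

#include <linux/scatterlist.h>

#define RNBD_INLINE_SG_CNT 2	/* assumed value; defined in rnbd-clt.h */
#define RNBD_RDMA_SGL_SIZE (sizeof(struct scatterlist) * RNBD_INLINE_SG_CNT)

/* Reduced to the members this patch touches. */
struct rnbd_iu_sketch {
	struct sg_table sgt;		/* sgt.sgl is pointed at first_sgl */
	struct scatterlist first_sgl[];	/* must stay last: it lands in the
					 * extra cmd_size bytes */
};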