@@ -1094,6 +1094,25 @@ static void ublk_complete_rq(struct kref *ref)
 	__ublk_complete_rq(req);
 }
 
+static void ublk_do_fail_rq(struct request *req)
+{
+	struct ublk_queue *ubq = req->mq_hctx->driver_data;
+
+	if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
+		blk_mq_requeue_request(req, false);
+	else
+		__ublk_complete_rq(req);
+}
+
+static void ublk_fail_rq_fn(struct kref *ref)
+{
+	struct ublk_rq_data *data = container_of(ref, struct ublk_rq_data,
+			ref);
+	struct request *req = blk_mq_rq_from_pdu(data);
+
+	ublk_do_fail_rq(req);
+}
+
 /*
  * Since __ublk_rq_task_work always fails requests immediately during
  * exiting, __ublk_fail_req() is only called from abort context during
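The new ublk_fail_rq_fn() is a kref release callback: whichever path drops the last reference to the request's embedded kref runs it, and container_of() recovers the per-request data from the embedded ref field. A minimal userspace model of that pattern, using plain C11 atomics as a stand-in for the kernel's kref API (all names here are illustrative, not from the patch):

/*
 * Userspace model of the release-callback pattern above: the last
 * holder to drop the reference runs the release function, which
 * recovers the containing object via the container_of idiom.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rq_data {
	atomic_int ref;		/* stand-in for struct kref */
	int tag;		/* payload identifying the request */
};

/* Runs exactly once, in whichever context drops the last reference. */
static void fail_rq_release(atomic_int *ref)
{
	struct rq_data *data = container_of(ref, struct rq_data, ref);

	printf("failing request %d on final put\n", data->tag);
}

/* kref_put() semantics: the release callback fires when the count hits zero. */
static void put_ref(atomic_int *ref, void (*release)(atomic_int *))
{
	if (atomic_fetch_sub(ref, 1) == 1)
		release(ref);
}

int main(void)
{
	struct rq_data data = { .tag = 7 };

	atomic_init(&data.ref, 2);		/* two holders: I/O path and abort path */
	put_ref(&data.ref, fail_rq_release);	/* one holder drops its reference */
	put_ref(&data.ref, fail_rq_release);	/* final put triggers the release */
	return 0;
}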
@@ -1107,10 +1126,13 @@ static void __ublk_fail_req(struct ublk_queue *ubq, struct ublk_io *io,
 {
 	WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE);
 
-	if (ublk_nosrv_should_reissue_outstanding(ubq->dev))
-		blk_mq_requeue_request(req, false);
-	else
-		ublk_put_req_ref(ubq, req);
+	if (ublk_need_req_ref(ubq)) {
+		struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);
+
+		kref_put(&data->ref, ublk_fail_rq_fn);
+	} else {
+		ublk_do_fail_rq(req);
+	}
 }
 
 static void ubq_complete_io_cmd(struct ublk_io *io, int res,
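This rewiring mirrors the driver's normal completion path: when per-request references are in use, the existing put helper drops the same embedded kref with ublk_complete_rq() as the release function. A minimal sketch of that pairing, assuming the shape of the mainline ublk_put_req_ref() helper (shown for illustration, not quoted from this patch):

/* Sketch: the normal put path and the new fail path drop the same
 * embedded kref; only the release callback run on the final put
 * differs. Shapes assumed from mainline ublk_drv.c. */
static inline void ublk_put_req_ref(const struct ublk_queue *ubq,
		struct request *req)
{
	if (ublk_need_req_ref(ubq)) {
		struct ublk_rq_data *data = blk_mq_rq_to_pdu(req);

		/* the last put completes the request normally */
		kref_put(&data->ref, ublk_complete_rq);
	}
}

Routing failure through kref_put() means the abort path can no longer requeue or complete a request while another holder (for example, an outstanding user-copy or zero-copy reference) still owns it; whichever side drops the last reference performs the failure handling exactly once.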