Commit 3228f48

blk-mq: fix for flush deadlock

Christoph Hellwig authored and axboe committed
The flush state machine takes in a struct request, which is then submitted multiple times to the underlying driver. The old block code requeues the same request for each of those submissions, so it has no issue with tapping into the request pool. The new code, on the other hand, allocates a new request for each of the actual steps of the flush sequence. If we have already allocated all of the tags for I/O, we will fail to allocate the flush request.

Set aside a reserved request just for flushes.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
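To make the failure mode concrete, here is a minimal userspace sketch of the reserved-tag idea. It is not part of the commit; alloc_tag and the constants are invented for illustration, standing in for the blk-mq tag allocator:

	#include <stdbool.h>
	#include <stdio.h>

	#define QUEUE_DEPTH	8	/* tags available to normal I/O */
	#define RESERVED_TAGS	1	/* set aside for flushes */
	#define TOTAL_TAGS	(QUEUE_DEPTH + RESERVED_TAGS)

	static bool tag_busy[TOTAL_TAGS];

	/* Tags [0, RESERVED_TAGS) are reserved; the rest serve normal I/O. */
	static int alloc_tag(bool reserved)
	{
		int start = reserved ? 0 : RESERVED_TAGS;
		int end = reserved ? RESERVED_TAGS : TOTAL_TAGS;

		for (int i = start; i < end; i++) {
			if (!tag_busy[i]) {
				tag_busy[i] = true;
				return i;
			}
		}
		return -1;	/* region exhausted */
	}

	int main(void)
	{
		/* A busy workload grabs every normal tag... */
		while (alloc_tag(false) >= 0)
			;

		/* ...yet a flush still gets a tag from the reserved region. */
		printf("flush tag: %d\n", alloc_tag(true));
		return 0;
	}

Without the reserved region, the flush allocation would have to wait on a tag held by I/O that may itself be queued behind the flush, which is the deadlock this commit fixes.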
1 parent 280d45f

4 files changed, 15 insertions(+), 5 deletions(-)

block/blk-core.c (+1, -1)

@@ -1102,7 +1102,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
 struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 {
 	if (q->mq_ops)
-		return blk_mq_alloc_request(q, rw, gfp_mask);
+		return blk_mq_alloc_request(q, rw, gfp_mask, false);
 	else
 		return blk_old_get_request(q, rw, gfp_mask);
 }

block/blk-flush.c (+1, -1)

@@ -286,7 +286,7 @@ static void mq_flush_work(struct work_struct *work)
 
 	/* We don't need set REQ_FLUSH_SEQ, it's for consistency */
 	rq = blk_mq_alloc_request(q, WRITE_FLUSH|REQ_FLUSH_SEQ,
-			__GFP_WAIT|GFP_ATOMIC);
+			__GFP_WAIT|GFP_ATOMIC, true);
 	rq->cmd_type = REQ_TYPE_FS;
 	rq->end_io = flush_end_io;

block/blk-mq.c (+12, -2)

@@ -210,14 +210,15 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
 	return rq;
 }
 
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp)
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
+		gfp_t gfp, bool reserved)
 {
 	struct request *rq;
 
 	if (blk_mq_queue_enter(q))
 		return NULL;
 
-	rq = blk_mq_alloc_request_pinned(q, rw, gfp, false);
+	rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved);
 	blk_mq_put_ctx(rq->mq_ctx);
 	return rq;
 }
@@ -1327,6 +1328,15 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
 		reg->queue_depth = BLK_MQ_MAX_DEPTH;
 	}
 
+	/*
+	 * Set aside a tag for flush requests.  It will only be used while
+	 * another flush request is in progress but outside the driver.
+	 *
+	 * TODO: only allocate if flushes are supported
+	 */
+	reg->queue_depth++;
+	reg->reserved_tags++;
+
 	if (reg->queue_depth < (reg->reserved_tags + BLK_MQ_TAG_MIN))
 		return ERR_PTR(-EINVAL);
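One design point worth noting in the hunk above: rather than carving the flush tag out of the depth the driver asked for, blk_mq_init_queue bumps queue_depth and reserved_tags together, so drivers keep their full depth for normal I/O and the flush tag sits on top of it. Per the TODO, skipping the reservation on queues that never issue flushes is left as follow-up work.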

include/linux/blk-mq.h (+1, -1)

@@ -124,7 +124,7 @@ void blk_mq_insert_request(struct request_queue *, struct request *, bool);
 void blk_mq_run_queues(struct request_queue *q, bool async);
 void blk_mq_free_request(struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp);
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved);
 struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
 struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag);
