Skip to content

Commit 364b618

Browse files
Ming Lei authored and axboe committed
blk-mq: clearing flush request reference in tags->rqs[]
Before we free the request queue, clear the flush request reference in tags->rqs[], so that a potential use-after-free (UAF) can be avoided. Based on one patch written by David Jeffery.

Tested-by: John Garry <john.garry@huawei.com>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: David Jeffery <djeffery@redhat.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20210511152236.763464-5-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
1 parent bd63141 commit 364b618

File tree

1 file changed

+34
-1
lines changed

1 file changed

+34
-1
lines changed

block/blk-mq.c

Lines changed: 34 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -2643,16 +2643,49 @@ static void blk_mq_remove_cpuhp(struct blk_mq_hw_ctx *hctx)
26432643
&hctx->cpuhp_dead);
26442644
}
26452645

/*
 * Before freeing hw queue, clearing the flush request reference in
 * tags->rqs[] for avoiding potential UAF.
 */
static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
		unsigned int queue_depth, struct request *flush_rq)
{
	int i;
	unsigned long flags;

	/* The hw queue may not be mapped yet */
	if (!tags)
		return;

	/* The flush request must be idle here: no one should hold a reference */
	WARN_ON_ONCE(refcount_read(&flush_rq->ref) != 0);

	/*
	 * Atomically clear every tags->rqs[] slot that still points at the
	 * flush request; cmpxchg leaves slots holding other requests intact.
	 */
	for (i = 0; i < queue_depth; i++)
		cmpxchg(&tags->rqs[i], flush_rq, NULL);

	/*
	 * Wait until all pending iteration is done.
	 *
	 * Request reference is cleared and it is guaranteed to be observed
	 * after the ->lock is released.
	 *
	 * NOTE: the empty lock/unlock pair is deliberate — it acts as a
	 * synchronization point so that any concurrent walker of tags->rqs[]
	 * holding tags->lock has finished before the caller frees flush_rq.
	 */
	spin_lock_irqsave(&tags->lock, flags);
	spin_unlock_irqrestore(&tags->lock, flags);
}
26462675
/* hctx->ctxs will be freed in queue's release handler */
26472676
static void blk_mq_exit_hctx(struct request_queue *q,
26482677
struct blk_mq_tag_set *set,
26492678
struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
26502679
{
2680+
struct request *flush_rq = hctx->fq->flush_rq;
2681+
26512682
if (blk_mq_hw_queue_mapped(hctx))
26522683
blk_mq_tag_idle(hctx);
26532684

2685+
blk_mq_clear_flush_rq_mapping(set->tags[hctx_idx],
2686+
set->queue_depth, flush_rq);
26542687
if (set->ops->exit_request)
2655-
set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
2688+
set->ops->exit_request(set, flush_rq, hctx_idx);
26562689

26572690
if (set->ops->exit_hctx)
26582691
set->ops->exit_hctx(hctx, hctx_idx);

0 commit comments

Comments (0)