Skip to content

Commit

Permalink
blk-mq: prepare for implementing hctx table via xarray
Browse files Browse the repository at this point in the history
A use-after-free on q->queue_hw_ctx is unavoidable when queue_for_each_hw_ctx()
races with blk_mq_update_nr_hw_queues(). Converting to an xarray fixes the UAF,
and the code gets cleaner as well.

Prepare for converting q->queue_hw_ctx into an xarray. One consideration is that
xa_for_each() only accepts 'unsigned long' as the index, so change the type of
the hctx index used by queue_for_each_hw_ctx() to 'unsigned long'.

Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20220308073219.91173-6-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
  • Loading branch information
Ming Lei authored and axboe committed Mar 9, 2022
1 parent de0328d commit 4f48120
Show file tree
Hide file tree
Showing 6 changed files with 36 additions and 29 deletions.
6 changes: 3 additions & 3 deletions block/blk-mq-debugfs.c
Original file line number Diff line number Diff line change
Expand Up @@ -707,7 +707,7 @@ static void debugfs_create_files(struct dentry *parent, void *data,
void blk_mq_debugfs_register(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
int i;
unsigned long i;

debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);

Expand Down Expand Up @@ -780,7 +780,7 @@ void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
int i;
unsigned long i;

queue_for_each_hw_ctx(q, hctx, i)
blk_mq_debugfs_register_hctx(q, hctx);
Expand All @@ -789,7 +789,7 @@ void blk_mq_debugfs_register_hctxs(struct request_queue *q)
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
int i;
unsigned long i;

queue_for_each_hw_ctx(q, hctx, i)
blk_mq_debugfs_unregister_hctx(hctx);
Expand Down
9 changes: 5 additions & 4 deletions block/blk-mq-sched.c
Original file line number Diff line number Diff line change
Expand Up @@ -515,7 +515,7 @@ static void blk_mq_exit_sched_shared_tags(struct request_queue *queue)
static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags)
{
struct blk_mq_hw_ctx *hctx;
int i;
unsigned long i;

queue_for_each_hw_ctx(q, hctx, i) {
if (hctx->sched_tags) {
Expand Down Expand Up @@ -550,9 +550,10 @@ static int blk_mq_init_sched_shared_tags(struct request_queue *queue)

int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
{
unsigned int i, flags = q->tag_set->flags;
unsigned int flags = q->tag_set->flags;
struct blk_mq_hw_ctx *hctx;
struct elevator_queue *eq;
unsigned long i;
int ret;

if (!e) {
Expand Down Expand Up @@ -618,7 +619,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
void blk_mq_sched_free_rqs(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
int i;
unsigned long i;

if (blk_mq_is_shared_tags(q->tag_set->flags)) {
blk_mq_free_rqs(q->tag_set, q->sched_shared_tags,
Expand All @@ -635,7 +636,7 @@ void blk_mq_sched_free_rqs(struct request_queue *q)
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
struct blk_mq_hw_ctx *hctx;
unsigned int i;
unsigned long i;
unsigned int flags = 0;

queue_for_each_hw_ctx(q, hctx, i) {
Expand Down
16 changes: 10 additions & 6 deletions block/blk-mq-sysfs.c
Original file line number Diff line number Diff line change
Expand Up @@ -206,7 +206,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
int i;
unsigned long i;

lockdep_assert_held(&q->sysfs_dir_lock);

Expand Down Expand Up @@ -255,7 +255,8 @@ void blk_mq_sysfs_init(struct request_queue *q)
int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
int ret, i;
unsigned long i, j;
int ret;

WARN_ON_ONCE(!q->kobj.parent);
lockdep_assert_held(&q->sysfs_dir_lock);
Expand All @@ -278,8 +279,10 @@ int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
return ret;

unreg:
while (--i >= 0)
blk_mq_unregister_hctx(q->queue_hw_ctx[i]);
queue_for_each_hw_ctx(q, hctx, j) {
if (j < i)
blk_mq_unregister_hctx(hctx);
}

kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
kobject_del(q->mq_kobj);
Expand All @@ -290,7 +293,7 @@ int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
void blk_mq_sysfs_unregister(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
int i;
unsigned long i;

mutex_lock(&q->sysfs_dir_lock);
if (!q->mq_sysfs_init_done)
Expand All @@ -306,7 +309,8 @@ void blk_mq_sysfs_unregister(struct request_queue *q)
int blk_mq_sysfs_register(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
int i, ret = 0;
unsigned long i;
int ret = 0;

mutex_lock(&q->sysfs_dir_lock);
if (!q->mq_sysfs_init_done)
Expand Down
2 changes: 1 addition & 1 deletion block/blk-mq-tag.c
Original file line number Diff line number Diff line change
Expand Up @@ -515,7 +515,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
bt_for_each(NULL, q, btags, fn, priv, false);
} else {
struct blk_mq_hw_ctx *hctx;
int i;
unsigned long i;

queue_for_each_hw_ctx(q, hctx, i) {
struct blk_mq_tags *tags = hctx->tags;
Expand Down
30 changes: 16 additions & 14 deletions block/blk-mq.c
Original file line number Diff line number Diff line change
Expand Up @@ -312,7 +312,7 @@ EXPORT_SYMBOL_GPL(blk_mq_unquiesce_queue);
void blk_mq_wake_waiters(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
unsigned int i;
unsigned long i;

queue_for_each_hw_ctx(q, hctx, i)
if (blk_mq_hw_queue_mapped(hctx))
Expand Down Expand Up @@ -1442,7 +1442,7 @@ static void blk_mq_timeout_work(struct work_struct *work)
container_of(work, struct request_queue, timeout_work);
unsigned long next = 0;
struct blk_mq_hw_ctx *hctx;
int i;
unsigned long i;

/* A deadlock might occur if a request is stuck requiring a
* timeout at the same time a queue freeze is waiting
Expand Down Expand Up @@ -2143,7 +2143,7 @@ static struct blk_mq_hw_ctx *blk_mq_get_sq_hctx(struct request_queue *q)
void blk_mq_run_hw_queues(struct request_queue *q, bool async)
{
struct blk_mq_hw_ctx *hctx, *sq_hctx;
int i;
unsigned long i;

sq_hctx = NULL;
if (blk_mq_has_sqsched(q))
Expand Down Expand Up @@ -2171,7 +2171,7 @@ EXPORT_SYMBOL(blk_mq_run_hw_queues);
void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
{
struct blk_mq_hw_ctx *hctx, *sq_hctx;
int i;
unsigned long i;

sq_hctx = NULL;
if (blk_mq_has_sqsched(q))
Expand Down Expand Up @@ -2209,7 +2209,7 @@ EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);
bool blk_mq_queue_stopped(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
int i;
unsigned long i;

queue_for_each_hw_ctx(q, hctx, i)
if (blk_mq_hctx_stopped(hctx))
Expand Down Expand Up @@ -2248,7 +2248,7 @@ EXPORT_SYMBOL(blk_mq_stop_hw_queue);
void blk_mq_stop_hw_queues(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
int i;
unsigned long i;

queue_for_each_hw_ctx(q, hctx, i)
blk_mq_stop_hw_queue(hctx);
Expand All @@ -2266,7 +2266,7 @@ EXPORT_SYMBOL(blk_mq_start_hw_queue);
void blk_mq_start_hw_queues(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
int i;
unsigned long i;

queue_for_each_hw_ctx(q, hctx, i)
blk_mq_start_hw_queue(hctx);
Expand All @@ -2286,7 +2286,7 @@ EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
{
struct blk_mq_hw_ctx *hctx;
int i;
unsigned long i;

queue_for_each_hw_ctx(q, hctx, i)
blk_mq_start_stopped_hw_queue(hctx, async);
Expand Down Expand Up @@ -3446,7 +3446,7 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
struct blk_mq_tag_set *set, int nr_queue)
{
struct blk_mq_hw_ctx *hctx;
unsigned int i;
unsigned long i;

queue_for_each_hw_ctx(q, hctx, i) {
if (i == nr_queue)
Expand Down Expand Up @@ -3637,7 +3637,8 @@ static void __blk_mq_free_map_and_rqs(struct blk_mq_tag_set *set,

static void blk_mq_map_swqueue(struct request_queue *q)
{
unsigned int i, j, hctx_idx;
unsigned int j, hctx_idx;
unsigned long i;
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
struct blk_mq_tag_set *set = q->tag_set;
Expand Down Expand Up @@ -3744,7 +3745,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
static void queue_set_hctx_shared(struct request_queue *q, bool shared)
{
struct blk_mq_hw_ctx *hctx;
int i;
unsigned long i;

queue_for_each_hw_ctx(q, hctx, i) {
if (shared) {
Expand Down Expand Up @@ -3844,7 +3845,7 @@ static int blk_mq_alloc_ctxs(struct request_queue *q)
void blk_mq_release(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx, *next;
int i;
unsigned long i;

queue_for_each_hw_ctx(q, hctx, i)
WARN_ON_ONCE(hctx && list_empty(&hctx->hctx_list));
Expand Down Expand Up @@ -4362,7 +4363,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
{
struct blk_mq_tag_set *set = q->tag_set;
struct blk_mq_hw_ctx *hctx;
int i, ret;
int ret;
unsigned long i;

if (!set)
return -EINVAL;
Expand Down Expand Up @@ -4738,7 +4740,7 @@ void blk_mq_cancel_work_sync(struct request_queue *q)
{
if (queue_is_mq(q)) {
struct blk_mq_hw_ctx *hctx;
int i;
unsigned long i;

cancel_delayed_work_sync(&q->requeue_work);

Expand Down
2 changes: 1 addition & 1 deletion drivers/block/rnbd/rnbd-clt.c
Original file line number Diff line number Diff line change
Expand Up @@ -1343,7 +1343,7 @@ static inline void rnbd_init_hw_queue(struct rnbd_clt_dev *dev,

static void rnbd_init_mq_hw_queues(struct rnbd_clt_dev *dev)
{
int i;
unsigned long i;
struct blk_mq_hw_ctx *hctx;
struct rnbd_queue *q;

Expand Down

0 comments on commit 4f48120

Please sign in to comment.