Merge tag 'io_uring-6.13-20241220' of git://git.kernel.dk/linux
Pull io_uring fixes from Jens Axboe:

 - Fix for a file ref leak for registered ring fds

 - Turn the ->timeout_lock into a raw spinlock, as it nests under the
   io-wq lock, which is a raw spinlock because it is taken from the
   scheduler side (a short sketch of this locking rule follows the list)

 - Limit ring resizing to DEFER_TASKRUN for now. We will broaden this in
   the future, but for now ensure that it's only possible on rings with
   a single user

 - Add sanity check for io-wq enqueuing
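
The timeout_lock item follows from a general PREEMPT_RT locking rule rather than anything io_uring-specific, so here is a minimal sketch (not taken from this commit; the lock names are made up) of why the inner lock must be raw: on PREEMPT_RT a plain spinlock_t becomes a sleeping lock, and a sleeping lock may not be acquired inside a raw_spinlock_t critical section or from scheduler-side code. Since the io-wq lock is raw and ->timeout_lock is taken while it is held, ->timeout_lock has to be raw as well.

#include <linux/spinlock.h>

/* Illustrative locks only: outer_lock stands in for the raw io-wq lock,
 * inner_lock for ->timeout_lock. */
static DEFINE_RAW_SPINLOCK(outer_lock);
static DEFINE_RAW_SPINLOCK(inner_lock);

static void nesting_example(void)
{
        raw_spin_lock(&outer_lock);
        /*
         * If inner_lock were a regular spinlock_t, taking it here would mean
         * acquiring a sleeping lock on PREEMPT_RT while holding a raw
         * spinlock, which is invalid; making it raw keeps the nesting legal.
         */
        raw_spin_lock(&inner_lock);
        raw_spin_unlock(&inner_lock);
        raw_spin_unlock(&outer_lock);
}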

* tag 'io_uring-6.13-20241220' of git://git.kernel.dk/linux:
  io_uring: check if iowq is killed before queuing
  io_uring/register: limit ring resizing to DEFER_TASKRUN
  io_uring: Fix registered ring file refcount leak
  io_uring: make ctx->timeout_lock a raw spinlock
torvalds committed Dec 20, 2024
2 parents e9b8ffa + dbd2ca9 commit 7c05bd9
Showing 5 changed files with 36 additions and 30 deletions.
4 changes: 1 addition & 3 deletions include/linux/io_uring.h
@@ -15,10 +15,8 @@ bool io_is_uring_fops(struct file *file);
 
 static inline void io_uring_files_cancel(void)
 {
-        if (current->io_uring) {
-                io_uring_unreg_ringfd();
+        if (current->io_uring)
                 __io_uring_cancel(false);
-        }
 }
 static inline void io_uring_task_cancel(void)
 {
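
For context on the registered-ring refcount leak (first bullet above): as I understand the fix, the execve path tears down a task's io_uring state via io_uring_task_cancel(), the sibling helper defined just below this hunk, which also funnels into __io_uring_cancel(). Because io_uring_unreg_ringfd() used to be called only from io_uring_files_cancel(), ring fds registered with IORING_REGISTER_RING_FDS kept their file references across execve; with the call moved into __io_uring_cancel() (see the io_uring.c hunk further down), they are dropped on both exit and execve. The unchanged sibling helper, quoted from the same header for reference:

static inline void io_uring_task_cancel(void)
{
        if (current->io_uring)
                __io_uring_cancel(true);
}
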
2 changes: 1 addition & 1 deletion include/linux/io_uring_types.h
@@ -345,7 +345,7 @@ struct io_ring_ctx {
 
         /* timeouts */
         struct {
-                spinlock_t              timeout_lock;
+                raw_spinlock_t          timeout_lock;
                 struct list_head        timeout_list;
                 struct list_head        ltimeout_list;
                 unsigned                cq_last_tm_flush;
17 changes: 11 additions & 6 deletions io_uring/io_uring.c
@@ -215,9 +215,9 @@ bool io_match_task_safe(struct io_kiocb *head, struct io_uring_task *tctx,
                 struct io_ring_ctx *ctx = head->ctx;
 
                 /* protect against races with linked timeouts */
-                spin_lock_irq(&ctx->timeout_lock);
+                raw_spin_lock_irq(&ctx->timeout_lock);
                 matched = io_match_linked(head);
-                spin_unlock_irq(&ctx->timeout_lock);
+                raw_spin_unlock_irq(&ctx->timeout_lock);
         } else {
                 matched = io_match_linked(head);
         }
@@ -333,7 +333,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
         init_waitqueue_head(&ctx->cq_wait);
         init_waitqueue_head(&ctx->poll_wq);
         spin_lock_init(&ctx->completion_lock);
-        spin_lock_init(&ctx->timeout_lock);
+        raw_spin_lock_init(&ctx->timeout_lock);
         INIT_WQ_LIST(&ctx->iopoll_list);
         INIT_LIST_HEAD(&ctx->io_buffers_comp);
         INIT_LIST_HEAD(&ctx->defer_list);
@@ -498,10 +498,10 @@ static void io_prep_async_link(struct io_kiocb *req)
         if (req->flags & REQ_F_LINK_TIMEOUT) {
                 struct io_ring_ctx *ctx = req->ctx;
 
-                spin_lock_irq(&ctx->timeout_lock);
+                raw_spin_lock_irq(&ctx->timeout_lock);
                 io_for_each_link(cur, req)
                         io_prep_async_work(cur);
-                spin_unlock_irq(&ctx->timeout_lock);
+                raw_spin_unlock_irq(&ctx->timeout_lock);
         } else {
                 io_for_each_link(cur, req)
                         io_prep_async_work(cur);
@@ -514,7 +514,11 @@ static void io_queue_iowq(struct io_kiocb *req)
         struct io_uring_task *tctx = req->tctx;
 
         BUG_ON(!tctx);
-        BUG_ON(!tctx->io_wq);
+
+        if ((current->flags & PF_KTHREAD) || !tctx->io_wq) {
+                io_req_task_queue_fail(req, -ECANCELED);
+                return;
+        }
 
         /* init ->work of the whole link before punting */
         io_prep_async_link(req);
@@ -3214,6 +3218,7 @@ __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
 
 void __io_uring_cancel(bool cancel_all)
 {
+        io_uring_unreg_ringfd();
         io_uring_cancel_generic(cancel_all, NULL);
 }
 
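
On the io-wq sanity check: the BUG_ON() this hunk removes could fire when a request was punted to io-wq after the owning task had already torn down tctx->io_wq, e.g. from late task_work or the fallback worker (a kernel thread, hence the PF_KTHREAD test). A rough, simplified timeline of the situation as I read it, using only names visible in this diff:

/*
 * exiting task                        late task_work / fallback kthread
 * ------------                        ----------------------------------
 * cancels its requests, shuts down
 * io-wq; tctx->io_wq is no longer usable
 *                                     io_queue_iowq(req)
 *                                         before: BUG_ON(!tctx->io_wq) -> crash
 *                                         after:  io_req_task_queue_fail(req, -ECANCELED)
 */
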
3 changes: 3 additions & 0 deletions io_uring/register.c
@@ -414,6 +414,9 @@ static int io_register_resize_rings(struct io_ring_ctx *ctx, void __user *arg)
         if (ctx->flags & IORING_SETUP_SINGLE_ISSUER &&
             current != ctx->submitter_task)
                 return -EEXIST;
+        /* limited to DEFER_TASKRUN for now */
+        if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
+                return -EINVAL;
         if (copy_from_user(&p, arg, sizeof(p)))
                 return -EFAULT;
         if (p.flags & ~RESIZE_FLAGS)
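
From userspace, the new register.c check means a ring must be created with IORING_SETUP_DEFER_TASKRUN (which itself requires IORING_SETUP_SINGLE_ISSUER) before a resize registration is accepted; on other rings the call now fails with -EINVAL. A hedged liburing-based sketch follows; the resize opcode name and the argument convention (a struct io_uring_params carrying the new queue sizes, which is what io_register_resize_rings() above copies from userspace) are my assumptions about the 6.13 interface, not something this commit documents.

#include <liburing.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

int resize_ring_example(void)
{
        struct io_uring ring;
        struct io_uring_params p;

        memset(&p, 0, sizeof(p));
        /* After this fix, resizing is only accepted on DEFER_TASKRUN rings. */
        p.flags = IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN;
        if (io_uring_queue_init_params(64, &ring, &p) < 0)
                return -1;

        /* Assumed resize interface: new sizes passed via io_uring_params. */
        memset(&p, 0, sizeof(p));
        p.sq_entries = 256;
        p.cq_entries = 512;
        if (syscall(__NR_io_uring_register, ring.ring_fd,
                    IORING_REGISTER_RESIZE_RINGS, &p, 1) < 0)
                perror("IORING_REGISTER_RESIZE_RINGS");

        io_uring_queue_exit(&ring);
        return 0;
}
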
40 changes: 20 additions & 20 deletions io_uring/timeout.c
@@ -74,10 +74,10 @@ static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts)
         if (!io_timeout_finish(timeout, data)) {
                 if (io_req_post_cqe(req, -ETIME, IORING_CQE_F_MORE)) {
                         /* re-arm timer */
-                        spin_lock_irq(&ctx->timeout_lock);
+                        raw_spin_lock_irq(&ctx->timeout_lock);
                         list_add(&timeout->list, ctx->timeout_list.prev);
                         hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
-                        spin_unlock_irq(&ctx->timeout_lock);
+                        raw_spin_unlock_irq(&ctx->timeout_lock);
                         return;
                 }
         }
@@ -109,7 +109,7 @@ __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
         u32 seq;
         struct io_timeout *timeout, *tmp;
 
-        spin_lock_irq(&ctx->timeout_lock);
+        raw_spin_lock_irq(&ctx->timeout_lock);
         seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
 
         list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
@@ -134,7 +134,7 @@ __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
                 io_kill_timeout(req, 0);
         }
         ctx->cq_last_tm_flush = seq;
-        spin_unlock_irq(&ctx->timeout_lock);
+        raw_spin_unlock_irq(&ctx->timeout_lock);
 }
 
 static void io_req_tw_fail_links(struct io_kiocb *link, struct io_tw_state *ts)
@@ -200,9 +200,9 @@ void io_disarm_next(struct io_kiocb *req)
         } else if (req->flags & REQ_F_LINK_TIMEOUT) {
                 struct io_ring_ctx *ctx = req->ctx;
 
-                spin_lock_irq(&ctx->timeout_lock);
+                raw_spin_lock_irq(&ctx->timeout_lock);
                 link = io_disarm_linked_timeout(req);
-                spin_unlock_irq(&ctx->timeout_lock);
+                raw_spin_unlock_irq(&ctx->timeout_lock);
                 if (link)
                         io_req_queue_tw_complete(link, -ECANCELED);
         }
@@ -238,11 +238,11 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
         struct io_ring_ctx *ctx = req->ctx;
         unsigned long flags;
 
-        spin_lock_irqsave(&ctx->timeout_lock, flags);
+        raw_spin_lock_irqsave(&ctx->timeout_lock, flags);
         list_del_init(&timeout->list);
         atomic_set(&req->ctx->cq_timeouts,
                 atomic_read(&req->ctx->cq_timeouts) + 1);
-        spin_unlock_irqrestore(&ctx->timeout_lock, flags);
+        raw_spin_unlock_irqrestore(&ctx->timeout_lock, flags);
 
         if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
                 req_set_fail(req);
@@ -285,9 +285,9 @@ int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
 {
         struct io_kiocb *req;
 
-        spin_lock_irq(&ctx->timeout_lock);
+        raw_spin_lock_irq(&ctx->timeout_lock);
         req = io_timeout_extract(ctx, cd);
-        spin_unlock_irq(&ctx->timeout_lock);
+        raw_spin_unlock_irq(&ctx->timeout_lock);
 
         if (IS_ERR(req))
                 return PTR_ERR(req);
@@ -330,7 +330,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
         struct io_ring_ctx *ctx = req->ctx;
         unsigned long flags;
 
-        spin_lock_irqsave(&ctx->timeout_lock, flags);
+        raw_spin_lock_irqsave(&ctx->timeout_lock, flags);
         prev = timeout->head;
         timeout->head = NULL;
 
@@ -345,7 +345,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
         }
         list_del(&timeout->list);
         timeout->prev = prev;
-        spin_unlock_irqrestore(&ctx->timeout_lock, flags);
+        raw_spin_unlock_irqrestore(&ctx->timeout_lock, flags);
 
         req->io_task_work.func = io_req_task_link_timeout;
         io_req_task_work_add(req);
@@ -472,12 +472,12 @@ int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
         } else {
                 enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);
 
-                spin_lock_irq(&ctx->timeout_lock);
+                raw_spin_lock_irq(&ctx->timeout_lock);
                 if (tr->ltimeout)
                         ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
                 else
                         ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
-                spin_unlock_irq(&ctx->timeout_lock);
+                raw_spin_unlock_irq(&ctx->timeout_lock);
         }
 
         if (ret < 0)
@@ -572,7 +572,7 @@ int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
         struct list_head *entry;
         u32 tail, off = timeout->off;
 
-        spin_lock_irq(&ctx->timeout_lock);
+        raw_spin_lock_irq(&ctx->timeout_lock);
 
         /*
          * sqe->off holds how many events that need to occur for this
@@ -611,7 +611,7 @@ int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
         list_add(&timeout->list, entry);
         data->timer.function = io_timeout_fn;
         hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
-        spin_unlock_irq(&ctx->timeout_lock);
+        raw_spin_unlock_irq(&ctx->timeout_lock);
         return IOU_ISSUE_SKIP_COMPLETE;
 }
 
@@ -620,7 +620,7 @@ void io_queue_linked_timeout(struct io_kiocb *req)
         struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
         struct io_ring_ctx *ctx = req->ctx;
 
-        spin_lock_irq(&ctx->timeout_lock);
+        raw_spin_lock_irq(&ctx->timeout_lock);
         /*
          * If the back reference is NULL, then our linked request finished
          * before we got a chance to setup the timer
@@ -633,7 +633,7 @@ void io_queue_linked_timeout(struct io_kiocb *req)
                                 data->mode);
                 list_add_tail(&timeout->list, &ctx->ltimeout_list);
         }
-        spin_unlock_irq(&ctx->timeout_lock);
+        raw_spin_unlock_irq(&ctx->timeout_lock);
         /* drop submission reference */
         io_put_req(req);
 }
@@ -668,15 +668,15 @@ __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct io_uring_task *tctx
          * timeout_lockfirst to keep locking ordering.
          */
         spin_lock(&ctx->completion_lock);
-        spin_lock_irq(&ctx->timeout_lock);
+        raw_spin_lock_irq(&ctx->timeout_lock);
         list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
                 struct io_kiocb *req = cmd_to_io_kiocb(timeout);
 
                 if (io_match_task(req, tctx, cancel_all) &&
                     io_kill_timeout(req, -ECANCELED))
                         canceled++;
         }
-        spin_unlock_irq(&ctx->timeout_lock);
+        raw_spin_unlock_irq(&ctx->timeout_lock);
         spin_unlock(&ctx->completion_lock);
         return canceled != 0;
 }
