Merge tag 'io_uring-6.13-20250103' of git://git.kernel.dk/linux
Pull io_uring fixes from Jens Axboe:

 - Fix an issue with the read multishot support and posting of CQEs from
   io-wq context

 - Fix a regression introduced in this cycle, where making the timeout
   lock a raw spinlock uncovered another locking dependency. As a
   result, move timeout flushing outside of the timeout lock by punting
   the timeouts to a local list first

 - Fix use of an uninitialized variable in io_async_msghdr. Doesn't
   really matter functionally, but silences a valid KMSAN complaint that
   it's not always initialized

 - Fix use of incrementally provided buffers for reads on non-pollable
   files, where the buffer always gets committed upfront. Unfortunately
   the buffer address isn't resolved before the commit, so the read ends
   up using the updated rather than the original address (the userspace
   sketch below shows the setup that reaches this path)

* tag 'io_uring-6.13-20250103' of git://git.kernel.dk/linux:
  io_uring/kbuf: use pre-committed buffer address for non-pollable file
  io_uring/net: always initialize kmsg->msg.msg_inq upfront
  io_uring/timeout: flush timeouts outside of the timeout lock
  io_uring/rw: fix downgraded mshot read
torvalds committed Jan 3, 2025
2 parents aba74e6 + ed123c9 commit a984e23
Showing 4 changed files with 37 additions and 15 deletions.
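
For background on the kbuf fix, here is a minimal userspace sketch (not part of this commit, with error handling mostly elided) of the setup that reaches the patched path: a provided-buffer-ring read on a regular file, which is not pollable, with incremental buffer consumption enabled. It assumes liburing headers recent enough to ship the IOU_PBUF_RING_INC flag (kernel 6.12+); the file path is arbitrary.

/* Hypothetical reproducer sketch, not from this commit: a buffer-ring
 * read against a regular (non-pollable) file, with incremental buffer
 * consumption enabled. Build with -luring; assumes IOU_PBUF_RING_INC
 * is available (kernel 6.12+, recent liburing headers). */
#include <fcntl.h>
#include <stdio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_buf_ring *br;
	static char bufmem[65536];
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int err, fd;

	if (io_uring_queue_init(8, &ring, 0))
		return 1;

	/* One large buffer in group 0, consumed incrementally: commits
	 * advance buf->addr/buf->len instead of retiring the buffer. */
	br = io_uring_setup_buf_ring(&ring, 1, 0, IOU_PBUF_RING_INC, &err);
	if (!br)
		return 1;
	io_uring_buf_ring_add(br, bufmem, sizeof(bufmem), 0,
			      io_uring_buf_ring_mask(1), 0);
	io_uring_buf_ring_advance(br, 1);

	fd = open("/etc/os-release", O_RDONLY);	/* any regular file */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, NULL, 0, 0);	/* len 0: use buffer size */
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = 0;
	io_uring_submit(&ring);

	if (!io_uring_wait_cqe(&ring, &cqe)) {
		/* Non-pollable file: the kernel commits the buffer before
		 * issuing the read, which is the path the fix corrects. */
		printf("res=%d flags=0x%x\n", cqe->res, cqe->flags);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}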
io_uring/kbuf.c (3 additions & 1 deletion)
@@ -139,6 +139,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
 	struct io_uring_buf_ring *br = bl->buf_ring;
 	__u16 tail, head = bl->head;
 	struct io_uring_buf *buf;
+	void __user *ret;
 
 	tail = smp_load_acquire(&br->tail);
 	if (unlikely(tail == head))
@@ -153,6 +154,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
 	req->flags |= REQ_F_BUFFER_RING | REQ_F_BUFFERS_COMMIT;
 	req->buf_list = bl;
 	req->buf_index = buf->bid;
+	ret = u64_to_user_ptr(buf->addr);
 
 	if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) {
 		/*
@@ -168,7 +170,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
 		io_kbuf_commit(req, bl, *len, 1);
 		req->buf_list = NULL;
 	}
-	return u64_to_user_ptr(buf->addr);
+	return ret;
 }
 
 void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
io_uring/net.c (1 addition & 0 deletions)
@@ -754,6 +754,7 @@ static int io_recvmsg_prep_setup(struct io_kiocb *req)
 	if (req->opcode == IORING_OP_RECV) {
 		kmsg->msg.msg_name = NULL;
 		kmsg->msg.msg_namelen = 0;
+		kmsg->msg.msg_inq = 0;
 		kmsg->msg.msg_control = NULL;
 		kmsg->msg.msg_get_inq = 1;
 		kmsg->msg.msg_controllen = 0;
io_uring/rw.c (2 additions & 0 deletions)
@@ -983,6 +983,8 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
 		io_kbuf_recycle(req, issue_flags);
 		if (ret < 0)
 			req_set_fail(req);
+	} else if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
+		cflags = io_put_kbuf(req, ret, issue_flags);
 	} else {
 		/*
 		 * Any successful return value will keep the multishot read
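
On the userspace side, the rw.c change is visible in the CQE flags: a multishot read that gets downgraded to a single completion (as can happen when it is executed from io-wq context, per the first fix above) should still report the selected buffer via IORING_CQE_F_BUFFER. A hedged consumer sketch, written as a continuation of the sketch above (same ring and group-0 buffer ring) and using a pollable pipe fd, since multishot read requires buffer select:

/* Hypothetical continuation of the earlier sketch: consume multishot
 * read completions on a pipe, using the same group-0 buffer ring. */
int pfd[2];
struct io_uring_sqe *msqe;
struct io_uring_cqe *mcqe;

if (pipe(pfd))
	return 1;

msqe = io_uring_get_sqe(&ring);
io_uring_prep_read_multishot(msqe, pfd[0], 0, 0, 0 /* buf group */);
io_uring_submit(&ring);

for (;;) {
	if (io_uring_wait_cqe(&ring, &mcqe))
		break;
	unsigned flags = mcqe->flags;
	if (mcqe->res > 0 && (flags & IORING_CQE_F_BUFFER)) {
		/* Buffer id picked by the kernel; the fix above makes
		 * sure a downgraded completion still reports it. */
		unsigned bid = flags >> IORING_CQE_BUFFER_SHIFT;
		(void)bid;	/* ... consume data, recycle the buffer ... */
	}
	io_uring_cqe_seen(&ring, mcqe);
	if (!(flags & IORING_CQE_F_MORE))
		break;	/* one-shot completion: re-arm if still needed */
}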
io_uring/timeout.c (31 additions & 14 deletions)
@@ -85,29 +85,45 @@ static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts)
 	io_req_task_complete(req, ts);
 }
 
-static bool io_kill_timeout(struct io_kiocb *req, int status)
+static __cold bool io_flush_killed_timeouts(struct list_head *list, int err)
+{
+	if (list_empty(list))
+		return false;
+
+	while (!list_empty(list)) {
+		struct io_timeout *timeout;
+		struct io_kiocb *req;
+
+		timeout = list_first_entry(list, struct io_timeout, list);
+		list_del_init(&timeout->list);
+		req = cmd_to_io_kiocb(timeout);
+		if (err)
+			req_set_fail(req);
+		io_req_queue_tw_complete(req, err);
+	}
+
+	return true;
+}
+
+static void io_kill_timeout(struct io_kiocb *req, struct list_head *list)
 	__must_hold(&req->ctx->timeout_lock)
 {
 	struct io_timeout_data *io = req->async_data;
 
 	if (hrtimer_try_to_cancel(&io->timer) != -1) {
 		struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
 
-		if (status)
-			req_set_fail(req);
 		atomic_set(&req->ctx->cq_timeouts,
 			atomic_read(&req->ctx->cq_timeouts) + 1);
-		list_del_init(&timeout->list);
-		io_req_queue_tw_complete(req, status);
-		return true;
+		list_move_tail(&timeout->list, list);
 	}
-	return false;
 }
 
 __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
 {
-	u32 seq;
 	struct io_timeout *timeout, *tmp;
+	LIST_HEAD(list);
+	u32 seq;
 
 	raw_spin_lock_irq(&ctx->timeout_lock);
 	seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
@@ -131,10 +147,11 @@ __cold void io_flush_timeouts(struct io_ring_ctx *ctx)
 		if (events_got < events_needed)
 			break;
 
-		io_kill_timeout(req, 0);
+		io_kill_timeout(req, &list);
 	}
 	ctx->cq_last_tm_flush = seq;
 	raw_spin_unlock_irq(&ctx->timeout_lock);
+	io_flush_killed_timeouts(&list, 0);
 }
 
 static void io_req_tw_fail_links(struct io_kiocb *link, struct io_tw_state *ts)
@@ -661,7 +678,7 @@ __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct io_uring_task *tctx
 			     bool cancel_all)
 {
 	struct io_timeout *timeout, *tmp;
-	int canceled = 0;
+	LIST_HEAD(list);
 
 	/*
 	 * completion_lock is needed for io_match_task(). Take it before
@@ -672,11 +689,11 @@ __cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct io_uring_task *tctx
 	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
 		struct io_kiocb *req = cmd_to_io_kiocb(timeout);
 
-		if (io_match_task(req, tctx, cancel_all) &&
-		    io_kill_timeout(req, -ECANCELED))
-			canceled++;
+		if (io_match_task(req, tctx, cancel_all))
+			io_kill_timeout(req, &list);
 	}
 	raw_spin_unlock_irq(&ctx->timeout_lock);
 	spin_unlock(&ctx->completion_lock);
-	return canceled != 0;
+
+	return io_flush_killed_timeouts(&list, -ECANCELED);
 }
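
The shape of the timeout.c change is a common pattern for shrinking a (now raw) lock's critical section: detach entries onto a local list while holding the lock, then run their completions only after it is dropped, so the completion path is free to take other locks without creating a dependency on the first one. A standalone sketch of just that pattern, with hypothetical types (this is not kernel code):

/* Generic punt-to-local-list sketch, hypothetical types: cancel under
 * the lock, complete after dropping it. */
#include <pthread.h>
#include <stddef.h>

struct node {
	struct node *next;
	void (*complete)(struct node *n, int err);
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pending;	/* guarded by lock */

void cancel_all(int err)
{
	struct node *local;

	/* Phase 1: under the lock, only detach entries to a local list. */
	pthread_mutex_lock(&lock);
	local = pending;
	pending = NULL;
	pthread_mutex_unlock(&lock);

	/* Phase 2: completion callbacks run without 'lock' held, so they
	 * may take other locks without nesting inside it. */
	while (local) {
		struct node *n = local;

		local = n->next;
		n->complete(n, err);
	}
}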
