throttle-groups: do not use qemu_co_enter_next
Prepare for removing this function; always restart throttled requests
from coroutine context.  This will matter once restarting throttled
requests has to acquire a CoMutex.

Reviewed-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20170605123908.18777-9-pbonzini@redhat.com>
Signed-off-by: Fam Zheng <famz@redhat.com>
bonzini authored and Fam Zheng committed Jun 15, 2017
commit 3b170dc (parent: 7258ed9)
1 file changed: block/throttle-groups.c (37 additions, 5 deletions)
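
Previously, throttle_group_restart_queue() popped the next request with
qemu_co_enter_next(), entering the queued coroutine directly from
non-coroutine context (hence the aio_context_acquire/release pair that the
diff removes). The patch instead bounces through a dedicated coroutine, so
the dequeue always runs as coroutine_fn code, and aio_co_enter() takes care
of entering it in the BlockBackend's AioContext. A minimal sketch of that
trampoline pattern, assuming QEMU's in-tree coroutine API
(qemu_coroutine_create(), aio_co_enter(), qemu_co_queue_next()); the
example_* names are hypothetical and not part of the patch:

    #include "qemu/osdep.h"
    #include "qemu/coroutine.h"
    #include "block/aio.h"

    typedef struct {
        CoQueue *queue;     /* parked request coroutines */
        bool *popped;       /* out: was anything actually queued? */
    } ExampleRestartData;

    /* Runs as coroutine_fn, so a later patch could take a CoMutex here. */
    static void coroutine_fn example_restart_entry(void *opaque)
    {
        ExampleRestartData *d = opaque;

        *d->popped = qemu_co_queue_next(d->queue);
    }

    /* Callable from any context: create a coroutine and enter it in the
     * given AioContext (immediately, if we are already in that context). */
    static void example_restart(AioContext *ctx, CoQueue *queue, bool *popped)
    {
        ExampleRestartData d = { .queue = queue, .popped = popped };
        Coroutine *co = qemu_coroutine_create(example_restart_entry, &d);

        aio_co_enter(ctx, co);
    }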
diff --git a/block/throttle-groups.c b/block/throttle-groups.c
--- a/block/throttle-groups.c
+++ b/block/throttle-groups.c
@@ -260,6 +260,20 @@ static bool throttle_group_schedule_timer(BlockBackend *blk, bool is_write)
     return must_wait;
 }
 
+/* Start the next pending I/O request for a BlockBackend. Return whether
+ * any request was actually pending.
+ *
+ * @blk: the current BlockBackend
+ * @is_write: the type of operation (read/write)
+ */
+static bool coroutine_fn throttle_group_co_restart_queue(BlockBackend *blk,
+                                                         bool is_write)
+{
+    BlockBackendPublic *blkp = blk_get_public(blk);
+
+    return qemu_co_queue_next(&blkp->throttled_reqs[is_write]);
+}
+
 /* Look for the next pending I/O request and schedule it.
  *
  * This assumes that tg->lock is held.
@@ -287,7 +301,7 @@ static void schedule_next_request(BlockBackend *blk, bool is_write)
     if (!must_wait) {
         /* Give preference to requests from the current blk */
         if (qemu_in_coroutine() &&
-            qemu_co_queue_next(&blkp->throttled_reqs[is_write])) {
+            throttle_group_co_restart_queue(blk, is_write)) {
             token = blk;
         } else {
             ThrottleTimers *tt = &blk_get_public(token)->throttle_timers;
@@ -340,15 +354,21 @@ void coroutine_fn throttle_group_co_io_limits_intercept(BlockBackend *blk,
     qemu_mutex_unlock(&tg->lock);
 }
 
-static void throttle_group_restart_queue(BlockBackend *blk, bool is_write)
+typedef struct {
+    BlockBackend *blk;
+    bool is_write;
+} RestartData;
+
+static void coroutine_fn throttle_group_restart_queue_entry(void *opaque)
 {
+    RestartData *data = opaque;
+    BlockBackend *blk = data->blk;
+    bool is_write = data->is_write;
     BlockBackendPublic *blkp = blk_get_public(blk);
     ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
     bool empty_queue;
 
-    aio_context_acquire(blk_get_aio_context(blk));
-    empty_queue = !qemu_co_enter_next(&blkp->throttled_reqs[is_write]);
-    aio_context_release(blk_get_aio_context(blk));
+    empty_queue = !throttle_group_co_restart_queue(blk, is_write);
 
     /* If the request queue was empty then we have to take care of
      * scheduling the next one */
@@ -359,6 +379,18 @@ static void throttle_group_restart_queue(BlockBackend *blk, bool is_write)
     }
 }
 
+static void throttle_group_restart_queue(BlockBackend *blk, bool is_write)
+{
+    Coroutine *co;
+    RestartData rd = {
+        .blk = blk,
+        .is_write = is_write
+    };
+
+    co = qemu_coroutine_create(throttle_group_restart_queue_entry, &rd);
+    aio_co_enter(blk_get_aio_context(blk), co);
+}
+
 void throttle_group_restart_blk(BlockBackend *blk)
 {
     BlockBackendPublic *blkp = blk_get_public(blk);
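
Because the coroutine hop is now internal to throttle_group_restart_queue(),
its signature is unchanged and callers outside coroutine context need no
adjustment. A hypothetical call site, for illustration only:

    /* Hypothetical helper: kick both throttled queues of a BlockBackend
     * from non-coroutine context.  The coroutine hop happens inside
     * throttle_group_restart_queue(). */
    static void example_restart_both(BlockBackend *blk)
    {
        throttle_group_restart_queue(blk, false);   /* reads */
        throttle_group_restart_queue(blk, true);    /* writes */
    }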
