[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [Qemu-devel] [PATCH 05/10] blockjob: separate monitor and blockjob APIs
From: John Snow
Subject: Re: [Qemu-devel] [PATCH 05/10] blockjob: separate monitor and blockjob APIs
Date: Fri, 7 Apr 2017 20:03:36 -0400
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:45.0) Gecko/20100101 Thunderbird/45.8.0
On 03/23/2017 01:39 PM, Paolo Bonzini wrote:
> We already have different locking policies for APIs called by the monitor
> and the block job. Monitor APIs need consistency across block_job_get
> and the actual operation (e.g. block_job_set_speed), so currently there
> are explicit aio_context_acquire/release calls in blockdev.c.
>
> When a block job needs to do something instead it doesn't care about locking,
> because the whole coroutine runs under the AioContext lock. When moving
> away from the AioContext lock, the monitor will have to call new
> block_job_lock/unlock APIs, while blockjob APIs will take care of this
> for the users.
>
> In preparation for that, keep all the blockjob APIs together in the
> blockjob.c file.
>
> Signed-off-by: Paolo Bonzini <address@hidden>
> ---
> blockjob.c | 206
> +++++++++++++++++++++++++++++++------------------------------
Did you guys order... like, fifty million pizzas?
Hooray for
$ diff -u <(sed -n 's/^-//p' patch) <(sed -n 's/^\+//p' patch)
Looks clean, though it may be useful to do a few more things:
- Demarcate what you think is the monitor API in this file
- Organize blockjob.h to match to serve as a useful reference.
> 1 file changed, 105 insertions(+), 101 deletions(-)
>
> diff --git a/blockjob.c b/blockjob.c
> index caca718..c5f1d19 100644
> --- a/blockjob.c
> +++ b/blockjob.c
> @@ -334,11 +334,6 @@ void block_job_start(BlockJob *job)
> qemu_coroutine_enter(job->co);
> }
>
> -void block_job_fail(BlockJob *job)
> -{
> - block_job_unref(job);
> -}
> -
> static void block_job_completed_single(BlockJob *job)
> {
> if (!job->ret) {
> @@ -440,21 +435,6 @@ static void block_job_completed_txn_success(BlockJob
> *job)
> }
> }
>
> -void block_job_completed(BlockJob *job, int ret)
> -{
> - assert(blk_bs(job->blk)->job == job);
> - assert(!job->completed);
> - job->completed = true;
> - job->ret = ret;
> - if (!job->txn) {
> - block_job_completed_single(job);
> - } else if (ret < 0 || block_job_is_cancelled(job)) {
> - block_job_completed_txn_abort(job);
> - } else {
> - block_job_completed_txn_success(job);
> - }
> -}
> -
> void block_job_set_speed(BlockJob *job, int64_t speed, Error **errp)
> {
> Error *local_err = NULL;
> @@ -492,44 +472,11 @@ void block_job_user_pause(BlockJob *job)
> block_job_pause(job);
> }
>
> -static bool block_job_should_pause(BlockJob *job)
> -{
> - return job->pause_count > 0;
> -}
> -
> bool block_job_user_paused(BlockJob *job)
> {
> return job->user_paused;
> }
>
> -void coroutine_fn block_job_pause_point(BlockJob *job)
> -{
> - assert(job && block_job_started(job));
> -
> - if (!block_job_should_pause(job)) {
> - return;
> - }
> - if (block_job_is_cancelled(job)) {
> - return;
> - }
> -
> - if (job->driver->pause) {
> - job->driver->pause(job);
> - }
> -
> - if (block_job_should_pause(job) && !block_job_is_cancelled(job)) {
> - job->paused = true;
> - job->busy = false;
> - qemu_coroutine_yield(); /* wait for block_job_resume() */
> - job->busy = true;
> - job->paused = false;
> - }
> -
> - if (job->driver->resume) {
> - job->driver->resume(job);
> - }
> -}
> -
> void block_job_user_resume(BlockJob *job)
> {
> if (job && job->user_paused && job->pause_count > 0) {
> @@ -538,13 +485,6 @@ void block_job_user_resume(BlockJob *job)
> }
> }
>
> -void block_job_enter(BlockJob *job)
> -{
> - if (job->co && !job->busy) {
> - qemu_coroutine_enter(job->co);
> - }
> -}
> -
> void block_job_cancel(BlockJob *job)
> {
> if (block_job_started(job)) {
> @@ -556,11 +496,6 @@ void block_job_cancel(BlockJob *job)
> }
> }
>
> -bool block_job_is_cancelled(BlockJob *job)
> -{
> - return job->cancelled;
> -}
> -
> void block_job_iostatus_reset(BlockJob *job)
> {
> job->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
> @@ -628,42 +563,6 @@ int block_job_complete_sync(BlockJob *job, Error **errp)
> return block_job_finish_sync(job, &block_job_complete, errp);
> }
>
> -void block_job_sleep_ns(BlockJob *job, QEMUClockType type, int64_t ns)
> -{
> - assert(job->busy);
> -
> - /* Check cancellation *before* setting busy = false, too! */
> - if (block_job_is_cancelled(job)) {
> - return;
> - }
> -
> - job->busy = false;
> - if (!block_job_should_pause(job)) {
> - co_aio_sleep_ns(blk_get_aio_context(job->blk), type, ns);
> - }
> - job->busy = true;
> -
> - block_job_pause_point(job);
> -}
> -
> -void block_job_yield(BlockJob *job)
> -{
> - assert(job->busy);
> -
> - /* Check cancellation *before* setting busy = false, too! */
> - if (block_job_is_cancelled(job)) {
> - return;
> - }
> -
> - job->busy = false;
> - if (!block_job_should_pause(job)) {
> - qemu_coroutine_yield();
> - }
> - job->busy = true;
> -
> - block_job_pause_point(job);
> -}
> -
> BlockJobInfo *block_job_query(BlockJob *job, Error **errp)
> {
> BlockJobInfo *info;
> @@ -723,6 +622,10 @@ static void block_job_event_completed(BlockJob *job,
> const char *msg)
> &error_abort);
> }
>
> +/*
> + * API for block job drivers and the block layer.
> + */
> +
> void block_job_pause_all(void)
> {
> BlockJob *job = NULL;
> @@ -735,6 +638,59 @@ void block_job_pause_all(void)
> }
> }
>
> +void block_job_fail(BlockJob *job)
> +{
> + block_job_unref(job);
> +}
> +
> +void block_job_completed(BlockJob *job, int ret)
> +{
> + assert(blk_bs(job->blk)->job == job);
> + assert(!job->completed);
> + job->completed = true;
> + job->ret = ret;
> + if (!job->txn) {
> + block_job_completed_single(job);
> + } else if (ret < 0 || block_job_is_cancelled(job)) {
> + block_job_completed_txn_abort(job);
> + } else {
> + block_job_completed_txn_success(job);
> + }
> +}
> +
> +static bool block_job_should_pause(BlockJob *job)
> +{
> + return job->pause_count > 0;
> +}
> +
> +void coroutine_fn block_job_pause_point(BlockJob *job)
> +{
> + assert(job && block_job_started(job));
> +
> + if (!block_job_should_pause(job)) {
> + return;
> + }
> + if (block_job_is_cancelled(job)) {
> + return;
> + }
> +
> + if (job->driver->pause) {
> + job->driver->pause(job);
> + }
> +
> + if (block_job_should_pause(job) && !block_job_is_cancelled(job)) {
> + job->paused = true;
> + job->busy = false;
> + qemu_coroutine_yield(); /* wait for block_job_resume() */
> + job->busy = true;
> + job->paused = false;
> + }
> +
> + if (job->driver->resume) {
> + job->driver->resume(job);
> + }
> +}
> +
> void block_job_resume_all(void)
> {
> BlockJob *job = NULL;
> @@ -747,6 +703,54 @@ void block_job_resume_all(void)
> }
> }
>
> +void block_job_enter(BlockJob *job)
> +{
> + if (job->co && !job->busy) {
> + qemu_coroutine_enter(job->co);
> + }
> +}
> +
> +bool block_job_is_cancelled(BlockJob *job)
> +{
> + return job->cancelled;
> +}
> +
> +void block_job_sleep_ns(BlockJob *job, QEMUClockType type, int64_t ns)
> +{
> + assert(job->busy);
> +
> + /* Check cancellation *before* setting busy = false, too! */
> + if (block_job_is_cancelled(job)) {
> + return;
> + }
> +
> + job->busy = false;
> + if (!block_job_should_pause(job)) {
> + co_aio_sleep_ns(blk_get_aio_context(job->blk), type, ns);
> + }
> + job->busy = true;
> +
> + block_job_pause_point(job);
> +}
> +
> +void block_job_yield(BlockJob *job)
> +{
> + assert(job->busy);
> +
> + /* Check cancellation *before* setting busy = false, too! */
> + if (block_job_is_cancelled(job)) {
> + return;
> + }
> +
> + job->busy = false;
> + if (!block_job_should_pause(job)) {
> + qemu_coroutine_yield();
> + }
> + job->busy = true;
> +
> + block_job_pause_point(job);
> +}
> +
> void block_job_event_ready(BlockJob *job)
> {
> job->ready = true;
>
--
—js
- Re: [Qemu-devel] [PATCH 05/10] blockjob: separate monitor and blockjob APIs,
John Snow <=