[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [RFC PATCH 12/26] cpus: push BQL lock to qemu_*_wait_io_event
From: |
Pavel Dovgalyuk |
Subject: |
[Qemu-devel] [RFC PATCH 12/26] cpus: push BQL lock to qemu_*_wait_io_event |
Date: |
Tue, 31 Oct 2017 14:26:05 +0300 |
User-agent: |
StGit/0.17.1-dirty |
From: Alex Bennée <address@hidden>
We only really need to grab the lock for initial setup (so we don't
race with the thread-spawning thread). After that we can drop the lock
for the whole main loop and only grab it for waiting for IO events.
There is a slight wrinkle for the round-robin TCG thread, as we also
expire timers there, which needs to be done under the BQL because the
timers belong to the main-loop.
This is stage one of reducing the lock impact as we can drop the
requirement of implicit BQL for async work and only grab the lock when
we need to sleep on the cpu->halt_cond.
Signed-off-by: Alex Bennée <address@hidden>
Tested-by: Pavel Dovgalyuk <address@hidden>
---
accel/kvm/kvm-all.c | 4 ----
cpus.c | 27 ++++++++++++++++++++-------
target/i386/hax-all.c | 3 +--
3 files changed, 21 insertions(+), 13 deletions(-)
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index f290f48..8d1d2c4 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -1857,9 +1857,7 @@ int kvm_cpu_exec(CPUState *cpu)
return EXCP_HLT;
}
- qemu_mutex_unlock_iothread();
cpu_exec_start(cpu);
-
do {
MemTxAttrs attrs;
@@ -1989,8 +1987,6 @@ int kvm_cpu_exec(CPUState *cpu)
} while (ret == 0);
cpu_exec_end(cpu);
- qemu_mutex_lock_iothread();
-
if (ret < 0) {
cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_CODE);
vm_stop(RUN_STATE_INTERNAL_ERROR);
diff --git a/cpus.c b/cpus.c
index 2eec54f..efde5c1 100644
--- a/cpus.c
+++ b/cpus.c
@@ -1127,6 +1127,8 @@ static bool qemu_tcg_should_sleep(CPUState *cpu)
static void qemu_tcg_wait_io_event(CPUState *cpu)
{
+ qemu_mutex_lock_iothread();
+
while (qemu_tcg_should_sleep(cpu)) {
stop_tcg_kick_timer();
qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
@@ -1135,15 +1137,21 @@ static void qemu_tcg_wait_io_event(CPUState *cpu)
start_tcg_kick_timer();
qemu_wait_io_event_common(cpu);
+
+ qemu_mutex_unlock_iothread();
}
static void qemu_kvm_wait_io_event(CPUState *cpu)
{
+ qemu_mutex_lock_iothread();
+
while (cpu_thread_is_idle(cpu)) {
qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
}
qemu_wait_io_event_common(cpu);
+
+ qemu_mutex_unlock_iothread();
}
static void *qemu_kvm_cpu_thread_fn(void *arg)
@@ -1169,6 +1177,8 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
/* signal CPU creation */
cpu->created = true;
+ qemu_mutex_unlock_iothread();
+
qemu_cond_signal(&qemu_cpu_cond);
do {
@@ -1211,10 +1221,10 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
/* signal CPU creation */
cpu->created = true;
+ qemu_mutex_unlock_iothread();
qemu_cond_signal(&qemu_cpu_cond);
while (1) {
- qemu_mutex_unlock_iothread();
do {
int sig;
r = sigwait(&waitset, &sig);
@@ -1225,6 +1235,7 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
}
qemu_mutex_lock_iothread();
qemu_wait_io_event_common(cpu);
+ qemu_mutex_unlock_iothread();
}
return NULL;
@@ -1313,11 +1324,9 @@ static int tcg_cpu_exec(CPUState *cpu)
#ifdef CONFIG_PROFILER
ti = profile_getclock();
#endif
- qemu_mutex_unlock_iothread();
cpu_exec_start(cpu);
ret = cpu_exec(cpu);
cpu_exec_end(cpu);
- qemu_mutex_lock_iothread();
#ifdef CONFIG_PROFILER
tcg_time += profile_getclock() - ti;
#endif
@@ -1377,6 +1386,7 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
qemu_wait_io_event_common(cpu);
}
}
+ qemu_mutex_unlock_iothread();
start_tcg_kick_timer();
@@ -1386,6 +1396,9 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
cpu->exit_request = 1;
while (1) {
+
+ qemu_mutex_lock_iothread();
+
/* Account partial waits to QEMU_CLOCK_VIRTUAL. */
qemu_account_warp_timer();
@@ -1394,6 +1407,8 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
*/
handle_icount_deadline();
+ qemu_mutex_unlock_iothread();
+
if (!cpu) {
cpu = first_cpu;
}
@@ -1419,9 +1434,7 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
cpu_handle_guest_debug(cpu);
break;
} else if (r == EXCP_ATOMIC) {
- qemu_mutex_unlock_iothread();
cpu_exec_step_atomic(cpu);
- qemu_mutex_lock_iothread();
break;
}
} else if (cpu->stop) {
@@ -1462,6 +1475,7 @@ static void *qemu_hax_cpu_thread_fn(void *arg)
current_cpu = cpu;
hax_init_vcpu(cpu);
+ qemu_mutex_unlock_iothread();
qemu_cond_signal(&qemu_cpu_cond);
while (1) {
@@ -1512,6 +1526,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
cpu->created = true;
cpu->can_do_io = 1;
current_cpu = cpu;
+ qemu_mutex_unlock_iothread();
qemu_cond_signal(&qemu_cpu_cond);
/* process any pending work */
@@ -1536,9 +1551,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
g_assert(cpu->halted);
break;
case EXCP_ATOMIC:
- qemu_mutex_unlock_iothread();
cpu_exec_step_atomic(cpu);
- qemu_mutex_lock_iothread();
default:
/* Ignore everything else? */
break;
diff --git a/target/i386/hax-all.c b/target/i386/hax-all.c
index 3ce6950..99af6bb 100644
--- a/target/i386/hax-all.c
+++ b/target/i386/hax-all.c
@@ -513,11 +513,10 @@ static int hax_vcpu_hax_exec(CPUArchState *env)
hax_vcpu_interrupt(env);
- qemu_mutex_unlock_iothread();
cpu_exec_start(cpu);
hax_ret = hax_vcpu_run(vcpu);
+ current_cpu = cpu;
cpu_exec_end(cpu);
- qemu_mutex_lock_iothread();
/* Simply continue the vcpu_run if system call interrupted */
if (hax_ret == -EINTR || hax_ret == -EAGAIN) {
- [Qemu-devel] [RFC PATCH 02/26] blkreplay: create temporary overlay for underlying devices, (continued)
- [Qemu-devel] [RFC PATCH 02/26] blkreplay: create temporary overlay for underlying devices, Pavel Dovgalyuk, 2017/10/31
- [Qemu-devel] [RFC PATCH 03/26] replay: disable default snapshot for record/replay, Pavel Dovgalyuk, 2017/10/31
- [Qemu-devel] [RFC PATCH 04/26] replay: fix processing async events, Pavel Dovgalyuk, 2017/10/31
- [Qemu-devel] [RFC PATCH 05/26] replay: fixed replay_enable_events, Pavel Dovgalyuk, 2017/10/31
- [Qemu-devel] [RFC PATCH 06/26] replay: fix save/load vm for non-empty queue, Pavel Dovgalyuk, 2017/10/31
- [Qemu-devel] [RFC PATCH 07/26] replay: added replay log format description, Pavel Dovgalyuk, 2017/10/31
- [Qemu-devel] [RFC PATCH 08/26] replay: make safe vmstop at record/replay, Pavel Dovgalyuk, 2017/10/31
- [Qemu-devel] [RFC PATCH 09/26] replay: save prior value of the host clock, Pavel Dovgalyuk, 2017/10/31
- [Qemu-devel] [RFC PATCH 10/26] icount: fixed saving/restoring of icount warp timers, Pavel Dovgalyuk, 2017/10/31
- [Qemu-devel] [RFC PATCH 11/26] target/arm/arm-powertctl: drop BQL assertions, Pavel Dovgalyuk, 2017/10/31
- [Qemu-devel] [RFC PATCH 12/26] cpus: push BQL lock to qemu_*_wait_io_event,
Pavel Dovgalyuk <=
- [Qemu-devel] [RFC PATCH 13/26] cpus: only take BQL for sleeping threads, Pavel Dovgalyuk, 2017/10/31
- [Qemu-devel] [RFC PATCH 14/26] replay/replay.c: bump REPLAY_VERSION again, Pavel Dovgalyuk, 2017/10/31
- [Qemu-devel] [RFC PATCH 15/26] replay/replay-internal.c: track holding of replay_lock, Pavel Dovgalyuk, 2017/10/31
- [Qemu-devel] [RFC PATCH 16/26] replay: make locking visible outside replay code, Pavel Dovgalyuk, 2017/10/31
- [Qemu-devel] [RFC PATCH 17/26] replay: push replay_mutex_lock up the call tree, Pavel Dovgalyuk, 2017/10/31
- [Qemu-devel] [RFC PATCH 18/26] cpu-exec: don't overwrite exception_index, Pavel Dovgalyuk, 2017/10/31
- [Qemu-devel] [RFC PATCH 19/26] cpu-exec: reset exit flag before calling cpu_exec_nocache, Pavel Dovgalyuk, 2017/10/31
- [Qemu-devel] [RFC PATCH 20/26] replay: don't destroy mutex at exit, Pavel Dovgalyuk, 2017/10/31
- [Qemu-devel] [RFC PATCH 21/26] replay: check return values of fwrite, Pavel Dovgalyuk, 2017/10/31
- [Qemu-devel] [RFC PATCH 22/26] scripts/qemu-gdb: add simple tcg lock status helper, Pavel Dovgalyuk, 2017/10/31