[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [RFC PATCH V7 09/19] Drop global lock during TCG code execution
From: |
fred . konrad |
Subject: |
[Qemu-devel] [RFC PATCH V7 09/19] Drop global lock during TCG code execution |
Date: |
Mon, 10 Aug 2015 17:27:07 +0200 |
From: KONRAD Frederic <address@hidden>
This finally allows TCG to benefit from the iothread introduction: Drop
the global mutex while running pure TCG CPU code. Reacquire the lock
when entering MMIO or PIO emulation, or when leaving the TCG loop.
We have to revert a few optimizations for the current TCG threading
model, namely kicking the TCG thread in qemu_mutex_lock_iothread and not
kicking it in qemu_cpu_kick. We also need to disable RAM block
reordering until we have a more efficient locking mechanism at hand.
I'm pretty sure some cases are still broken, definitely SMP (we no
longer perform round-robin scheduling "by chance"). Still, a Linux x86
UP guest and my Musicpal ARM model boot fine here. These numbers
demonstrate where we gain something:
20338 jan 20 0 331m 75m 6904 R 99 0.9 0:50.95 qemu-system-arm
20337 jan 20 0 331m 75m 6904 S 20 0.9 0:26.50 qemu-system-arm
The guest CPU was fully loaded, but the iothread could still run mostly
independently on a second core. Without the patch we don't get beyond
32206 jan 20 0 330m 73m 7036 R 82 0.9 1:06.00 qemu-system-arm
32204 jan 20 0 330m 73m 7036 S 21 0.9 0:17.03 qemu-system-arm
We don't benefit significantly, though, when the guest is not fully
loading a host CPU.
Note that this patch depends on
http://thread.gmane.org/gmane.comp.emulators.qemu/118657
Changes from Fred Konrad:
* Rebase on the current HEAD.
* Fixes a deadlock in qemu_devices_reset().
* Remove the mutex in address_space_*
---
cpus.c | 20 +++-----------------
cputlb.c | 5 +++++
target-i386/misc_helper.c | 27 ++++++++++++++++++++++++---
translate-all.c | 2 ++
vl.c | 6 ++++++
5 files changed, 40 insertions(+), 20 deletions(-)
diff --git a/cpus.c b/cpus.c
index 2550be2..154a081 100644
--- a/cpus.c
+++ b/cpus.c
@@ -1232,23 +1232,7 @@ bool qemu_mutex_iothread_locked(void)
void qemu_mutex_lock_iothread(void)
{
- atomic_inc(&iothread_requesting_mutex);
- /* In the simple case there is no need to bump the VCPU thread out of
- * TCG code execution.
- */
- if (!tcg_enabled() || qemu_in_vcpu_thread() ||
- !first_cpu || !first_cpu->thread) {
- qemu_mutex_lock(&qemu_global_mutex);
- atomic_dec(&iothread_requesting_mutex);
- } else {
- if (qemu_mutex_trylock(&qemu_global_mutex)) {
- qemu_cpu_kick_thread(first_cpu);
- qemu_mutex_lock(&qemu_global_mutex);
- }
- atomic_dec(&iothread_requesting_mutex);
- qemu_cond_broadcast(&qemu_io_proceeded_cond);
- }
- iothread_locked = true;
+ qemu_mutex_lock(&qemu_global_mutex);
}
void qemu_mutex_unlock_iothread(void)
@@ -1469,7 +1453,9 @@ static int tcg_cpu_exec(CPUState *cpu)
cpu->icount_decr.u16.low = decr;
cpu->icount_extra = count;
}
+ qemu_mutex_unlock_iothread();
ret = cpu_exec(cpu);
+ qemu_mutex_lock_iothread();
#ifdef CONFIG_PROFILER
tcg_time += profile_getclock() - ti;
#endif
diff --git a/cputlb.c b/cputlb.c
index a506086..79fff1c 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -30,6 +30,9 @@
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
+void qemu_mutex_lock_iothread(void);
+void qemu_mutex_unlock_iothread(void);
+
//#define DEBUG_TLB
//#define DEBUG_TLB_CHECK
@@ -125,8 +128,10 @@ void tlb_flush_page(CPUState *cpu, target_ulong addr)
can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
+ qemu_mutex_lock_iothread();
cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
DIRTY_MEMORY_CODE);
+ qemu_mutex_unlock_iothread();
}
/* update the TLB so that writes in physical page 'phys_addr' are no longer
diff --git a/target-i386/misc_helper.c b/target-i386/misc_helper.c
index 52c5d65..55f63bf 100644
--- a/target-i386/misc_helper.c
+++ b/target-i386/misc_helper.c
@@ -27,8 +27,10 @@ void helper_outb(CPUX86State *env, uint32_t port, uint32_t
data)
#ifdef CONFIG_USER_ONLY
fprintf(stderr, "outb: port=0x%04x, data=%02x\n", port, data);
#else
+ qemu_mutex_lock_iothread();
address_space_stb(&address_space_io, port, data,
cpu_get_mem_attrs(env), NULL);
+ qemu_mutex_unlock_iothread();
#endif
}
@@ -38,8 +40,13 @@ target_ulong helper_inb(CPUX86State *env, uint32_t port)
fprintf(stderr, "inb: port=0x%04x\n", port);
return 0;
#else
- return address_space_ldub(&address_space_io, port,
+ target_ulong ret;
+
+ qemu_mutex_lock_iothread();
+ ret = address_space_ldub(&address_space_io, port,
cpu_get_mem_attrs(env), NULL);
+ qemu_mutex_unlock_iothread();
+ return ret;
#endif
}
@@ -48,8 +55,10 @@ void helper_outw(CPUX86State *env, uint32_t port, uint32_t
data)
#ifdef CONFIG_USER_ONLY
fprintf(stderr, "outw: port=0x%04x, data=%04x\n", port, data);
#else
+ qemu_mutex_lock_iothread();
address_space_stw(&address_space_io, port, data,
cpu_get_mem_attrs(env), NULL);
+ qemu_mutex_unlock_iothread();
#endif
}
@@ -59,8 +68,13 @@ target_ulong helper_inw(CPUX86State *env, uint32_t port)
fprintf(stderr, "inw: port=0x%04x\n", port);
return 0;
#else
- return address_space_lduw(&address_space_io, port,
+ target_ulong ret;
+
+ qemu_mutex_lock_iothread();
+ ret = address_space_lduw(&address_space_io, port,
cpu_get_mem_attrs(env), NULL);
+ qemu_mutex_unlock_iothread();
+ return ret;
#endif
}
@@ -69,8 +83,10 @@ void helper_outl(CPUX86State *env, uint32_t port, uint32_t
data)
#ifdef CONFIG_USER_ONLY
fprintf(stderr, "outw: port=0x%04x, data=%08x\n", port, data);
#else
+ qemu_mutex_lock_iothread();
address_space_stl(&address_space_io, port, data,
cpu_get_mem_attrs(env), NULL);
+ qemu_mutex_unlock_iothread();
#endif
}
@@ -80,8 +96,13 @@ target_ulong helper_inl(CPUX86State *env, uint32_t port)
fprintf(stderr, "inl: port=0x%04x\n", port);
return 0;
#else
- return address_space_ldl(&address_space_io, port,
+ target_ulong ret;
+
+ qemu_mutex_lock_iothread();
+ ret = address_space_ldl(&address_space_io, port,
cpu_get_mem_attrs(env), NULL);
+ qemu_mutex_unlock_iothread();
+ return ret;
#endif
}
diff --git a/translate-all.c b/translate-all.c
index 046565c..954c67a 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -1223,6 +1223,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start,
tb_page_addr_t end,
#endif
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb_modified) {
+ qemu_mutex_unlock_iothread();
/* we generate a block containing just the instruction
modifying the memory. It will ensure that it cannot modify
itself */
@@ -1327,6 +1328,7 @@ static void tb_invalidate_phys_page(tb_page_addr_t addr,
p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
if (current_tb_modified) {
+ qemu_mutex_unlock_iothread();
/* we generate a block containing just the instruction
modifying the memory. It will ensure that it cannot modify
itself */
diff --git a/vl.c b/vl.c
index 3f269dc..922e969 100644
--- a/vl.c
+++ b/vl.c
@@ -1717,10 +1717,16 @@ void qemu_devices_reset(void)
{
QEMUResetEntry *re, *nre;
+ /*
+ * Some device's reset needs to grab the global_mutex. So just release it
+ * here.
+ */
+ qemu_mutex_unlock_iothread();
/* reset all devices */
QTAILQ_FOREACH_SAFE(re, &reset_handlers, entry, nre) {
re->func(re->opaque);
}
+ qemu_mutex_lock_iothread();
}
void qemu_system_reset(bool report)
--
1.9.0
- Re: [Qemu-devel] [RFC PATCH V7 07/19] protect TBContext with tb_lock., (continued)
- Re: [Qemu-devel] [RFC PATCH V7 07/19] protect TBContext with tb_lock., Paolo Bonzini, 2015/08/11
- Re: [Qemu-devel] [RFC PATCH V7 07/19] protect TBContext with tb_lock., Frederic Konrad, 2015/08/11
- Re: [Qemu-devel] [RFC PATCH V7 07/19] protect TBContext with tb_lock., Paolo Bonzini, 2015/08/11
- Re: [Qemu-devel] [RFC PATCH V7 07/19] protect TBContext with tb_lock., Peter Maydell, 2015/08/11
- Re: [Qemu-devel] [RFC PATCH V7 07/19] protect TBContext with tb_lock., Paolo Bonzini, 2015/08/11
Re: [Qemu-devel] [RFC PATCH V7 07/19] protect TBContext with tb_lock., Frederic Konrad, 2015/08/12
[Qemu-devel] [RFC PATCH V7 08/19] tcg: remove tcg_halt_cond global variable., fred . konrad, 2015/08/10
[Qemu-devel] [RFC PATCH V7 09/19] Drop global lock during TCG code execution,
fred . konrad <=
- Re: [Qemu-devel] [RFC PATCH V7 09/19] Drop global lock during TCG code execution, Paolo Bonzini, 2015/08/10
- Re: [Qemu-devel] [RFC PATCH V7 09/19] Drop global lock during TCG code execution, Frederic Konrad, 2015/08/11
- Re: [Qemu-devel] [RFC PATCH V7 09/19] Drop global lock during TCG code execution, Alex Bennée, 2015/08/11
- Re: [Qemu-devel] [RFC PATCH V7 09/19] Drop global lock during TCG code execution, Frederic Konrad, 2015/08/11
- Re: [Qemu-devel] [RFC PATCH V7 09/19] Drop global lock during TCG code execution, Paolo Bonzini, 2015/08/12
- Re: [Qemu-devel] [RFC PATCH V7 09/19] Drop global lock during TCG code execution, Frederic Konrad, 2015/08/12
[Qemu-devel] [RFC PATCH V7 11/19] tcg: switch on multithread., fred . konrad, 2015/08/10