From: fred . konrad
Subject: [Qemu-devel] [RFC PATCH V2 3/3] cpus: introduce async_run_safe_work_on_cpu.
Date: Fri, 10 Jul 2015 18:08:06 +0200
From: KONRAD Frederic <address@hidden>
We already have async_run_on_cpu, but tasks such as tb_flush/invalidate need
all VCPUs to be outside their execution loop before they can run safely:
async_run_safe_work_on_cpu schedules a work item on a VCPU, but the work only
starts once no VCPU is executing code any more.
While a safe work item is pending, cpu_exec() returns before entering the
execution loop, so no VCPU can (re-)enter it. cpu_thread_is_idle() returns
false as well, so at the moment where all VCPUs are stop || stopped the safe
work queue can be flushed.
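For illustration, here is a minimal caller sketch (not part of this patch; the
function names are invented for the example) of how a translation-cache
invalidation could be deferred with the new API:

    #include "qom/cpu.h"

    /* Hypothetical callback: by the time it runs, every VCPU has left its
     * execution loop, so translation structures can be modified without
     * racing a running VCPU. */
    static void do_safe_tb_invalidate(void *data)
    {
        CPUState *cpu = data;
        (void)cpu; /* the real flush/invalidate for @cpu would go here */
    }

    /* Queue the work and return immediately; the callback runs later,
     * once no VCPU is executing code. */
    static void schedule_safe_tb_invalidate(CPUState *cpu)
    {
        async_run_safe_work_on_cpu(cpu, do_safe_tb_invalidate, cpu);
    }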
Signed-off-by: KONRAD Frederic <address@hidden>
Changes V2 -> V3:
* Unlock the mutex while executing the callback.
Changes V1 -> V2:
* Move qemu_cpu_kick_thread to avoid prototype declaration.
* Use the work_mutex lock to protect the queued_safe_work_* structures.
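The effect of that V2 -> V3 locking change, sketched here in the same shape as
the flush_queued_safe_work() hunk below (assuming the fields added by this
patch): work_mutex only guards the list links, so it is released around the
callback, which lets a callback take other locks or queue further safe work
without deadlocking on work_mutex.

    /* Sketch of the drain loop's locking discipline; mirrors
     * flush_queued_safe_work() in the diff below. */
    static void drain_safe_work_sketch(CPUState *cpu)
    {
        struct qemu_work_item *wi;

        qemu_mutex_lock(&cpu->work_mutex);
        while ((wi = cpu->queued_safe_work_first)) {
            cpu->queued_safe_work_first = wi->next;
            qemu_mutex_unlock(&cpu->work_mutex); /* not held across callback */
            wi->func(wi->data);                  /* may queue more safe work */
            qemu_mutex_lock(&cpu->work_mutex);
            if (wi->free) {
                g_free(wi);
            }
        }
        cpu->queued_safe_work_last = NULL;
        qemu_mutex_unlock(&cpu->work_mutex);
    }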
---
cpu-exec.c | 5 ++
cpus.c | 153 ++++++++++++++++++++++++++++++++++++++++--------------
include/qom/cpu.h | 24 ++++++++-
3 files changed, 141 insertions(+), 41 deletions(-)
diff --git a/cpu-exec.c b/cpu-exec.c
index 2fdf89d..7581f76 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -363,6 +363,11 @@ int cpu_exec(CPUState *cpu)
/* This must be volatile so it is not trashed by longjmp() */
volatile bool have_tb_lock = false;
+ if (async_safe_work_pending()) {
+ cpu->exit_request = 1;
+ return 0;
+ }
+
if (cpu->halted) {
if (!cpu_has_work(cpu)) {
return EXCP_HALTED;
diff --git a/cpus.c b/cpus.c
index eabd4b1..4321380 100644
--- a/cpus.c
+++ b/cpus.c
@@ -76,7 +76,7 @@ bool cpu_is_stopped(CPUState *cpu)
static bool cpu_thread_is_idle(CPUState *cpu)
{
- if (cpu->stop || cpu->queued_work_first) {
+ if (cpu->stop || cpu->queued_work_first || cpu->queued_safe_work_first) {
return false;
}
if (cpu_is_stopped(cpu)) {
@@ -833,6 +833,45 @@ void qemu_init_cpu_loop(void)
qemu_thread_get_self(&io_thread);
}
+static void qemu_cpu_kick_thread(CPUState *cpu)
+{
+#ifndef _WIN32
+ int err;
+
+ err = pthread_kill(cpu->thread->thread, SIG_IPI);
+ if (err) {
+ fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
+ exit(1);
+ }
+#else /* _WIN32 */
+ if (!qemu_cpu_is_self(cpu)) {
+ CONTEXT tcgContext;
+
+ if (SuspendThread(cpu->hThread) == (DWORD)-1) {
+ fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
+ GetLastError());
+ exit(1);
+ }
+
+ /* On multi-core systems, we are not sure that the thread is actually
+ * suspended until we can get the context.
+ */
+ tcgContext.ContextFlags = CONTEXT_CONTROL;
+ while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
+ continue;
+ }
+
+ cpu_signal(0);
+
+ if (ResumeThread(cpu->hThread) == (DWORD)-1) {
+ fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
+ GetLastError());
+ exit(1);
+ }
+ }
+#endif
+}
+
void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
{
struct qemu_work_item wi;
@@ -894,6 +933,76 @@ void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
qemu_cpu_kick(cpu);
}
+void async_run_safe_work_on_cpu(CPUState *cpu, void (*func)(void *data),
+ void *data)
+{
+ struct qemu_work_item *wi;
+
+ wi = g_malloc0(sizeof(struct qemu_work_item));
+ wi->func = func;
+ wi->data = data;
+ wi->free = true;
+
+ qemu_mutex_lock(&cpu->work_mutex);
+ if (cpu->queued_safe_work_first == NULL) {
+ cpu->queued_safe_work_first = wi;
+ } else {
+ cpu->queued_safe_work_last->next = wi;
+ }
+ cpu->queued_safe_work_last = wi;
+ wi->next = NULL;
+ wi->done = false;
+ qemu_mutex_unlock(&cpu->work_mutex);
+
+ CPU_FOREACH(cpu) {
+ qemu_cpu_kick_thread(cpu);
+ }
+}
+
+static void flush_queued_safe_work(CPUState *cpu)
+{
+ struct qemu_work_item *wi;
+ CPUState *other_cpu;
+
+ if (cpu->queued_safe_work_first == NULL) {
+ return;
+ }
+
+ CPU_FOREACH(other_cpu) {
+ if (other_cpu->tcg_executing != 0) {
+ return;
+ }
+ }
+
+ qemu_mutex_lock(&cpu->work_mutex);
+ while ((wi = cpu->queued_safe_work_first)) {
+ cpu->queued_safe_work_first = wi->next;
+ qemu_mutex_unlock(&cpu->work_mutex);
+ wi->func(wi->data);
+ qemu_mutex_lock(&cpu->work_mutex);
+ wi->done = true;
+ if (wi->free) {
+ g_free(wi);
+ }
+ }
+ cpu->queued_safe_work_last = NULL;
+ qemu_mutex_unlock(&cpu->work_mutex);
+ qemu_cond_broadcast(&qemu_work_cond);
+}
+
+/* FIXME: add a counter to avoid this CPU_FOREACH() */
+bool async_safe_work_pending(void)
+{
+ CPUState *cpu;
+
+ CPU_FOREACH(cpu) {
+ if (cpu->queued_safe_work_first) {
+ return true;
+ }
+ }
+ return false;
+}
+
static void flush_queued_work(CPUState *cpu)
{
struct qemu_work_item *wi;
@@ -926,6 +1035,9 @@ static void qemu_wait_io_event_common(CPUState *cpu)
cpu->stopped = true;
qemu_cond_signal(&qemu_pause_cond);
}
+ qemu_mutex_unlock_iothread();
+ flush_queued_safe_work(cpu);
+ qemu_mutex_lock_iothread();
flush_queued_work(cpu);
cpu->thread_kicked = false;
}
@@ -1085,45 +1197,6 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
return NULL;
}
-static void qemu_cpu_kick_thread(CPUState *cpu)
-{
-#ifndef _WIN32
- int err;
-
- err = pthread_kill(cpu->thread->thread, SIG_IPI);
- if (err) {
- fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
- exit(1);
- }
-#else /* _WIN32 */
- if (!qemu_cpu_is_self(cpu)) {
- CONTEXT tcgContext;
-
- if (SuspendThread(cpu->hThread) == (DWORD)-1) {
- fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
- GetLastError());
- exit(1);
- }
-
- /* On multi-core systems, we are not sure that the thread is actually
- * suspended until we can get the context.
- */
- tcgContext.ContextFlags = CONTEXT_CONTROL;
- while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
- continue;
- }
-
- cpu_signal(0);
-
- if (ResumeThread(cpu->hThread) == (DWORD)-1) {
- fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
- GetLastError());
- exit(1);
- }
- }
-#endif
-}
-
void qemu_cpu_kick(CPUState *cpu)
{
qemu_cond_broadcast(cpu->halt_cond);
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index a2de536..692d0d3 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -243,8 +243,9 @@ struct kvm_run;
* @mem_io_pc: Host Program Counter at which the memory was accessed.
* @mem_io_vaddr: Target virtual address at which the memory was accessed.
* @kvm_fd: vCPU file descriptor for KVM.
- * @work_mutex: Lock to prevent multiple access to queued_work_*.
+ * @work_mutex: Lock to prevent concurrent access to the queued_* qemu_work_item lists.
* @queued_work_first: First asynchronous work pending.
+ * @queued_safe_work_first: First item of safe work pending.
*
* State of one CPU core or thread.
*/
@@ -267,6 +268,7 @@ struct CPUState {
struct QemuCond *halt_cond;
QemuMutex work_mutex;
struct qemu_work_item *queued_work_first, *queued_work_last;
+ struct qemu_work_item *queued_safe_work_first, *queued_safe_work_last;
bool thread_kicked;
bool created;
bool stop;
@@ -546,6 +548,26 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data);
/**
+ * async_run_safe_work_on_cpu:
+ * @cpu: The vCPU to run on.
+ * @func: The function to be executed.
+ * @data: Data to pass to the function.
+ *
+ * Schedules the function @func for execution on the vCPU @cpu asynchronously,
+ * once all VCPUs are outside their execution loop.
+ */
+void async_run_safe_work_on_cpu(CPUState *cpu, void (*func)(void *data),
+ void *data);
+
+/**
+ * async_safe_work_pending:
+ *
+ * Check whether safe work is pending on any VCPU.
+ * Returns: @true if safe work is pending, @false otherwise.
+ */
+bool async_safe_work_pending(void);
+
+/**
* qemu_get_cpu:
* @index: The CPUState@cpu_index value of the CPU to obtain.
*
--
1.9.0