From: Alex Bennée
Subject: [Qemu-devel] [RFC v1 07/12] cpus: introduce async_safe_run_on_cpu.
Date: Fri, 15 Apr 2016 15:23:46 +0100

From: KONRAD Frederic <address@hidden>

We already have async_run_on_cpu(), but for some tasks we need to ensure
all vCPUs have stopped running before updating shared structures. We
call this safe work as it is then safe to make the modifications.

Work is scheduled with async_safe_run_on_cpu(), which is passed the
CPUState and an anonymous structure with the relevant data for the
task. Once the work is queued all vCPUs are kicked to bring them out of
the main execution loop.
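
To make the intended usage concrete, a caller might look something like
this (an illustrative sketch only; do_safe_flush, SafeFlushData and
schedule_safe_flush are made-up names, not part of this patch):

    /* Hypothetical user of the new API (inside the QEMU tree). */
    typedef struct {
        unsigned long flushes;
    } SafeFlushData;

    static void do_safe_flush(CPUState *cpu, void *data)
    {
        SafeFlushData *sfd = data;
        /* All vCPUs are outside their execution loop here, so
         * shared structures can be modified without further
         * locking. */
        sfd->flushes++;
        g_free(sfd);
    }

    static void schedule_safe_flush(CPUState *cpu)
    {
        SafeFlushData *sfd = g_new0(SafeFlushData, 1);
        async_safe_run_on_cpu(cpu, do_safe_flush, sfd);
    }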

The main difference from the other run_on_cpu functions is that it
operates on a single shared queue. This ensures fairness, as all
pending tasks are drained whichever vCPU nominally does the work. The
internal implementation is also a GArray, so the need to malloc memory
while adding tasks to the queue is minimised.
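
For reference, the GArray calls used below copy each item by value into
storage reserved up front (standard GLib API; the toy Item type is
illustrative):

    #include <glib.h>

    typedef struct { int id; } Item;

    int main(void)
    {
        /* Reserve room for 128 items; appends within that capacity
         * don't allocate again. */
        GArray *q = g_array_sized_new(FALSE, TRUE, sizeof(Item), 128);
        Item wi = { .id = 1 };
        g_array_append_val(q, wi);      /* copied into the array */
        g_assert(g_array_index(q, Item, 0).id == 1);
        g_array_free(q, TRUE);
        return 0;
    }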

While async_safe_work_pending() is true cpu_exec() returns and the
vCPUs can't enter the execution loop. Once all scheduled vCPUs have
exited the loop, the last one to exit processes the work queue.
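
The rendezvous itself reduces to the pattern below (a standalone model
using C11 atomics, not QEMU code; the real patch uses QEMU's
atomic_inc/atomic_dec_fetch and a condition variable for the sleep):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int scheduled_cpus;

    static void drain_safe_work(void) { puts("draining safe work"); }

    static void cpu_slice(void)
    {
        atomic_fetch_add(&scheduled_cpus, 1);
        /* ... execute guest code for this vCPU ... */
        if (atomic_fetch_sub(&scheduled_cpus, 1) == 1) {
            /* fetch_sub returns the old value: 1 means we were the
             * last vCPU out, so the system is quiescent and the
             * queue can be drained safely. */
            drain_safe_work();
        }
    }

    int main(void) { cpu_slice(); return 0; }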

Signed-off-by: KONRAD Frederic <address@hidden>
[AJB: Name change, single queue, atomic counter for active vCPUs]
Signed-off-by: Alex Bennée <address@hidden>

---
v1 (arm-v1)
  - now async_safe_run_on_cpu
  - single GArray based queue
  - use atomic counter to bring all vCPUs to a halt
  - wording for async safe_work
---
 cpu-exec-common.c |   1 +
 cpu-exec.c        |  11 ++++++
 cpus.c            | 102 ++++++++++++++++++++++++++++++++++++++++++++++++++++--
 include/qom/cpu.h |  19 ++++++++++
 4 files changed, 131 insertions(+), 2 deletions(-)

diff --git a/cpu-exec-common.c b/cpu-exec-common.c
index 3d7eaa3..c2f7c29 100644
--- a/cpu-exec-common.c
+++ b/cpu-exec-common.c
@@ -79,3 +79,4 @@ void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
     cpu->current_tb = NULL;
     siglongjmp(cpu->jmp_env, 1);
 }
+
diff --git a/cpu-exec.c b/cpu-exec.c
index 42cec05..2f362f8 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -365,6 +365,17 @@ int cpu_exec(CPUState *cpu)
     uintptr_t next_tb;
     SyncClocks sc;
 
+    /*
+     * Don't enter the execution loop if safe work is pending; in
+     * the MTTCG case this stops the vCPU from starting.
+     */
+#ifdef CONFIG_SOFTMMU
+    if (async_safe_work_pending()) {
+        cpu->exit_request = 1;
+        return 0;
+    }
+#endif
+
     /* replay_interrupt may need current_cpu */
     current_cpu = cpu;
 
diff --git a/cpus.c b/cpus.c
index 9177161..860e2a9 100644
--- a/cpus.c
+++ b/cpus.c
@@ -928,6 +928,19 @@ static QemuCond qemu_cpu_cond;
 static QemuCond qemu_pause_cond;
 static QemuCond qemu_work_cond;
 
+/* safe work */
+static int safe_work_pending;
+static int tcg_scheduled_cpus;
+
+typedef struct {
+    CPUState            *cpu;  /* CPU affected */
+    run_on_cpu_func     func;  /* Helper function */
+    void                *data; /* Helper data */
+} qemu_safe_work_item;
+
+static GArray *safe_work;       /* array of qemu_safe_work_items */
+static QemuMutex safe_work_mutex;
+
 void qemu_init_cpu_loop(void)
 {
     qemu_init_sigbus();
@@ -937,6 +950,9 @@ void qemu_init_cpu_loop(void)
     qemu_mutex_init(&qemu_global_mutex);
 
     qemu_thread_get_self(&io_thread);
+
+    safe_work = g_array_sized_new(TRUE, TRUE, sizeof(qemu_safe_work_item), 128);
+    qemu_mutex_init(&safe_work_mutex);
 }
 
 void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
@@ -997,6 +1013,81 @@ void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
     qemu_cpu_kick(cpu);
 }
 
+/*
+ * Safe work interface
+ *
+ * Safe work is defined as work that requires the system to be
+ * quiescent before making changes.
+ */
+
+void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
+{
+    CPUState *iter;
+    qemu_safe_work_item wi;
+    wi.cpu = cpu;
+    wi.func = func;
+    wi.data = data;
+
+    qemu_mutex_lock(&safe_work_mutex);
+    g_array_append_val(safe_work, wi);
+    atomic_inc(&safe_work_pending);
+    qemu_mutex_unlock(&safe_work_mutex);
+
+    /* Signal all vCPUs to halt */
+    CPU_FOREACH(iter) {
+        qemu_cpu_kick(iter);
+    }
+}
+
+/**
+ * flush_queued_safe_work:
+ *
+ * @scheduled_cpu_count: number of vCPUs still in the execution loop
+ *
+ * If not 0 the vCPU has nothing to do but sleep. The last vCPU to
+ * get to the function then drains the queue while the system is in
+ * a quiescent state, which allows the work to safely change shared
+ * structures, and then wakes the others.
+ *
+ * @see async_safe_run_on_cpu
+ */
+static void flush_queued_safe_work(int scheduled_cpu_count)
+{
+    qemu_safe_work_item *wi;
+    int i;
+
+    /* bail out if there is nothing to do */
+    if (!async_safe_work_pending()) {
+        return;
+    }
+
+    if (scheduled_cpu_count) {
+
+        /* Nothing to do but sleep */
+        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
+
+    } else {
+
+        /* We can now do the work */
+        qemu_mutex_lock(&safe_work_mutex);
+        for (i = 0; i < safe_work->len; i++) {
+            wi = &g_array_index(safe_work, qemu_safe_work_item, i);
+            wi->func(wi->cpu, wi->data);
+        }
+        g_array_remove_range(safe_work, 0, safe_work->len);
+        atomic_set(&safe_work_pending, 0);
+        qemu_mutex_unlock(&safe_work_mutex);
+
+        /* Wake everyone up */
+        qemu_cond_broadcast(&qemu_work_cond);
+    }
+}
+
+bool async_safe_work_pending(void)
+{
+    return (atomic_read(&safe_work_pending) != 0);
+}
+
 static void flush_queued_work(CPUState *cpu)
 {
     struct qemu_work_item *wi;
@@ -1259,6 +1350,7 @@ static void *qemu_tcg_single_cpu_thread_fn(void *arg)
 
         if (cpu) {
             g_assert(cpu->exit_request);
+            flush_queued_safe_work(0);
             /* Pairs with smp_wmb in qemu_cpu_kick.  */
             atomic_mb_set(&cpu->exit_request, 0);
             qemu_tcg_wait_io_event(cpu);
@@ -1300,8 +1392,13 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
     while (1) {
         bool sleep = false;
 
-        if (cpu_can_run(cpu)) {
-            int r = tcg_cpu_exec(cpu);
+        if (cpu_can_run(cpu) && !async_safe_work_pending()) {
+            int r;
+
+            atomic_inc(&tcg_scheduled_cpus);
+            r = tcg_cpu_exec(cpu);
+            flush_queued_safe_work(atomic_dec_fetch(&tcg_scheduled_cpus));
+
             switch (r)
             {
             case EXCP_DEBUG:
@@ -1319,6 +1416,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
                 /* Ignore everything else? */
                 break;
             }
+
         } else {
             sleep = true;
         }
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 385d5bb..8ab969e 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -642,6 +642,25 @@ void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data);
 void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data);
 
 /**
+ * async_safe_run_on_cpu:
+ * @cpu: The vCPU to run on.
+ * @func: The function to be executed.
+ * @data: Data to pass to the function.
+ *
+ * Schedules the function @func for execution on the vCPU @cpu asynchronously,
+ * to run once all vCPUs are outside their execution loop.
+ */
+void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data);
+
+/**
+ * async_safe_work_pending:
+ *
+ * Check whether any safe work is pending on any vCPU.
+ * Returns: @true if safe work is pending, @false otherwise.
+ */
+bool async_safe_work_pending(void);
+
+/**
  * qemu_get_cpu:
 * @index: The CPUState@cpu_index value of the CPU to obtain.
  *
-- 
2.7.4



