qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Qemu-devel] [RFC 44/48] cpus: lockstep execution support


From: Emilio G. Cota
Subject: Re: [Qemu-devel] [RFC 44/48] cpus: lockstep execution support
Date: Wed, 14 Nov 2018 13:33:46 -0500
User-agent: Mutt/1.9.4 (2018-02-28)

On Wed, Nov 14, 2018 at 16:43:22 +0000, Alex Bennée wrote:
> 
> Emilio G. Cota <address@hidden> writes:
> 
> > Signed-off-by: Emilio G. Cota <address@hidden>
> > ---
> <snip>
> >
> >  void cpu_interrupt(CPUState *cpu, int mask);
> > diff --git a/cpus.c b/cpus.c
> > index 3efe89354d..a446632a5c 100644
> > --- a/cpus.c
> > +++ b/cpus.c
> <snip>
> > +
> > +static void cpu_lockstep_init(CPUState *cpu)
> > +{
> > +    if (!lockstep_enabled) {
> > +        return;
> > +    }
> > +    qemu_mutex_lock(&lockstep_lock);
> > +    /*
> > +     * HACK: avoid racing with a wakeup, which would miss the addition
> > +     * of this CPU; just wait until no wakeup is ongoing.
> > +     */
> > +    while (unlikely(lockstep_ongoing_wakeup)) {
> > +        qemu_mutex_unlock(&lockstep_lock);
> > +        sched_yield();
> 
> This breaks Windows builds. I suspect if we do want this sort of
> functionality we'll need to expose a utility function in
> oslib-posix/oslib-win32

This was just a quick hack to avoid adding a condvar to the
wake-up fast path. Fixed in v2 with the below, which only calls
cond_broadcast if needed (i.e. very rarely).

Thanks,

                Emilio

diff --git a/cpus.c b/cpus.c
index d7d1bd3e00..06a952e504 100644
--- a/cpus.c
+++ b/cpus.c
@@ -84,8 +84,10 @@ static unsigned int throttle_percentage;
 static bool lockstep_enabled;
 static bool lockstep_ongoing_wakeup;
 static QemuMutex lockstep_lock;
+static QemuCond lockstep_cond;
 static int n_lockstep_running_cpus;
 static int n_lockstep_cpus;
+static int n_lockstep_initializing_cpus;
 static CPUState **lockstep_cpus;
 
 #define CPU_THROTTLE_PCT_MIN 1
@@ -1260,6 +1262,7 @@ void qemu_init_cpu_loop(void)
     qemu_init_sigbus();
     qemu_mutex_init(&qemu_global_mutex);
     qemu_mutex_init(&lockstep_lock);
+    qemu_cond_init(&lockstep_cond);
 
     qemu_thread_get_self(&io_thread);
 }
@@ -1369,6 +1372,13 @@ static void lockstep_check_stop(CPUState *cpu)
             cpu_mutex_lock(cpu);
             qemu_mutex_lock(&lockstep_lock);
             lockstep_ongoing_wakeup = false;
+            /*
+             * If newly spawned CPUs are waiting to be added to the wait list,
+             * let them do so now.
+             */
+            if (unlikely(n_lockstep_initializing_cpus)) {
+                qemu_cond_broadcast(&lockstep_cond);
+            }
         }
         qemu_mutex_unlock(&lockstep_lock);
     }
@@ -1379,16 +1389,15 @@ static void cpu_lockstep_init(CPUState *cpu)
     if (!lockstep_enabled) {
         return;
     }
+
     qemu_mutex_lock(&lockstep_lock);
-    /*
-     * HACK: avoid racing with a wakeup, which would miss the addition
-     * of this CPU; just wait until no wakeup is ongoing.
-     */
-    while (unlikely(lockstep_ongoing_wakeup)) {
-        qemu_mutex_unlock(&lockstep_lock);
-        sched_yield();
-        qemu_mutex_lock(&lockstep_lock);
+    /* avoid racing with a wakeup, which would miss the addition of this CPU */
+    n_lockstep_initializing_cpus++;
+    while (lockstep_ongoing_wakeup) {
+        qemu_cond_wait(&lockstep_cond, &lockstep_lock);
     }
+    n_lockstep_initializing_cpus--;
+
     lockstep_cpus = g_realloc(lockstep_cpus,
                               (n_lockstep_cpus + 1) * sizeof(CPUState *));
     lockstep_cpus[n_lockstep_cpus++] = cpu;



reply via email to

[Prev in Thread] Current Thread [Next in Thread]