qemu-devel
[Top][All Lists]
Advanced

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Qemu-devel] [RFC PATCH 06/22] CPU module changes


From: Pavel Dovgaluk
Subject: [Qemu-devel] [RFC PATCH 06/22] CPU module changes
Date: Tue, 1 Jul 2014 15:22:05 +0400

These patches include modifications of the common CPU files. All interrupts and
exceptions that occur during recording are written into the replay log.
These events allow replaying the execution correctly by kicking the CPU thread
when it finds one of these events in the log.

Signed-off-by: Pavel Dovgalyuk <address@hidden>
---

diff --git a/cpu-exec.c b/cpu-exec.c
index 38e5f02..3fecd19
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -22,6 +22,8 @@
 #include "tcg.h"
 #include "qemu/atomic.h"
 #include "sysemu/qtest.h"
+#include "replay/replay.h"
+#include "qemu/main-loop.h"
 
 void cpu_loop_exit(CPUState *cpu)
 {
@@ -189,12 +191,14 @@ static inline TranslationBlock *tb_find_fast(CPUArchState *env)
 
 static CPUDebugExcpHandler *debug_excp_handler;
 
-void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
+CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
 {
+    CPUDebugExcpHandler *old = debug_excp_handler;
     debug_excp_handler = handler;
+    return old;
 }
 
-static void cpu_handle_debug_exception(CPUArchState *env)
+void cpu_handle_debug_exception(CPUArchState *env)
 {
     CPUState *cpu = ENV_GET_CPU(env);
     CPUWatchpoint *wp;
@@ -281,25 +285,31 @@ int cpu_exec(CPUArchState *env)
 #else
 #error unsupported target CPU
 #endif
-    cpu->exception_index = -1;
-
     /* prepare setjmp context for exception handling */
     for(;;) {
         if (sigsetjmp(cpu->jmp_env, 0) == 0) {
             /* if an exception is pending, we execute it here */
             if (cpu->exception_index >= 0) {
+                if (cpu->exception_index == EXCP_REPLAY) {
+                    ret = cpu->exception_index;
+                    cpu->exception_index = -1;
+                    qemu_notify_event();
+                    break;
+                }
                 if (cpu->exception_index >= EXCP_INTERRUPT) {
                     /* exit request from the cpu execution loop */
                     ret = cpu->exception_index;
                     if (ret == EXCP_DEBUG) {
                         cpu_handle_debug_exception(env);
                     }
+                    cpu->exception_index = -1;
                     break;
                 } else {
+                    if (replay_exception()) {
 #if defined(CONFIG_USER_ONLY)
-                    /* if user mode only, we simulate a fake exception
-                       which will be handled outside the cpu execution
-                       loop */
+                        /* if user mode only, we simulate a fake exception
+                           which will be handled outside the cpu execution
+                           loop */
 #if defined(TARGET_I386)
                     cc->do_interrupt(cpu);
 #endif
@@ -309,13 +319,40 @@ int cpu_exec(CPUArchState *env)
                     cc->do_interrupt(cpu);
                     cpu->exception_index = -1;
 #endif
+                    } else {
+                        /* goto interrupt processing or wait loop */
+                        if (!replay_read_interrupt_request()) {
+                            /* give a chance for main_loop_wait in replay mode */
+                            ret = EXCP_REPLAY;
+                            break;
+                        }
+                    }
                 }
             }
 
             next_tb = 0; /* force lookup of first TB */
             for(;;) {
                 interrupt_request = cpu->interrupt_request;
-                if (unlikely(interrupt_request)) {
+                int replay_int = 0;
+                if (replay_mode == REPLAY_SAVE && interrupt_request) {
+                    replay_write_interrupt_request();
+                    replay_int = 1;
+                } else if (replay_mode == REPLAY_PLAY) {
+                    if (replay_get_play_submode() == REPLAY_PLAY_CHANGED) {
+                        replay_int = 1;
+                    } else {
+                        if (replay_read_interrupt_request() && interrupt_request) {
+                            replay_int = 1;
+                            replay_reset_interrupt_request();
+                        // debug interrupts should be processed anyway
+                        } else if (interrupt_request & CPU_INTERRUPT_DEBUG) {
+                            replay_int = 1;
+                        }
+                    }
+                } else if (replay_mode == REPLAY_NONE) {
+                    replay_int = 1;
+                }
+                if (unlikely(interrupt_request && replay_int)) {
                     if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
                         /* Mask out external interrupts for this step. */
                         interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
@@ -599,11 +636,23 @@ int cpu_exec(CPUArchState *env)
                         next_tb = 0;
                     }
                 }
+                if (cpu->exception_index == EXCP_REPLAY) {
+                    // go to exception_index checking
+                    break;
+                }
                 if (unlikely(cpu->exit_request)) {
                     cpu->exit_request = 0;
-                    cpu->exception_index = EXCP_INTERRUPT;
+                    if (cpu->exception_index != EXCP_DEBUG)
+                        cpu->exception_index = EXCP_INTERRUPT;
                     cpu_loop_exit(cpu);
                 }
+                if (!replay_has_code()) {
+                    cpu->exception_index = EXCP_REPLAY;
+                    break;
+                }
+                /* Save exit_request value to check if it was set by translation */
+                int prev_exit = cpu->exit_request;
+
                 spin_lock(&tcg_ctx.tb_ctx.tb_lock);
                 have_tb_lock = true;
                 tb = tb_find_fast(env);
@@ -623,7 +672,7 @@ int cpu_exec(CPUArchState *env)
                 /* see if we can patch the calling TB. When the TB
                    spans two pages, we cannot safely do a direct
                    jump. */
-                if (next_tb != 0 && tb->page_addr[1] == -1) {
+                if (tb && (next_tb & ~TB_EXIT_MASK) != 0 && tb->page_addr[1] == -1) {
                     tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                 next_tb & TB_EXIT_MASK, tb);
                 }
@@ -636,7 +685,9 @@ int cpu_exec(CPUArchState *env)
                    starting execution if there is a pending interrupt. */
                 cpu->current_tb = tb;
                 barrier();
-                if (likely(!cpu->exit_request)) {
+                // TODO: remove useless if
+                if (likely((replay_mode == REPLAY_NONE && !cpu->exit_request)
+                           || (replay_mode != REPLAY_NONE && cpu->exit_request == prev_exit))) {
                     tc_ptr = tb->tc_ptr;
                     /* execute the generated code */
                     next_tb = cpu_tb_exec(cpu, tc_ptr);
diff --git a/cpus.c b/cpus.c
index 5e7f2cf..735a8e4
--- a/cpus.c
+++ b/cpus.c
@@ -39,6 +39,7 @@
 #include "qemu/main-loop.h"
 #include "qemu/bitmap.h"
 #include "qemu/seqlock.h"
+#include "replay/replay.h"
 #include "qapi-event.h"
 
 #ifndef _WIN32
@@ -85,7 +86,7 @@ static bool cpu_thread_is_idle(CPUState *cpu)
     return true;
 }
 
-static bool all_cpu_threads_idle(void)
+bool all_cpu_threads_idle(void)
 {
     CPUState *cpu;
 
@@ -171,6 +172,9 @@ int64_t cpu_get_ticks(void)
     if (use_icount) {
         return cpu_get_icount();
     }
+    if (replay_icount) {
+       return replay_get_icount();
+    }
 
     ticks = timers_state.cpu_ticks_offset;
     if (timers_state.cpu_ticks_enabled) {
@@ -206,12 +210,21 @@ int64_t cpu_get_clock(void)
     int64_t ti;
     unsigned start;
 
+    if (replay_mode == REPLAY_PLAY) {
+        return replay_read_clock(REPLAY_CLOCK_VIRTUAL);
+    }
+    
     do {
         start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
         ti = cpu_get_clock_locked();
     } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
 
+    if (replay_mode == REPLAY_SAVE) {
+        replay_save_clock(REPLAY_CLOCK_VIRTUAL, ti);
+    }
+
     return ti;
+
 }
 
 /* enable cpu_get_ticks()
@@ -219,14 +232,32 @@ int64_t cpu_get_clock(void)
  */
 void cpu_enable_ticks(void)
 {
-    /* Here, the really thing protected by seqlock is cpu_clock_offset. */
-    seqlock_write_lock(&timers_state.vm_clock_seqlock);
     if (!timers_state.cpu_ticks_enabled) {
+        cpu_do_enable_ticks();
+    }
+}
+
+void cpu_do_enable_ticks(void)
+{
+    int64_t ti;
+
+    // cpu_clock is not used in replay icount mode
+    if (!replay_icount) {
+        /* Here, the really thing protected by seqlock is cpu_clock_offset. */
+        seqlock_write_lock(&timers_state.vm_clock_seqlock);
         timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
-        timers_state.cpu_clock_offset -= get_clock();
-        timers_state.cpu_ticks_enabled = 1;
+
+        ti = get_clock();
+
+        if (replay_mode == REPLAY_SAVE) {
+            replay_save_clock(REPLAY_CLOCK_VIRTUAL, ti);
+        } else if (replay_mode == REPLAY_PLAY) {
+            ti = replay_read_clock(REPLAY_CLOCK_VIRTUAL);
+        }
+        timers_state.cpu_clock_offset -= ti;
+        seqlock_write_unlock(&timers_state.vm_clock_seqlock);
     }
-    seqlock_write_unlock(&timers_state.vm_clock_seqlock);
+    timers_state.cpu_ticks_enabled = 1;
 }
 
 /* disable cpu_get_ticks() : the clock is stopped. You must not call
@@ -238,8 +269,10 @@ void cpu_disable_ticks(void)
     /* Here, the really thing protected by seqlock is cpu_clock_offset. */
     seqlock_write_lock(&timers_state.vm_clock_seqlock);
     if (timers_state.cpu_ticks_enabled) {
+        if (!replay_icount) {
         timers_state.cpu_ticks_offset += cpu_get_real_ticks();
         timers_state.cpu_clock_offset = cpu_get_clock_locked();
+        }
         timers_state.cpu_ticks_enabled = 0;
     }
     seqlock_write_unlock(&timers_state.vm_clock_seqlock);
@@ -354,7 +387,7 @@ void qtest_clock_warp(int64_t dest)
         qemu_icount_bias += warp;
         seqlock_write_unlock(&timers_state.vm_clock_seqlock);
 
-        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
+        qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL, false);
         clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
     }
     qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
@@ -370,7 +403,12 @@ void qemu_clock_warp(QEMUClockType type)
      * applicable to other clocks.  But a clock argument removes the
      * need for if statements all over the place.
      */
-    if (type != QEMU_CLOCK_VIRTUAL || !use_icount) {
+    if (type != QEMU_CLOCK_VIRTUAL || (!use_icount && !replay_icount)) {
+        return;
+    }
+    
+    if (replay_icount) {
+        replay_clock_warp();
         return;
     }
 
@@ -428,6 +466,22 @@ void qemu_clock_warp(QEMUClockType type)
     }
 }
 
+static bool is_replay_enabled(void *opaque)
+{
+    return replay_mode != REPLAY_NONE;
+}
+
+static const VMStateDescription vmstate_timers_for_replay = {
+    .name = "timer for replay",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields      = (VMStateField []) {
+        VMSTATE_INT64(cpu_ticks_prev, TimersState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 static const VMStateDescription vmstate_timers = {
     .name = "timer",
     .version_id = 2,
@@ -437,6 +491,14 @@ static const VMStateDescription vmstate_timers = {
         VMSTATE_INT64(dummy, TimersState),
         VMSTATE_INT64_V(cpu_clock_offset, TimersState, 2),
         VMSTATE_END_OF_LIST()
+    },
+    .subsections = (VMStateSubsection []) {
+        {
+            .vmsd = &vmstate_timers_for_replay,
+            .needed = is_replay_enabled,
+        }, {
+            /* empty */
+        }
     }
 };
 
@@ -447,6 +509,10 @@ void configure_icount(const char *option)
     if (!option) {
         return;
     }
+    if (replay_mode != REPLAY_NONE) {
+        fprintf(stderr, "-icount option is not supported in replay mode\n");
+        exit(1);
+    }
 
     icount_warp_timer = timer_new_ns(QEMU_CLOCK_REALTIME,
                                           icount_warp_rt, NULL);
@@ -528,9 +594,11 @@ static int do_vm_stop(RunState state)
     int ret = 0;
 
     if (runstate_is_running()) {
-        cpu_disable_ticks();
-        pause_all_vcpus();
+        // Disabling ticks can cause a recursive call of vm_stop.
+        // Stopping before calling these functions prevents infinite recursion.
         runstate_set(state);
+        pause_all_vcpus();
+        cpu_disable_ticks();
         vm_state_notify(0, state);
         qapi_event_send_stop(&error_abort);
     }
@@ -820,13 +888,24 @@ static void qemu_wait_io_event_common(CPUState *cpu)
 static void qemu_tcg_wait_io_event(void)
 {
     CPUState *cpu;
+    GMainContext *context = g_main_context_default();
 
-    while (all_cpu_threads_idle()) {
-       /* Start accounting real time to the virtual clock if the CPUs
-          are idle.  */
+    if (replay_mode == REPLAY_PLAY && replay_get_play_submode() != REPLAY_PLAY_CHANGED &&
+        all_cpu_threads_idle() && first_cpu->halted)
+    {
         qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
+        g_main_context_wakeup(context);
+        // need to wait on the condition, because this unlocks the mutex
         qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
     }
+    else
+    {
+        while (all_cpu_threads_idle()) {
+           /* Start accounting real time to the virtual clock if the CPUs
+              are idle.  */
+            qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
+            qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex); 
+        }
+    }
 
     while (iothread_requesting_mutex) {
         qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
@@ -851,7 +930,7 @@ static void *qemu_kvm_cpu_thread_fn(void *arg)
 {
     CPUState *cpu = arg;
     int r;
-
+    
     qemu_mutex_lock(&qemu_global_mutex);
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
@@ -932,16 +1011,18 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
 
     qemu_tcg_init_cpu_signals();
     qemu_thread_get_self(cpu->thread);
-
     qemu_mutex_lock(&qemu_global_mutex);
     CPU_FOREACH(cpu) {
         cpu->thread_id = qemu_get_thread_id();
         cpu->created = true;
+        /* init exception index here */
+        cpu->exception_index = -1;
     }
     qemu_cond_signal(&qemu_cpu_cond);
 
     /* wait for initial kick-off after machine start */
     while (QTAILQ_FIRST(&cpus)->stopped) {
+    
         qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
 
         /* process any pending work */
@@ -969,13 +1050,10 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
 static void qemu_cpu_kick_thread(CPUState *cpu)
 {
 #ifndef _WIN32
-    int err;
-
-    err = pthread_kill(cpu->thread->thread, SIG_IPI);
-    if (err) {
-        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
-        exit(1);
-    }
+    if (cpu) {
+        cpu->exit_request = 1;
+        cpu->tcg_exit_req = 1;
+        }
 #else /* _WIN32 */
     if (!qemu_cpu_is_self(cpu)) {
         CONTEXT tcgContext;
@@ -1033,7 +1111,7 @@ bool qemu_cpu_is_self(CPUState *cpu)
     return qemu_thread_is_self(cpu->thread);
 }
 
-static bool qemu_in_vcpu_thread(void)
+bool qemu_in_vcpu_thread(void)
 {
     return current_cpu && qemu_cpu_is_self(current_cpu);
 }
@@ -1044,8 +1122,16 @@ void qemu_mutex_lock_iothread(void)
         qemu_mutex_lock(&qemu_global_mutex);
     } else {
         iothread_requesting_mutex = true;
-        if (qemu_mutex_trylock(&qemu_global_mutex)) {
-            qemu_cpu_kick_thread(first_cpu);
+        if (replay_mode == REPLAY_NONE) {
+            if (qemu_mutex_trylock(&qemu_global_mutex)) {
+                qemu_cpu_kick_thread(first_cpu);
+                qemu_mutex_lock(&qemu_global_mutex);
+            }
+        } else {
+            if (first_cpu) {
+                qemu_cpu_kick_thread(first_cpu);
+            }
+                
             qemu_mutex_lock(&qemu_global_mutex);
         }
         iothread_requesting_mutex = false;
@@ -1186,6 +1272,7 @@ void qemu_init_vcpu(CPUState *cpu)
     cpu->nr_cores = smp_cores;
     cpu->nr_threads = smp_threads;
     cpu->stopped = true;
+    cpu->instructions_count = 0;
     if (kvm_enabled()) {
         qemu_kvm_start_vcpu(cpu);
     } else if (tcg_enabled()) {
@@ -1205,6 +1292,13 @@ void cpu_stop_current(void)
     }
 }
 
+void cpu_exit_current(void)
+{
+    if (current_cpu) {
+        cpu_exit(current_cpu);
+    }
+}
+
 int vm_stop(RunState state)
 {
     if (qemu_in_vcpu_thread()) {
@@ -1265,7 +1359,6 @@ static int tcg_cpu_exec(CPUArchState *env)
         }
 
         count = qemu_icount_round(deadline);
-        qemu_icount += count;
         decr = (count > 0xffff) ? 0xffff : count;
         count -= decr;
         cpu->icount_decr.u16.low = decr;
@@ -1299,14 +1392,16 @@ static void tcg_exec_all(void)
         CPUState *cpu = next_cpu;
         CPUArchState *env = cpu->env_ptr;
 
-        qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
+        if (cpu_can_run(cpu)) {
+            qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                           (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
 
-        if (cpu_can_run(cpu)) {
             r = tcg_cpu_exec(env);
             if (r == EXCP_DEBUG) {
                 cpu_handle_guest_debug(cpu);
                 break;
+            } else if (r == EXCP_REPLAY) {
+                break;
             }
         } else if (cpu->stop || cpu->stopped) {
             break;

diff --git a/exec.c b/exec.c
index 18d6c35..5a9f54e
--- a/exec.c
+++ b/exec.c
@@ -50,6 +50,7 @@
 
 #include "exec/memory-internal.h"
 #include "exec/ram_addr.h"
+#include "replay/replay.h"
 
 #include "qemu/range.h"
 
@@ -731,6 +732,7 @@ void cpu_abort(CPUState *cpu, const char *fmt, ...)
     }
     va_end(ap2);
     va_end(ap);
+    replay_finish();
 #if defined(CONFIG_USER_ONLY)
     {
         struct sigaction act;
@@ -1611,10 +1613,27 @@ static void check_watchpoint(int offset, int len_mask, int flags)
     QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
         if ((vaddr == (wp->vaddr & len_mask) ||
              (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
+
+            /* Don't actually process a watchpoint, it will be processed,
+               when reverse execution stops. */
+            if (replay_get_play_submode() == REPLAY_PLAY_REVERSE) {
+                wp->flags &= ~BP_WATCHPOINT_HIT;
+                replay_reverse_breakpoint();
+                continue;
+            }
+
             wp->flags |= BP_WATCHPOINT_HIT;
             if (!cpu->watchpoint_hit) {
                 cpu->watchpoint_hit = wp;
                 tb_check_watchpoint(cpu);
+
+                /* Current instruction is already processed by trace
+                   and replay. Set flags that allow skipping these
+                   events */
+                if (replay_mode != REPLAY_NONE) {
+                    replay_undo_last_instruction();
+                }
+
                 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                     cpu->exception_index = EXCP_DEBUG;
                     cpu_loop_exit(cpu);

diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index 2dd6206..db9ae1a
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -59,6 +59,7 @@ typedef uint64_t target_ulong;
 #define EXCP_DEBUG      0x10002 /* cpu stopped after a breakpoint or singlestep */
 #define EXCP_HALTED     0x10003 /* cpu is halted (waiting for external event) */
 #define EXCP_YIELD      0x10004 /* cpu wants to yield timeslice to another */
+#define EXCP_REPLAY     0x10005 /* for breaking execution loop to make correct order of events */
 
 /* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
    addresses on the same page.  The top bits are the same.  This allows

diff --git a/include/exec/gen-icount.h b/include/exec/gen-icount.h
index da53395..20cc8ca 100644
--- a/include/exec/gen-icount.h
+++ b/include/exec/gen-icount.h
@@ -24,6 +24,13 @@ static inline void gen_tb_start(void)
     if (!use_icount)
         return;
 
+    /*
+      This implementation will not be completely correct in the case
+      of using breakpoints - the decrement is always equal to the number
+      of instructions, and a breakpoint is hit before the last instruction
+      in the block. So the decrement will be greater by one than the
+      number of actually executed instructions.
+    */
     icount_label = gen_new_label();
     count = tcg_temp_local_new_i32();
     tcg_gen_ld_i32(count, cpu_env,

diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 1aafbf5..9c7b5c8 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -287,6 +287,7 @@ struct CPUState {
        (absolute value) offset as small as possible.  This reduces code
        size, especially for hosts without large memory offsets.  */
     volatile sig_atomic_t tcg_exit_req;
+    uint32_t instructions_count;
 };
 
 QTAILQ_HEAD(CPUTailQ, CPUState);
@@ -466,6 +467,9 @@ static inline bool cpu_has_work(CPUState *cpu)
  */
 bool qemu_cpu_is_self(CPUState *cpu);
 
+// needed here for replay support
+bool qemu_in_vcpu_thread(void);
+
 /**
  * qemu_cpu_kick:
  * @cpu: The vCPU to kick.

diff --git a/include/sysemu/cpus.h b/include/sysemu/cpus.h
index 4f79081..8921fd2 100644
--- a/include/sysemu/cpus.h
+++ b/include/sysemu/cpus.h
@@ -6,6 +6,9 @@ void qemu_init_cpu_loop(void);
 void resume_all_vcpus(void);
 void pause_all_vcpus(void);
 void cpu_stop_current(void);
+void cpu_exit_current(void);
+
+bool all_cpu_threads_idle(void);
 
 void cpu_synchronize_all_states(void);
 void cpu_synchronize_all_post_reset(void);

diff --git a/translate-all.c b/translate-all.c
index 8f7e11b..af31abf
--- a/translate-all.c
+++ b/translate-all.c
@@ -59,6 +59,7 @@
 #include "exec/cputlb.h"
 #include "translate-all.h"
 #include "qemu/timer.h"
+#include "replay/replay.h"
 
 //#define DEBUG_TB_INVALIDATE
 //#define DEBUG_FLUSH
@@ -216,8 +217,10 @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
     gen_intermediate_code_pc(env, tb);
 
     if (use_icount) {
-        /* Reset the cycle counter to the start of the block.  */
+        if (replay_mode == REPLAY_NONE) {
+            /* Reset the cycle counter to the start of the block.  */
         cpu->icount_decr.u16.low += tb->icount;
+        }
         /* Clear the IO flag.  */
         cpu->can_do_io = 0;
     }
@@ -243,7 +246,9 @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
     while (s->gen_opc_instr_start[j] == 0) {
         j--;
     }
+    if (replay_mode == REPLAY_NONE) {
     cpu->icount_decr.u16.low -= s->gen_opc_icount[j];
+    }
 
     restore_state_to_opc(env, tb, j);
 
@@ -803,6 +808,14 @@ void tb_flush(CPUArchState *env1)
     tcg_ctx.tb_ctx.tb_flush_count++;
 }
 
+void tb_flush_all(void)
+{
+    CPUState *cpu;
+    for (cpu = first_cpu ; cpu != NULL ; cpu = CPU_NEXT(cpu)) {
+        tb_flush(cpu->env_ptr);
+    }
+}
+
 #ifdef DEBUG_TB_CHECK
 
 static void tb_invalidate_check(target_ulong address)
@@ -1163,6 +1176,12 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                 cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                      &current_flags);
+                /* Current instruction is already processed by trace
+                   and replay. Set flags that allow skipping these
+                   events */
+                if (replay_mode != REPLAY_NONE) {
+                    replay_undo_last_instruction();
+                }
             }
 #endif /* TARGET_HAS_PRECISE_SMC */
             /* we need to do that to handle the case where a signal
@@ -1282,6 +1301,13 @@ static void tb_invalidate_phys_page(tb_page_addr_t addr,
             cpu_restore_state_from_tb(cpu, current_tb, pc);
             cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                  &current_flags);
+
+            /* Current instruction is already processed by trace 
+                and replay. Set flags that allow skipping these
+                events */
+            if (replay_mode != REPLAY_NONE) {
+                replay_undo_last_instruction();
+            }
         }
 #endif /* TARGET_HAS_PRECISE_SMC */
         tb_phys_invalidate(tb, addr);

diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h
index d8539fd..3364b26 100644
--- a/include/sysemu/sysemu.h
+++ b/include/sysemu/sysemu.h
@@ -58,6 +58,7 @@ void qemu_system_wakeup_request(WakeupReason reason);
 void qemu_system_wakeup_enable(WakeupReason reason, bool enabled);
 void qemu_register_wakeup_notifier(Notifier *notifier);
 void qemu_system_shutdown_request(void);
+void qemu_system_shutdown_request_impl(void);
 void qemu_system_powerdown_request(void);
 void qemu_register_powerdown_notifier(Notifier *notifier);
 void qemu_system_debug_request(void);
@@ -76,6 +77,7 @@ void qemu_add_machine_init_done_notifier(Notifier *notify);
 
 void do_savevm(Monitor *mon, const QDict *qdict);
 int load_vmstate(const char *name);
+int save_vmstate(Monitor *mon, const char *name);
 void do_delvm(Monitor *mon, const QDict *qdict);
 void do_info_snapshots(Monitor *mon, const QDict *qdict);





reply via email to

[Prev in Thread] Current Thread [Next in Thread]