From: Emilio G. Cota
Subject: [Qemu-devel] [PATCH 28/35] exec: access cpu->interrupt_request with atomics
Date: Mon, 17 Sep 2018 12:30:56 -0400

From: Paolo Bonzini <address@hidden>

Cc: Peter Crosthwaite <address@hidden>
Cc: Richard Henderson <address@hidden>
Signed-off-by: Paolo Bonzini <address@hidden>
Signed-off-by: Emilio G. Cota <address@hidden>
---
 accel/tcg/cpu-exec.c      | 6 +++---
 accel/tcg/tcg-all.c       | 3 +--
 accel/tcg/translate-all.c | 2 +-
 qom/cpu.c                 | 6 +++---
 4 files changed, 8 insertions(+), 9 deletions(-)

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 7ca00725ec..2383763f9b 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -426,7 +426,7 @@ static inline bool cpu_handle_halt(CPUState *cpu)
 {
     if (cpu->halted) {
 #if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
-        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
+        if ((atomic_read(&cpu->interrupt_request) & CPU_INTERRUPT_POLL)
             && replay_interrupt()) {
             X86CPU *x86_cpu = X86_CPU(cpu);
             qemu_mutex_lock_iothread();
@@ -527,7 +527,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
     if (unlikely(atomic_read(&cpu->interrupt_request))) {
         int interrupt_request;
         qemu_mutex_lock_iothread();
-        interrupt_request = cpu->interrupt_request;
+        interrupt_request = atomic_read(&cpu->interrupt_request);
         if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
             /* Mask out external interrupts for this step. */
             interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
@@ -579,7 +579,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
             }
             /* The target hook may have updated the 'cpu->interrupt_request';
              * reload the 'interrupt_request' value */
-            interrupt_request = cpu->interrupt_request;
+            interrupt_request = atomic_read(&cpu->interrupt_request);
         }
         if (interrupt_request & CPU_INTERRUPT_EXITTB) {
             cpu_reset_interrupt(cpu, CPU_INTERRUPT_EXITTB);
diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c
index 3d25bdcc17..69ad44bd54 100644
--- a/accel/tcg/tcg-all.c
+++ b/accel/tcg/tcg-all.c
@@ -41,8 +41,7 @@ static void tcg_handle_interrupt(CPUState *cpu, int mask)
     int old_mask;
     g_assert(qemu_mutex_iothread_locked());
 
-    old_mask = cpu->interrupt_request;
-    cpu->interrupt_request |= mask;
+    old_mask = atomic_fetch_or(&cpu->interrupt_request, mask);
 
     /*
      * If called from iothread context, wake the target cpu in
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index f7784bbc2d..364757c677 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -2351,7 +2351,7 @@ void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf)
 void cpu_interrupt(CPUState *cpu, int mask)
 {
     g_assert(qemu_mutex_iothread_locked());
-    cpu->interrupt_request |= mask;
+    atomic_or(&cpu->interrupt_request, mask);
     atomic_set(&cpu->icount_decr.u16.high, -1);
 }
 
diff --git a/qom/cpu.c b/qom/cpu.c
index 20ad54d43f..e2dfbde7c4 100644
--- a/qom/cpu.c
+++ b/qom/cpu.c
@@ -103,7 +103,7 @@ void cpu_reset_interrupt(CPUState *cpu, int mask)
     if (need_lock) {
         qemu_mutex_lock_iothread();
     }
-    cpu->interrupt_request &= ~mask;
+    atomic_and(&cpu->interrupt_request, ~mask);
     if (need_lock) {
         qemu_mutex_unlock_iothread();
     }
@@ -261,7 +261,7 @@ static void cpu_common_reset(CPUState *cpu)
         log_cpu_state(cpu, cc->reset_dump_flags);
     }
 
-    cpu->interrupt_request = 0;
+    atomic_set(&cpu->interrupt_request, 0);
     cpu->halted = 0;
     cpu->mem_io_pc = 0;
     cpu->mem_io_vaddr = 0;
@@ -395,7 +395,7 @@ static vaddr cpu_adjust_watchpoint_address(CPUState *cpu, vaddr addr, int len)
 
 static void generic_handle_interrupt(CPUState *cpu, int mask)
 {
-    cpu->interrupt_request |= mask;
+    atomic_or(&cpu->interrupt_request, mask);
 
     if (!qemu_cpu_is_self(cpu)) {
         qemu_cpu_kick(cpu);
-- 
2.17.1
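
The pattern applied across all four files is the same: plain loads of cpu->interrupt_request become atomic_read(), and read-modify-write sequences (|=, &= ~mask) become atomic_or()/atomic_and()/atomic_fetch_or(), so concurrent readers outside the iothread lock never see a torn update. Below is a minimal standalone sketch of that pattern, using C11 <stdatomic.h> in place of the macros from qemu/atomic.h; the struct and mask constants are purely illustrative, not QEMU code.

/* Sketch only: C11 stand-ins for QEMU's atomic_read()/atomic_or()/
 * atomic_and()/atomic_fetch_or() on a shared interrupt_request field. */
#include <stdatomic.h>
#include <stdio.h>

#define INT_POLL   0x01   /* hypothetical interrupt bits */
#define INT_EXITTB 0x02

typedef struct {
    atomic_int interrupt_request;   /* shared between threads */
} FakeCPU;

static void raise_interrupt(FakeCPU *cpu, int mask)
{
    /* like atomic_or(&cpu->interrupt_request, mask) */
    atomic_fetch_or(&cpu->interrupt_request, mask);
}

static void reset_interrupt(FakeCPU *cpu, int mask)
{
    /* like atomic_and(&cpu->interrupt_request, ~mask) */
    atomic_fetch_and(&cpu->interrupt_request, ~mask);
}

int main(void)
{
    FakeCPU cpu = { .interrupt_request = 0 };

    raise_interrupt(&cpu, INT_POLL | INT_EXITTB);

    /* like atomic_read(&cpu->interrupt_request) */
    int pending = atomic_load(&cpu.interrupt_request);
    if (pending & INT_EXITTB) {
        reset_interrupt(&cpu, INT_EXITTB);
    }

    printf("pending after reset: 0x%x\n",
           atomic_load(&cpu.interrupt_request));
    return 0;
}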