[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v10 59/73] accel/tcg: convert to cpu_interrupt_request
From: Robert Foley
Subject: [PATCH v10 59/73] accel/tcg: convert to cpu_interrupt_request
Date: Wed, 17 Jun 2020 17:02:17 -0400
From: "Emilio G. Cota" <cota@braap.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Robert Foley <robert.foley@linaro.org>
---
accel/tcg/cpu-exec.c | 15 ++++++++-------
accel/tcg/tcg-all.c | 12 +++++++++---
accel/tcg/translate-all.c | 2 +-
3 files changed, 18 insertions(+), 11 deletions(-)
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 099dd83ee0..b549a37847 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -431,7 +431,7 @@ static inline bool cpu_handle_halt_locked(CPUState *cpu)
if (cpu_halted(cpu)) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
- if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
+ if ((cpu_interrupt_request(cpu) & CPU_INTERRUPT_POLL)
&& replay_interrupt()) {
X86CPU *x86_cpu = X86_CPU(cpu);
@@ -544,16 +544,17 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
*/
atomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);
- if (unlikely(atomic_read(&cpu->interrupt_request))) {
+ if (unlikely(cpu_interrupt_request(cpu))) {
int interrupt_request;
+
qemu_mutex_lock_iothread();
- interrupt_request = cpu->interrupt_request;
+ interrupt_request = cpu_interrupt_request(cpu);
if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
/* Mask out external interrupts for this step. */
interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
}
if (interrupt_request & CPU_INTERRUPT_DEBUG) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_DEBUG);
cpu->exception_index = EXCP_DEBUG;
qemu_mutex_unlock_iothread();
return true;
@@ -562,7 +563,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
/* Do nothing */
} else if (interrupt_request & CPU_INTERRUPT_HALT) {
replay_interrupt();
- cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_HALT);
cpu_halted_set(cpu, 1);
cpu->exception_index = EXCP_HLT;
qemu_mutex_unlock_iothread();
@@ -599,10 +600,10 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
}
/* The target hook may have updated the 'cpu->interrupt_request';
* reload the 'interrupt_request' value */
- interrupt_request = cpu->interrupt_request;
+ interrupt_request = cpu_interrupt_request(cpu);
}
if (interrupt_request & CPU_INTERRUPT_EXITTB) {
- cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
+ cpu_reset_interrupt(cpu, CPU_INTERRUPT_EXITTB);
/* ensure that no TB jump will be modified as
the program flow was changed */
*last_tb = NULL;
diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c
index 3b4fda5640..5eda24d87b 100644
--- a/accel/tcg/tcg-all.c
+++ b/accel/tcg/tcg-all.c
@@ -52,10 +52,16 @@ typedef struct TCGState {
static void tcg_handle_interrupt(CPUState *cpu, int mask)
{
int old_mask;
- g_assert(qemu_mutex_iothread_locked());
- old_mask = cpu->interrupt_request;
- cpu->interrupt_request |= mask;
+ if (!cpu_mutex_locked(cpu)) {
+ cpu_mutex_lock(cpu);
+ old_mask = cpu_interrupt_request(cpu);
+ cpu_interrupt_request_or(cpu, mask);
+ cpu_mutex_unlock(cpu);
+ } else {
+ old_mask = cpu_interrupt_request(cpu);
+ cpu_interrupt_request_or(cpu, mask);
+ }
/*
* If called from iothread context, wake the target cpu in
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index c3d37058a1..23007570f6 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -2409,7 +2409,7 @@ void dump_opcount_info(void)
void cpu_interrupt(CPUState *cpu, int mask)
{
g_assert(qemu_mutex_iothread_locked());
- cpu->interrupt_request |= mask;
+ cpu_interrupt_request_or(cpu, mask);
atomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
}
--
2.17.1
- [PATCH v10 47/73] hppa: convert to cpu_interrupt_request, (continued)
- [PATCH v10 47/73] hppa: convert to cpu_interrupt_request, Robert Foley, 2020/06/17
- [PATCH v10 48/73] lm32: convert to cpu_interrupt_request, Robert Foley, 2020/06/17
- [PATCH v10 49/73] m68k: convert to cpu_interrupt_request, Robert Foley, 2020/06/17
- [PATCH v10 50/73] mips: convert to cpu_interrupt_request, Robert Foley, 2020/06/17
- [PATCH v10 51/73] nios: convert to cpu_interrupt_request, Robert Foley, 2020/06/17
- [PATCH v10 52/73] s390x: convert to cpu_interrupt_request, Robert Foley, 2020/06/17
- [PATCH v10 53/73] alpha: convert to cpu_interrupt_request, Robert Foley, 2020/06/17
- [PATCH v10 57/73] unicore32: convert to cpu_interrupt_request, Robert Foley, 2020/06/17
- [PATCH v10 54/73] moxie: convert to cpu_interrupt_request, Robert Foley, 2020/06/17
- [PATCH v10 55/73] sparc: convert to cpu_interrupt_request, Robert Foley, 2020/06/17
- [PATCH v10 59/73] accel/tcg: convert to cpu_interrupt_request,
Robert Foley <=
- [PATCH v10 56/73] openrisc: convert to cpu_interrupt_request, Robert Foley, 2020/06/17
- [PATCH v10 58/73] microblaze: convert to cpu_interrupt_request, Robert Foley, 2020/06/17
- [PATCH v10 60/73] cpu: convert to interrupt_request, Robert Foley, 2020/06/17
- [PATCH v10 62/73] cpu: introduce cpu_has_work_with_iothread_lock, Robert Foley, 2020/06/17
- [PATCH v10 63/73] ppc: convert to cpu_has_work_with_iothread_lock, Robert Foley, 2020/06/17
- [PATCH v10 61/73] cpu: call .cpu_has_work with the CPU lock held, Robert Foley, 2020/06/17
- [PATCH v10 64/73] mips: convert to cpu_has_work_with_iothread_lock, Robert Foley, 2020/06/17
- [PATCH v10 66/73] riscv: convert to cpu_has_work_with_iothread_lock, Robert Foley, 2020/06/17
- [PATCH v10 68/73] xtensa: convert to cpu_has_work_with_iothread_lock, Robert Foley, 2020/06/17
- [PATCH v10 65/73] s390x: convert to cpu_has_work_with_iothread_lock, Robert Foley, 2020/06/17