[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v9 44/74] i386/hvf: convert to cpu_request_interrupt
From: Robert Foley
Subject: [PATCH v9 44/74] i386/hvf: convert to cpu_request_interrupt
Date: Thu, 21 May 2020 12:39:41 -0400
From: "Emilio G. Cota" <address@hidden>
Reviewed-by: Richard Henderson <address@hidden>
Signed-off-by: Emilio G. Cota <address@hidden>
Signed-off-by: Robert Foley <address@hidden>
---
target/i386/hvf/hvf.c | 8 +++++---
target/i386/hvf/x86hvf.c | 26 +++++++++++++++-----------
2 files changed, 20 insertions(+), 14 deletions(-)
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
index bf60ce9d66..52ccdf85e4 100644
--- a/target/i386/hvf/hvf.c
+++ b/target/i386/hvf/hvf.c
@@ -262,7 +262,7 @@ void update_apic_tpr(CPUState *cpu)
static void hvf_handle_interrupt(CPUState * cpu, int mask)
{
- cpu->interrupt_request |= mask;
+ cpu_interrupt_request_or(cpu, mask);
if (!qemu_cpu_is_self(cpu)) {
qemu_cpu_kick(cpu);
}
@@ -733,10 +733,12 @@ int hvf_vcpu_exec(CPUState *cpu)
ret = 0;
switch (exit_reason) {
case EXIT_REASON_HLT: {
+ uint32_t interrupt_request = cpu_interrupt_request(cpu);
+
macvm_set_rip(cpu, rip + ins_len);
- if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+ if (!((interrupt_request & CPU_INTERRUPT_HARD) &&
(EFLAGS(env) & IF_MASK))
- && !(cpu->interrupt_request & CPU_INTERRUPT_NMI) &&
+ && !(interrupt_request & CPU_INTERRUPT_NMI) &&
!(idtvec_info & VMCS_IDT_VEC_VALID)) {
cpu_halted_set(cpu, 1);
ret = EXCP_HLT;
diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c
index 90f1662d0c..892ae0e99a 100644
--- a/target/i386/hvf/x86hvf.c
+++ b/target/i386/hvf/x86hvf.c
@@ -352,6 +352,7 @@ bool hvf_inject_interrupts(CPUState *cpu_state)
uint8_t vector;
uint64_t intr_type;
+ uint32_t interrupt_request;
bool have_event = true;
if (env->interrupt_injected != -1) {
vector = env->interrupt_injected;
@@ -400,7 +401,7 @@ bool hvf_inject_interrupts(CPUState *cpu_state)
};
}
- if (cpu_state->interrupt_request & CPU_INTERRUPT_NMI) {
+ if (cpu_interrupt_request(cpu_state) & CPU_INTERRUPT_NMI) {
if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) {
cpu_reset_interrupt(cpu_state, CPU_INTERRUPT_NMI);
info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | EXCP02_NMI;
@@ -411,7 +412,7 @@ bool hvf_inject_interrupts(CPUState *cpu_state)
}
if (!(env->hflags & HF_INHIBIT_IRQ_MASK) &&
- (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
+ (cpu_interrupt_request(cpu_state) & CPU_INTERRUPT_HARD) &&
(EFLAGS(env) & IF_MASK) && !(info & VMCS_INTR_VALID)) {
int line = cpu_get_pic_interrupt(&x86cpu->env);
cpu_reset_interrupt(cpu_state, CPU_INTERRUPT_HARD);
@@ -420,39 +421,42 @@ bool hvf_inject_interrupts(CPUState *cpu_state)
VMCS_INTR_VALID | VMCS_INTR_T_HWINTR);
}
}
- if (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) {
+ if (cpu_interrupt_request(cpu_state) & CPU_INTERRUPT_HARD) {
vmx_set_int_window_exiting(cpu_state);
}
- return (cpu_state->interrupt_request
- & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR));
+ return cpu_interrupt_request(cpu_state) & (CPU_INTERRUPT_INIT |
+ CPU_INTERRUPT_TPR);
}
int hvf_process_events(CPUState *cpu_state)
{
X86CPU *cpu = X86_CPU(cpu_state);
CPUX86State *env = &cpu->env;
+ uint32_t interrupt_request;
EFLAGS(env) = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);
- if (cpu_state->interrupt_request & CPU_INTERRUPT_INIT) {
+ if (cpu_interrupt_request(cpu_state) & CPU_INTERRUPT_INIT) {
hvf_cpu_synchronize_state(cpu_state);
do_cpu_init(cpu);
}
- if (cpu_state->interrupt_request & CPU_INTERRUPT_POLL) {
+ if (cpu_interrupt_request(cpu_state) & CPU_INTERRUPT_POLL) {
cpu_reset_interrupt(cpu_state, CPU_INTERRUPT_POLL);
apic_poll_irq(cpu->apic_state);
}
- if (((cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
+
+ interrupt_request = cpu_interrupt_request(cpu_state);
+ if (((interrupt_request & CPU_INTERRUPT_HARD) &&
(EFLAGS(env) & IF_MASK)) ||
- (cpu_state->interrupt_request & CPU_INTERRUPT_NMI)) {
+ (interrupt_request & CPU_INTERRUPT_NMI)) {
cpu_halted_set(cpu_state, 0);
}
- if (cpu_state->interrupt_request & CPU_INTERRUPT_SIPI) {
+ if (interrupt_request & CPU_INTERRUPT_SIPI) {
hvf_cpu_synchronize_state(cpu_state);
do_cpu_sipi(cpu);
}
- if (cpu_state->interrupt_request & CPU_INTERRUPT_TPR) {
+ if (cpu_interrupt_request(cpu_state) & CPU_INTERRUPT_TPR) {
cpu_reset_interrupt(cpu_state, CPU_INTERRUPT_TPR);
hvf_cpu_synchronize_state(cpu_state);
apic_handle_tpr_access_report(cpu->apic_state, env->eip,
--
2.17.1
- [PATCH v9 34/74] ppc: use cpu_reset_interrupt, (continued)
- [PATCH v9 34/74] ppc: use cpu_reset_interrupt, Robert Foley, 2020/05/21
- [PATCH v9 35/74] exec: use cpu_reset_interrupt, Robert Foley, 2020/05/21
- [PATCH v9 36/74] i386: use cpu_reset_interrupt, Robert Foley, 2020/05/21
- [PATCH v9 37/74] s390x: use cpu_reset_interrupt, Robert Foley, 2020/05/21
- [PATCH v9 38/74] openrisc: use cpu_reset_interrupt, Robert Foley, 2020/05/21
- [PATCH v9 39/74] arm: convert to cpu_interrupt_request, Robert Foley, 2020/05/21
- [PATCH v9 40/74] i386: convert to cpu_interrupt_request, Robert Foley, 2020/05/21
- [PATCH v9 41/74] i386/kvm: convert to cpu_interrupt_request, Robert Foley, 2020/05/21
- [PATCH v9 42/74] i386/hax-all: convert to cpu_interrupt_request, Robert Foley, 2020/05/21
- [PATCH v9 43/74] i386/whpx-all: convert to cpu_interrupt_request, Robert Foley, 2020/05/21
- [PATCH v9 44/74] i386/hvf: convert to cpu_request_interrupt,
Robert Foley <=
- [PATCH v9 45/74] ppc: convert to cpu_interrupt_request, Robert Foley, 2020/05/21
- [PATCH v9 46/74] sh4: convert to cpu_interrupt_request, Robert Foley, 2020/05/21
- [PATCH v9 47/74] cris: convert to cpu_interrupt_request, Robert Foley, 2020/05/21
- [PATCH v9 48/74] hppa: convert to cpu_interrupt_request, Robert Foley, 2020/05/21
- [PATCH v9 49/74] lm32: convert to cpu_interrupt_request, Robert Foley, 2020/05/21
- [PATCH v9 50/74] m68k: convert to cpu_interrupt_request, Robert Foley, 2020/05/21
- [PATCH v9 51/74] mips: convert to cpu_interrupt_request, Robert Foley, 2020/05/21
- [PATCH v9 53/74] s390x: convert to cpu_interrupt_request, Robert Foley, 2020/05/21
- [PATCH v9 52/74] nios: convert to cpu_interrupt_request, Robert Foley, 2020/05/21