[Qemu-devel] [PATCH 04/15] ppc/booke: fix crit/mcheck/debug exceptions


From: Scott Wood
Subject: [Qemu-devel] [PATCH 04/15] ppc/booke: fix crit/mcheck/debug exceptions
Date: Fri, 21 Dec 2012 20:15:41 -0600

Book E does not play games with certain bits of xSRR1 being MSR save
bits and others being error status.  xSRR1 is the old MSR, period.
This was causing bits such as MSR[CE] to be lost, even in the saved
version, as soon as an exception was taken.
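
For illustration only (not part of the patch), a minimal standalone sketch of
the problem; it assumes MSR_CE is bit 17, as defined in target-ppc/cpu.h:

    /* Sketch only: shows that the classic must-be-zero mask covers MSR[CE]. */
    #include <stdio.h>
    #include <stdint.h>

    #define MSR_CE 17   /* critical interrupt enable, bit 17 in QEMU's numbering */

    int main(void)
    {
        uint64_t msr = 1ULL << MSR_CE;                 /* CE set before the exception */
        uint64_t srr1_classic = msr & ~0x783F0000ULL;  /* old code path */
        uint64_t srr1_booke = msr;                     /* patched Book E path */

        printf("classic CE=%d, booke CE=%d\n",
               (int)((srr1_classic >> MSR_CE) & 1),
               (int)((srr1_booke >> MSR_CE) & 1));
        /* prints "classic CE=0, booke CE=1": the mask silently drops MSR[CE] */
        return 0;
    }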

rfci/rfdi/rfmci are fixed to pass the actual xSRR1 register contents,
rather than the register number.

Put FIXME comments on the hack that is "asrr0/1".  The whole point of
separate exception levels is so that you can, for example, take a machine
check or debug interrupt without corrupting critical-level operations.
The right xSRR0/1 set needs to be chosen based on CPU type flags.
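
As a rough sketch of what that selection could look like (the has_dsrr()
feature test below is hypothetical, not an existing QEMU flag; real code
would key off the CPU model's Book E debug-level support):

    /* Hypothetical sketch only -- has_dsrr() does not exist in QEMU today. */
    static bool has_dsrr(CPUPPCState *env);   /* e.g. true for e500mc-class cores */

    static void booke_pick_debug_srr(CPUPPCState *env, int *srr0, int *srr1)
    {
        if (has_dsrr(env)) {
            *srr0 = SPR_BOOKE_DSRR0;   /* cores with a separate debug level */
            *srr1 = SPR_BOOKE_DSRR1;
        } else {
            *srr0 = SPR_BOOKE_CSRR0;   /* base Book E: debug is a critical-class
                                          interrupt and reuses CSRR0/1 */
            *srr1 = SPR_BOOKE_CSRR1;
        }
    }

The analogous choice applies to machine check (MCSRR0/1 versus CSRR0/1,
depending on the core).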

Signed-off-by: Scott Wood <address@hidden>
---
 target-ppc/excp_helper.c |   31 ++++++++++++++++++++++---------
 1 file changed, 22 insertions(+), 9 deletions(-)

diff --git a/target-ppc/excp_helper.c b/target-ppc/excp_helper.c
index 5e34ad0..41037a7 100644
--- a/target-ppc/excp_helper.c
+++ b/target-ppc/excp_helper.c
@@ -84,7 +84,11 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
                   " => %08x (%02x)\n", env->nip, excp, env->error_code);
 
     /* new srr1 value excluding must-be-zero bits */
-    msr = env->msr & ~0x783f0000ULL;
+    if (excp_model == POWERPC_EXCP_BOOKE) {
+        msr = env->msr;
+    } else {
+        msr = env->msr & ~0x783f0000ULL;
+    }
 
     /* new interrupt handler msr */
     new_msr = env->msr & ((target_ulong)1 << MSR_ME);
@@ -145,6 +149,7 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
             srr1 = SPR_40x_SRR3;
             break;
         case POWERPC_EXCP_BOOKE:
+            /* FIXME: choose one or the other based on CPU type */
             srr0 = SPR_BOOKE_MCSRR0;
             srr1 = SPR_BOOKE_MCSRR1;
             asrr0 = SPR_BOOKE_CSRR0;
@@ -275,6 +280,7 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
     case POWERPC_EXCP_DEBUG:     /* Debug interrupt                          */
         switch (excp_model) {
         case POWERPC_EXCP_BOOKE:
+            /* FIXME: choose one or the other based on CPU type */
             srr0 = SPR_BOOKE_DSRR0;
             srr1 = SPR_BOOKE_DSRR1;
             asrr0 = SPR_BOOKE_CSRR0;
@@ -836,8 +842,13 @@ static inline void do_rfi(CPUPPCState *env, target_ulong nip, target_ulong msr,
 
 void helper_rfi(CPUPPCState *env)
 {
-    do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1],
-           ~((target_ulong)0x783F0000), 1);
+    if (env->excp_model == POWERPC_EXCP_BOOKE) {
+        do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1],
+               ~((target_ulong)0), 0);
+    } else {
+        do_rfi(env, env->spr[SPR_SRR0], env->spr[SPR_SRR1],
+               ~((target_ulong)0x783F0000), 1);
+    }
 }
 
 #if defined(TARGET_PPC64)
@@ -864,20 +875,22 @@ void helper_40x_rfci(CPUPPCState *env)
 
 void helper_rfci(CPUPPCState *env)
 {
-    do_rfi(env, env->spr[SPR_BOOKE_CSRR0], SPR_BOOKE_CSRR1,
-           ~((target_ulong)0x3FFF0000), 0);
+    do_rfi(env, env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
+           ~((target_ulong)0), 0);
 }
 
 void helper_rfdi(CPUPPCState *env)
 {
-    do_rfi(env, env->spr[SPR_BOOKE_DSRR0], SPR_BOOKE_DSRR1,
-           ~((target_ulong)0x3FFF0000), 0);
+    /* FIXME: choose CSRR1 or DSRR1 based on cpu type */
+    do_rfi(env, env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1],
+           ~((target_ulong)0), 0);
 }
 
 void helper_rfmci(CPUPPCState *env)
 {
-    do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], SPR_BOOKE_MCSRR1,
-           ~((target_ulong)0x3FFF0000), 0);
+    /* FIXME: choose CSRR1 or MCSRR1 based on cpu type */
+    do_rfi(env, env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1],
+           ~((target_ulong)0), 0);
 }
 #endif
 
-- 
1.7.9.5




