From: Philippe Mathieu-Daudé
Subject: [PATCH 4/4] target/ppc: Use tcg_constant_i32() in gen_ld/st()
Date: Sun, 24 Oct 2021 18:16:19 +0200

Avoid using a TCG temporary by moving the MemOp index
to the constant pool.
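
For reference, tcg_constant_i32() hands back a read-only value backed by
the translator's constant pool, so the temporary allocation, the explicit
move and the matching free all become unnecessary. A minimal sketch of the
pattern, assuming the surrounding translate.c context (ctx, EA, cpu_env);
gen_helper_foo is a made-up placeholder, not a real helper:

    /* Before: allocate a temporary, move the immediate in, free it. */
    TCGv_i32 oi = tcg_temp_new_i32();
    tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx));
    gen_helper_foo(cpu_env, EA, oi);        /* placeholder helper call */
    tcg_temp_free_i32(oi);

    /* After: the pooled constant needs neither a move nor a free. */
    TCGv_i32 oi = tcg_constant_i32(make_memop_idx(MO_LEQ, ctx->mem_idx));
    gen_helper_foo(cpu_env, EA, oi);        /* placeholder helper call */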

Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
---
 target/ppc/translate.c | 29 +++++++++++++----------------
 1 file changed, 13 insertions(+), 16 deletions(-)

diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index 507f6699f47..9a4ae61a39d 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -3347,15 +3347,14 @@ static void gen_lq(DisasContext *ctx)
 
     if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
         if (HAVE_ATOMIC128) {
-            TCGv_i32 oi = tcg_temp_new_i32();
+            TCGv_i32 oi;
             if (ctx->le_mode) {
-                tcg_gen_movi_i32(oi, make_memop_idx(MO_LEQ, ctx->mem_idx));
+                oi = tcg_constant_i32(make_memop_idx(MO_LEQ, ctx->mem_idx));
                 gen_helper_lq_le_parallel(lo, cpu_env, EA, oi);
             } else {
-                tcg_gen_movi_i32(oi, make_memop_idx(MO_BEQ, ctx->mem_idx));
+                oi = tcg_constant_i32(make_memop_idx(MO_BEQ, ctx->mem_idx));
                 gen_helper_lq_be_parallel(lo, cpu_env, EA, oi);
             }
-            tcg_temp_free_i32(oi);
             tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh));
         } else {
             /* Restart with exclusive lock.  */
@@ -3458,17 +3457,16 @@ static void gen_std(DisasContext *ctx)
 
         if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
             if (HAVE_ATOMIC128) {
-                TCGv_i32 oi = tcg_temp_new_i32();
+                TCGv_i32 oi;
                 if (ctx->le_mode) {
-                    tcg_gen_movi_i32(oi, make_memop_idx(MO_LE | MO_128,
-                                                        ctx->mem_idx));
+                    oi = tcg_constant_i32(make_memop_idx(MO_LE | MO_128,
+                                                         ctx->mem_idx));
                     gen_helper_stq_le_parallel(cpu_env, EA, lo, hi, oi);
                 } else {
-                    tcg_gen_movi_i32(oi, make_memop_idx(MO_BE | MO_128,
-                                                        ctx->mem_idx));
+                    oi = tcg_constant_i32(make_memop_idx(MO_BE | MO_128,
+                                                         ctx->mem_idx));
                     gen_helper_stq_be_parallel(cpu_env, EA, lo, hi, oi);
                 }
-                tcg_temp_free_i32(oi);
             } else {
                 /* Restart with exclusive lock.  */
                 gen_helper_exit_atomic(cpu_env);
@@ -4065,17 +4063,16 @@ static void gen_lqarx(DisasContext *ctx)
 
     if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
         if (HAVE_ATOMIC128) {
-            TCGv_i32 oi = tcg_temp_new_i32();
+            TCGv_i32 oi;
             if (ctx->le_mode) {
-                tcg_gen_movi_i32(oi, make_memop_idx(MO_LE | MO_128 | MO_ALIGN,
-                                                    ctx->mem_idx));
+                oi = tcg_constant_i32(make_memop_idx(MO_LE | MO_128 | MO_ALIGN,
+                                                     ctx->mem_idx));
                 gen_helper_lq_le_parallel(lo, cpu_env, EA, oi);
             } else {
-                tcg_gen_movi_i32(oi, make_memop_idx(MO_BE | MO_128 | MO_ALIGN,
-                                                    ctx->mem_idx));
+                oi = tcg_constant_i32(make_memop_idx(MO_BE | MO_128 | MO_ALIGN,
+                                                     ctx->mem_idx));
                 gen_helper_lq_be_parallel(lo, cpu_env, EA, oi);
             }
-            tcg_temp_free_i32(oi);
             tcg_gen_ld_i64(hi, cpu_env, offsetof(CPUPPCState, retxh));
         } else {
             /* Restart with exclusive lock.  */
-- 
2.31.1