From: Philippe Mathieu-Daudé
Subject: [PATCH v2 4/6] tcg/s390: Hoist common argument loads in tcg_out_op()
Date: Wed, 13 Jan 2021 18:24:57 +0100
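
Declare the args[] and const_args[] parameters of tcg_out_op() as
arrays of TCG_MAX_OP_ARGS elements, and hoist the loads of the
arguments used by most opcodes (a0, a1, a2 and a4) to the top of
the function, instead of reloading them in each case.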

Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
---
 tcg/s390/tcg-target.c.inc | 218 ++++++++++++++++++--------------------
 1 file changed, 104 insertions(+), 114 deletions(-)

diff --git a/tcg/s390/tcg-target.c.inc b/tcg/s390/tcg-target.c.inc
index d7ef0790556..ec202e79cfc 100644
--- a/tcg/s390/tcg-target.c.inc
+++ b/tcg/s390/tcg-target.c.inc
@@ -1732,15 +1732,20 @@ static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
         case glue(glue(INDEX_op_,x),_i64)
 
 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
-                const TCGArg *args, const int *const_args)
+                              const TCGArg args[TCG_MAX_OP_ARGS],
+                              const int const_args[TCG_MAX_OP_ARGS])
 {
     S390Opcode op, op2;
-    TCGArg a0, a1, a2;
+    TCGArg a0, a1, a2, a4;
+
+    a0 = args[0];
+    a1 = args[1];
+    a2 = args[2];
+    a4 = args[4];
 
     switch (opc) {
     case INDEX_op_exit_tb:
         /* Reuse the zeroing that exists for goto_ptr.  */
-        a0 = args[0];
         if (a0 == 0) {
             tgen_gotoi(s, S390_CC_ALWAYS, tcg_code_gen_epilogue);
         } else {
@@ -1750,7 +1755,6 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_goto_tb:
-        a0 = args[0];
         if (s->tb_jmp_insn_offset) {
             /*
              * branch displacement must be aligned for atomic patching;
@@ -1784,7 +1788,6 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_goto_ptr:
-        a0 = args[0];
         if (USE_REG_TB) {
             tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, a0);
         }
@@ -1794,44 +1797,42 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     OP_32_64(ld8u):
         /* ??? LLC (RXY format) is only present with the extended-immediate
            facility, whereas LLGC is always present.  */
-        tcg_out_mem(s, 0, RXY_LLGC, args[0], args[1], TCG_REG_NONE, args[2]);
+        tcg_out_mem(s, 0, RXY_LLGC, a0, a1, TCG_REG_NONE, a2);
         break;
 
     OP_32_64(ld8s):
         /* ??? LB is no smaller than LGB, so no point to using it.  */
-        tcg_out_mem(s, 0, RXY_LGB, args[0], args[1], TCG_REG_NONE, args[2]);
+        tcg_out_mem(s, 0, RXY_LGB, a0, a1, TCG_REG_NONE, a2);
         break;
 
     OP_32_64(ld16u):
         /* ??? LLH (RXY format) is only present with the extended-immediate
            facility, whereas LLGH is always present.  */
-        tcg_out_mem(s, 0, RXY_LLGH, args[0], args[1], TCG_REG_NONE, args[2]);
+        tcg_out_mem(s, 0, RXY_LLGH, a0, a1, TCG_REG_NONE, a2);
         break;
 
     case INDEX_op_ld16s_i32:
-        tcg_out_mem(s, RX_LH, RXY_LHY, args[0], args[1], TCG_REG_NONE, args[2]);
+        tcg_out_mem(s, RX_LH, RXY_LHY, a0, a1, TCG_REG_NONE, a2);
         break;
 
     case INDEX_op_ld_i32:
-        tcg_out_ld(s, TCG_TYPE_I32, args[0], args[1], args[2]);
+        tcg_out_ld(s, TCG_TYPE_I32, a0, a1, a2);
         break;
 
     OP_32_64(st8):
-        tcg_out_mem(s, RX_STC, RXY_STCY, args[0], args[1],
-                    TCG_REG_NONE, args[2]);
+        tcg_out_mem(s, RX_STC, RXY_STCY, a0, a1, TCG_REG_NONE, a2);
         break;
 
     OP_32_64(st16):
-        tcg_out_mem(s, RX_STH, RXY_STHY, args[0], args[1],
-                    TCG_REG_NONE, args[2]);
+        tcg_out_mem(s, RX_STH, RXY_STHY, a0, a1, TCG_REG_NONE, a2);
         break;
 
     case INDEX_op_st_i32:
-        tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
+        tcg_out_st(s, TCG_TYPE_I32, a0, a1, a2);
         break;
 
     case INDEX_op_add_i32:
-        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
+        a2 = (int32_t)args[2];
         if (const_args[2]) {
         do_addi_32:
             if (a0 == a1) {
@@ -1852,9 +1853,9 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         }
         break;
     case INDEX_op_sub_i32:
-        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
+        a2 = (int32_t)args[2];
         if (const_args[2]) {
             a2 = -a2;
             goto do_addi_32;
         } else if (a0 == a1) {
             tcg_out_insn(s, RR, SR, a0, a2);
@@ -1864,7 +1865,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_and_i32:
-        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
+        a2 = (uint32_t)args[2];
         if (const_args[2]) {
             tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
             tgen_andi(s, TCG_TYPE_I32, a0, a2);
@@ -1875,7 +1876,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         }
         break;
     case INDEX_op_or_i32:
-        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
+        a2 = (uint32_t)args[2];
         if (const_args[2]) {
             tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
             tgen_ori(s, TCG_TYPE_I32, a0, a2);
@@ -1886,45 +1887,45 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         }
         break;
     case INDEX_op_xor_i32:
-        a0 = args[0], a1 = args[1], a2 = (uint32_t)args[2];
+        a2 = (uint32_t)args[2];
         if (const_args[2]) {
             tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
             tgen_xori(s, TCG_TYPE_I32, a0, a2);
         } else if (a0 == a1) {
-            tcg_out_insn(s, RR, XR, args[0], args[2]);
+            tcg_out_insn(s, RR, XR, a0, a2);
         } else {
             tcg_out_insn(s, RRF, XRK, a0, a1, a2);
         }
         break;
 
     case INDEX_op_neg_i32:
-        tcg_out_insn(s, RR, LCR, args[0], args[1]);
+        tcg_out_insn(s, RR, LCR, a0, a1);
         break;
 
     case INDEX_op_mul_i32:
         if (const_args[2]) {
             if ((int32_t)args[2] == (int16_t)args[2]) {
-                tcg_out_insn(s, RI, MHI, args[0], args[2]);
+                tcg_out_insn(s, RI, MHI, a0, a2);
             } else {
-                tcg_out_insn(s, RIL, MSFI, args[0], args[2]);
+                tcg_out_insn(s, RIL, MSFI, a0, a2);
             }
         } else {
-            tcg_out_insn(s, RRE, MSR, args[0], args[2]);
+            tcg_out_insn(s, RRE, MSR, a0, a2);
         }
         break;
 
     case INDEX_op_div2_i32:
-        tcg_out_insn(s, RR, DR, TCG_REG_R2, args[4]);
+        tcg_out_insn(s, RR, DR, TCG_REG_R2, a4);
         break;
     case INDEX_op_divu2_i32:
-        tcg_out_insn(s, RRE, DLR, TCG_REG_R2, args[4]);
+        tcg_out_insn(s, RRE, DLR, TCG_REG_R2, a4);
         break;
 
     case INDEX_op_shl_i32:
         op = RS_SLL;
         op2 = RSY_SLLK;
     do_shift32:
-        a0 = args[0], a1 = args[1], a2 = (int32_t)args[2];
+        a2 = (int32_t)args[2];
         if (a0 == a1) {
             if (const_args[2]) {
                 tcg_out_sh32(s, op, a0, TCG_REG_NONE, a2);
@@ -1952,110 +1953,107 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_rotl_i32:
         /* ??? Using tcg_out_sh64 here for the format; it is a 32-bit rol.  */
         if (const_args[2]) {
-            tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_REG_NONE, args[2]);
+            tcg_out_sh64(s, RSY_RLL, a0, a1, TCG_REG_NONE, a2);
         } else {
-            tcg_out_sh64(s, RSY_RLL, args[0], args[1], args[2], 0);
+            tcg_out_sh64(s, RSY_RLL, a0, a1, a2, 0);
         }
         break;
     case INDEX_op_rotr_i32:
         if (const_args[2]) {
-            tcg_out_sh64(s, RSY_RLL, args[0], args[1],
-                         TCG_REG_NONE, (32 - args[2]) & 31);
+            tcg_out_sh64(s, RSY_RLL, a0, a1, TCG_REG_NONE, (32 - a2) & 31);
         } else {
-            tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
-            tcg_out_sh64(s, RSY_RLL, args[0], args[1], TCG_TMP0, 0);
+            tcg_out_insn(s, RR, LCR, TCG_TMP0, a2);
+            tcg_out_sh64(s, RSY_RLL, a0, a1, TCG_TMP0, 0);
         }
         break;
 
     case INDEX_op_ext8s_i32:
-        tgen_ext8s(s, TCG_TYPE_I32, args[0], args[1]);
+        tgen_ext8s(s, TCG_TYPE_I32, a0, a1);
         break;
     case INDEX_op_ext16s_i32:
-        tgen_ext16s(s, TCG_TYPE_I32, args[0], args[1]);
+        tgen_ext16s(s, TCG_TYPE_I32, a0, a1);
         break;
     case INDEX_op_ext8u_i32:
-        tgen_ext8u(s, TCG_TYPE_I32, args[0], args[1]);
+        tgen_ext8u(s, TCG_TYPE_I32, a0, a1);
         break;
     case INDEX_op_ext16u_i32:
-        tgen_ext16u(s, TCG_TYPE_I32, args[0], args[1]);
+        tgen_ext16u(s, TCG_TYPE_I32, a0, a1);
         break;
 
     OP_32_64(bswap16):
         /* The TCG bswap definition requires bits 0-47 already be zero.
            Thus we don't need the G-type insns to implement bswap16_i64.  */
-        tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
-        tcg_out_sh32(s, RS_SRL, args[0], TCG_REG_NONE, 16);
+        tcg_out_insn(s, RRE, LRVR, a0, a1);
+        tcg_out_sh32(s, RS_SRL, a0, TCG_REG_NONE, 16);
         break;
     OP_32_64(bswap32):
-        tcg_out_insn(s, RRE, LRVR, args[0], args[1]);
+        tcg_out_insn(s, RRE, LRVR, a0, a1);
         break;
 
     case INDEX_op_add2_i32:
         if (const_args[4]) {
-            tcg_out_insn(s, RIL, ALFI, args[0], args[4]);
+            tcg_out_insn(s, RIL, ALFI, a0, a4);
         } else {
-            tcg_out_insn(s, RR, ALR, args[0], args[4]);
+            tcg_out_insn(s, RR, ALR, a0, a4);
         }
-        tcg_out_insn(s, RRE, ALCR, args[1], args[5]);
+        tcg_out_insn(s, RRE, ALCR, a1, args[5]);
         break;
     case INDEX_op_sub2_i32:
         if (const_args[4]) {
-            tcg_out_insn(s, RIL, SLFI, args[0], args[4]);
+            tcg_out_insn(s, RIL, SLFI, a0, a4);
         } else {
-            tcg_out_insn(s, RR, SLR, args[0], args[4]);
+            tcg_out_insn(s, RR, SLR, a0, a4);
         }
-        tcg_out_insn(s, RRE, SLBR, args[1], args[5]);
+        tcg_out_insn(s, RRE, SLBR, a1, args[5]);
         break;
 
     case INDEX_op_br:
-        tgen_branch(s, S390_CC_ALWAYS, arg_label(args[0]));
+        tgen_branch(s, S390_CC_ALWAYS, arg_label(a0));
         break;
 
     case INDEX_op_brcond_i32:
-        tgen_brcond(s, TCG_TYPE_I32, args[2], args[0],
-                    args[1], const_args[1], arg_label(args[3]));
+        tgen_brcond(s, TCG_TYPE_I32, a2, a0,
+                    a1, const_args[1], arg_label(args[3]));
         break;
     case INDEX_op_setcond_i32:
-        tgen_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1],
-                     args[2], const_args[2]);
+        tgen_setcond(s, TCG_TYPE_I32, args[3], a0, a1, a2, const_args[2]);
         break;
     case INDEX_op_movcond_i32:
-        tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
-                     args[2], const_args[2], args[3], const_args[3]);
+        tgen_movcond(s, TCG_TYPE_I32, args[5], a0, a1,
+                     a2, const_args[2], args[3], const_args[3]);
         break;
 
     case INDEX_op_qemu_ld_i32:
         /* ??? Technically we can use a non-extending instruction.  */
     case INDEX_op_qemu_ld_i64:
-        tcg_out_qemu_ld(s, args[0], args[1], args[2]);
+        tcg_out_qemu_ld(s, a0, a1, a2);
         break;
     case INDEX_op_qemu_st_i32:
     case INDEX_op_qemu_st_i64:
-        tcg_out_qemu_st(s, args[0], args[1], args[2]);
+        tcg_out_qemu_st(s, a0, a1, a2);
         break;
 
     case INDEX_op_ld16s_i64:
-        tcg_out_mem(s, 0, RXY_LGH, args[0], args[1], TCG_REG_NONE, args[2]);
+        tcg_out_mem(s, 0, RXY_LGH, a0, a1, TCG_REG_NONE, a2);
         break;
     case INDEX_op_ld32u_i64:
-        tcg_out_mem(s, 0, RXY_LLGF, args[0], args[1], TCG_REG_NONE, args[2]);
+        tcg_out_mem(s, 0, RXY_LLGF, a0, a1, TCG_REG_NONE, a2);
         break;
     case INDEX_op_ld32s_i64:
-        tcg_out_mem(s, 0, RXY_LGF, args[0], args[1], TCG_REG_NONE, args[2]);
+        tcg_out_mem(s, 0, RXY_LGF, a0, a1, TCG_REG_NONE, a2);
         break;
     case INDEX_op_ld_i64:
-        tcg_out_ld(s, TCG_TYPE_I64, args[0], args[1], args[2]);
+        tcg_out_ld(s, TCG_TYPE_I64, a0, a1, a2);
         break;
 
     case INDEX_op_st32_i64:
-        tcg_out_st(s, TCG_TYPE_I32, args[0], args[1], args[2]);
+        tcg_out_st(s, TCG_TYPE_I32, a0, a1, a2);
         break;
     case INDEX_op_st_i64:
-        tcg_out_st(s, TCG_TYPE_I64, args[0], args[1], args[2]);
+        tcg_out_st(s, TCG_TYPE_I64, a0, a1, a2);
         break;
 
     case INDEX_op_add_i64:
-        a0 = args[0], a1 = args[1], a2 = args[2];
         if (const_args[2]) {
         do_addi_64:
             if (a0 == a1) {
@@ -2084,7 +2082,6 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         }
         break;
     case INDEX_op_sub_i64:
-        a0 = args[0], a1 = args[1], a2 = args[2];
         if (const_args[2]) {
             a2 = -a2;
             goto do_addi_64;
@@ -2096,18 +2093,16 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_and_i64:
-        a0 = args[0], a1 = args[1], a2 = args[2];
         if (const_args[2]) {
             tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
-            tgen_andi(s, TCG_TYPE_I64, args[0], args[2]);
+            tgen_andi(s, TCG_TYPE_I64, a0, a2);
         } else if (a0 == a1) {
-            tcg_out_insn(s, RRE, NGR, args[0], args[2]);
+            tcg_out_insn(s, RRE, NGR, a0, a2);
         } else {
             tcg_out_insn(s, RRF, NGRK, a0, a1, a2);
         }
         break;
     case INDEX_op_or_i64:
-        a0 = args[0], a1 = args[1], a2 = args[2];
         if (const_args[2]) {
             tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
             tgen_ori(s, TCG_TYPE_I64, a0, a2);
@@ -2118,7 +2113,6 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         }
         break;
     case INDEX_op_xor_i64:
-        a0 = args[0], a1 = args[1], a2 = args[2];
         if (const_args[2]) {
             tcg_out_mov(s, TCG_TYPE_I64, a0, a1);
             tgen_xori(s, TCG_TYPE_I64, a0, a2);
@@ -2130,21 +2124,21 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_neg_i64:
-        tcg_out_insn(s, RRE, LCGR, args[0], args[1]);
+        tcg_out_insn(s, RRE, LCGR, a0, a1);
         break;
     case INDEX_op_bswap64_i64:
-        tcg_out_insn(s, RRE, LRVGR, args[0], args[1]);
+        tcg_out_insn(s, RRE, LRVGR, a0, a1);
         break;
 
     case INDEX_op_mul_i64:
         if (const_args[2]) {
-            if (args[2] == (int16_t)args[2]) {
-                tcg_out_insn(s, RI, MGHI, args[0], args[2]);
+            if (a2 == (int16_t)a2) {
+                tcg_out_insn(s, RI, MGHI, a0, a2);
             } else {
-                tcg_out_insn(s, RIL, MSGFI, args[0], args[2]);
+                tcg_out_insn(s, RIL, MSGFI, a0, a2);
             }
         } else {
-            tcg_out_insn(s, RRE, MSGR, args[0], args[2]);
+            tcg_out_insn(s, RRE, MSGR, a0, a2);
         }
         break;
 
@@ -2153,10 +2147,10 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
            into R3 with this definition, but as we do in fact always
            produce both quotient and remainder using INDEX_op_div_i64
            instead requires jumping through even more hoops.  */
-        tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, args[4]);
+        tcg_out_insn(s, RRE, DSGR, TCG_REG_R2, a4);
         break;
     case INDEX_op_divu2_i64:
-        tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, args[4]);
+        tcg_out_insn(s, RRE, DLGR, TCG_REG_R2, a4);
         break;
     case INDEX_op_mulu2_i64:
         tcg_out_insn(s, RRE, MLGR, TCG_REG_R2, args[3]);
@@ -2166,9 +2160,9 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         op = RSY_SLLG;
     do_shift64:
         if (const_args[2]) {
-            tcg_out_sh64(s, op, args[0], args[1], TCG_REG_NONE, args[2]);
+            tcg_out_sh64(s, op, a0, a1, TCG_REG_NONE, a2);
         } else {
-            tcg_out_sh64(s, op, args[0], args[1], args[2], 0);
+            tcg_out_sh64(s, op, a0, a1, a2, 0);
         }
         break;
     case INDEX_op_shr_i64:
@@ -2180,87 +2174,83 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     case INDEX_op_rotl_i64:
         if (const_args[2]) {
-            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
-                         TCG_REG_NONE, args[2]);
+            tcg_out_sh64(s, RSY_RLLG, a0, a1, TCG_REG_NONE, a2);
         } else {
-            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], args[2], 0);
+            tcg_out_sh64(s, RSY_RLLG, a0, a1, a2, 0);
         }
         break;
     case INDEX_op_rotr_i64:
         if (const_args[2]) {
-            tcg_out_sh64(s, RSY_RLLG, args[0], args[1],
-                         TCG_REG_NONE, (64 - args[2]) & 63);
+            tcg_out_sh64(s, RSY_RLLG, a0, a1, TCG_REG_NONE, (64 - a2) & 63);
         } else {
             /* We can use the smaller 32-bit negate because only the
                low 6 bits are examined for the rotate.  */
-            tcg_out_insn(s, RR, LCR, TCG_TMP0, args[2]);
-            tcg_out_sh64(s, RSY_RLLG, args[0], args[1], TCG_TMP0, 0);
+            tcg_out_insn(s, RR, LCR, TCG_TMP0, a2);
+            tcg_out_sh64(s, RSY_RLLG, a0, a1, TCG_TMP0, 0);
         }
         break;
 
     case INDEX_op_ext8s_i64:
-        tgen_ext8s(s, TCG_TYPE_I64, args[0], args[1]);
+        tgen_ext8s(s, TCG_TYPE_I64, a0, a1);
         break;
     case INDEX_op_ext16s_i64:
-        tgen_ext16s(s, TCG_TYPE_I64, args[0], args[1]);
+        tgen_ext16s(s, TCG_TYPE_I64, a0, a1);
         break;
     case INDEX_op_ext_i32_i64:
     case INDEX_op_ext32s_i64:
-        tgen_ext32s(s, args[0], args[1]);
+        tgen_ext32s(s, a0, a1);
         break;
     case INDEX_op_ext8u_i64:
-        tgen_ext8u(s, TCG_TYPE_I64, args[0], args[1]);
+        tgen_ext8u(s, TCG_TYPE_I64, a0, a1);
         break;
     case INDEX_op_ext16u_i64:
-        tgen_ext16u(s, TCG_TYPE_I64, args[0], args[1]);
+        tgen_ext16u(s, TCG_TYPE_I64, a0, a1);
         break;
     case INDEX_op_extu_i32_i64:
     case INDEX_op_ext32u_i64:
-        tgen_ext32u(s, args[0], args[1]);
+        tgen_ext32u(s, a0, a1);
         break;
 
     case INDEX_op_add2_i64:
         if (const_args[4]) {
-            if ((int64_t)args[4] >= 0) {
-                tcg_out_insn(s, RIL, ALGFI, args[0], args[4]);
+            if ((int64_t)a4 >= 0) {
+                tcg_out_insn(s, RIL, ALGFI, a0, a4);
             } else {
-                tcg_out_insn(s, RIL, SLGFI, args[0], -args[4]);
+                tcg_out_insn(s, RIL, SLGFI, a0, -a4);
             }
         } else {
-            tcg_out_insn(s, RRE, ALGR, args[0], args[4]);
+            tcg_out_insn(s, RRE, ALGR, a0, a4);
         }
-        tcg_out_insn(s, RRE, ALCGR, args[1], args[5]);
+        tcg_out_insn(s, RRE, ALCGR, a1, args[5]);
         break;
     case INDEX_op_sub2_i64:
         if (const_args[4]) {
-            if ((int64_t)args[4] >= 0) {
-                tcg_out_insn(s, RIL, SLGFI, args[0], args[4]);
+            if ((int64_t)a4 >= 0) {
+                tcg_out_insn(s, RIL, SLGFI, a0, a4);
             } else {
-                tcg_out_insn(s, RIL, ALGFI, args[0], -args[4]);
+                tcg_out_insn(s, RIL, ALGFI, a0, -a4);
             }
         } else {
-            tcg_out_insn(s, RRE, SLGR, args[0], args[4]);
+            tcg_out_insn(s, RRE, SLGR, a0, a4);
         }
-        tcg_out_insn(s, RRE, SLBGR, args[1], args[5]);
+        tcg_out_insn(s, RRE, SLBGR, a1, args[5]);
         break;
 
     case INDEX_op_brcond_i64:
-        tgen_brcond(s, TCG_TYPE_I64, args[2], args[0],
-                    args[1], const_args[1], arg_label(args[3]));
+        tgen_brcond(s, TCG_TYPE_I64, a2, a0,
+                    a1, const_args[1], arg_label(args[3]));
         break;
     case INDEX_op_setcond_i64:
-        tgen_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1],
-                     args[2], const_args[2]);
+        tgen_setcond(s, TCG_TYPE_I64, args[3], a0, a1, a2, const_args[2]);
         break;
     case INDEX_op_movcond_i64:
-        tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
-                     args[2], const_args[2], args[3], const_args[3]);
+        tgen_movcond(s, TCG_TYPE_I64, args[5], a0, a1,
+                     a2, const_args[2], args[3], const_args[3]);
         break;
 
     OP_32_64(deposit):
-        a0 = args[0], a1 = args[1], a2 = args[2];
         if (const_args[1]) {
-            tgen_deposit(s, a0, a2, args[3], args[4], 1);
+            tgen_deposit(s, a0, a2, args[3], a4, 1);
         } else {
             /* Since we can't support "0Z" as a constraint, we allow a1 in
                any register.  Fix things up as if a matching constraint.  */
@@ -2272,22 +2262,22 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                 }
                 tcg_out_mov(s, type, a0, a1);
             }
-            tgen_deposit(s, a0, a2, args[3], args[4], 0);
+            tgen_deposit(s, a0, a2, args[3], a4, 0);
         }
         break;
 
     OP_32_64(extract):
-        tgen_extract(s, args[0], args[1], args[2], args[3]);
+        tgen_extract(s, a0, a1, a2, args[3]);
         break;
 
     case INDEX_op_clz_i64:
-        tgen_clz(s, args[0], args[1], args[2], const_args[2]);
+        tgen_clz(s, a0, a1, a2, const_args[2]);
         break;
 
     case INDEX_op_mb:
         /* The host memory model is quite strong, we simply need to
            serialize the instruction stream.  */
-        if (args[0] & TCG_MO_ST_LD) {
+        if (a0 & TCG_MO_ST_LD) {
             tcg_out_insn(s, RR, BCR,
                          s390_facilities & FACILITY_FAST_BCR_SER ? 14 : 15, 0);
         }
-- 
2.26.2



