From: Kirill Batuzov
Subject: [Qemu-devel] [PATCH v2 6/6] Do constant folding for unary operations.
Date: Thu, 9 Jun 2011 14:45:44 +0400

Perform constant folding for NOT and EXT{8,16,32}{S,U} operations.
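
For illustration only (not part of the patch): the new cases amount to the cast pairs
shown below. This is a standalone sketch of the folding arithmetic; the fold_* helper
names are invented for this example and do not exist in TCG.

#include <assert.h>
#include <stdint.h>

/* Mirrors the cast pairs used for 32-bit sign/zero extension and NOT folding. */
static uint32_t fold_ext8s_i32(uint32_t x)  { return (int32_t)(int8_t)x; }
static uint32_t fold_ext16u_i32(uint32_t x) { return (uint32_t)(uint16_t)x; }
static uint32_t fold_not_i32(uint32_t x)    { return ~x; }

int main(void)
{
    assert(fold_ext8s_i32(0x80) == 0xffffff80);         /* sign bit propagates  */
    assert(fold_ext16u_i32(0x12345678) == 0x00005678);  /* high bits cleared    */
    assert(fold_not_i32(0) == 0xffffffff);
    return 0;
}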

Signed-off-by: Kirill Batuzov <address@hidden>
---
 tcg/optimize.c |   83 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 83 insertions(+), 0 deletions(-)

diff --git a/tcg/optimize.c b/tcg/optimize.c
index 653f399..2cdcc29 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -100,6 +100,11 @@ static int op_bits(int op)
     case INDEX_op_sar_i32:
     case INDEX_op_rotl_i32:
     case INDEX_op_rotr_i32:
+    case INDEX_op_not_i32:
+    case INDEX_op_ext8s_i32:
+    case INDEX_op_ext16s_i32:
+    case INDEX_op_ext8u_i32:
+    case INDEX_op_ext16u_i32:
         return 32;
 #if TCG_TARGET_REG_BITS == 64
     case INDEX_op_mov_i64:
@@ -114,6 +119,13 @@ static int op_bits(int op)
     case INDEX_op_sar_i64:
     case INDEX_op_rotl_i64:
     case INDEX_op_rotr_i64:
+    case INDEX_op_not_i64:
+    case INDEX_op_ext8s_i64:
+    case INDEX_op_ext16s_i64:
+    case INDEX_op_ext32s_i64:
+    case INDEX_op_ext8u_i64:
+    case INDEX_op_ext16u_i64:
+    case INDEX_op_ext32u_i64:
         return 64;
 #endif
     default:
@@ -243,6 +255,44 @@ static TCGArg do_constant_folding_2(int op, TCGArg x, TCGArg y)
         return x;
 #endif
 
+    case INDEX_op_not_i32:
+#if TCG_TARGET_REG_BITS == 64
+    case INDEX_op_not_i64:
+#endif
+        return ~x;
+
+    case INDEX_op_ext8s_i32:
+        return (int32_t)(int8_t)x;
+
+    case INDEX_op_ext16s_i32:
+        return (int32_t)(int16_t)x;
+
+    case INDEX_op_ext8u_i32:
+        return (uint32_t)(uint8_t)x;
+
+    case INDEX_op_ext16u_i32:
+        return (uint32_t)(uint16_t)x;
+
+#if TCG_TARGET_REG_BITS == 64
+    case INDEX_op_ext8s_i64:
+        return (int64_t)(int8_t)x;
+
+    case INDEX_op_ext16s_i64:
+        return (int64_t)(int16_t)x;
+
+    case INDEX_op_ext32s_i64:
+        return (int64_t)(int32_t)x;
+
+    case INDEX_op_ext8u_i64:
+        return (uint64_t)(uint8_t)x;
+
+    case INDEX_op_ext16u_i64:
+        return (uint64_t)(uint16_t)x;
+
+    case INDEX_op_ext32u_i64:
+        return (uint64_t)(uint32_t)x;
+#endif
+
     default:
         fprintf(stderr,
                 "Unrecognized operation %d in do_constant_folding.\n", op);
@@ -447,6 +497,39 @@ static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
             gen_args += 2;
             args += 2;
             break;
+        case INDEX_op_not_i32:
+        case INDEX_op_ext8s_i32:
+        case INDEX_op_ext16s_i32:
+        case INDEX_op_ext8u_i32:
+        case INDEX_op_ext16u_i32:
+#if TCG_TARGET_REG_BITS == 64
+        case INDEX_op_not_i64:
+        case INDEX_op_ext8s_i64:
+        case INDEX_op_ext16s_i64:
+        case INDEX_op_ext32s_i64:
+        case INDEX_op_ext8u_i64:
+        case INDEX_op_ext16u_i64:
+        case INDEX_op_ext32u_i64:
+#endif
+            if (temps[args[1]].state == TCG_TEMP_CONST) {
+                gen_opc_buf[op_index] = op_to_movi(op);
+                gen_args[0] = args[0];
+                gen_args[1] = do_constant_folding(op, temps[args[1]].val, 0);
+                reset_temp(temps, gen_args[0], nb_temps, nb_globals);
+                temps[gen_args[0]].state = TCG_TEMP_CONST;
+                temps[gen_args[0]].val = gen_args[1];
+                assert(temps[gen_args[0]].num_copies == 0);
+                gen_args += 2;
+                args += 2;
+                break;
+            } else {
+                reset_temp(temps, args[0], nb_temps, nb_globals);
+                gen_args[0] = args[0];
+                gen_args[1] = args[1];
+                gen_args += 2;
+                args += 2;
+                break;
+            }
         case INDEX_op_or_i32:
         case INDEX_op_and_i32:
         case INDEX_op_xor_i32:
-- 
1.7.4.1