From: Jani Kokkonen
Subject: [Qemu-devel] [PATCH 1/4] tcg/aarch64: more low level ops in preparation of tlb lookup
Date: Fri, 31 May 2013 19:57:03 +0200
User-agent: Mozilla/5.0 (Windows NT 6.1; rv:17.0) Gecko/20130509 Thunderbird/17.0.6

From: Claudio Fontana <address@hidden>

For arith operations, add SUBS and a shift parameter so that all arith
instructions can make use of shifted registers.
Also add functions to TEST/AND a register against an immediate bit pattern.

Signed-off-by: Claudio Fontana <address@hidden>
---
 tcg/aarch64/tcg-target.c | 72 ++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 58 insertions(+), 14 deletions(-)
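
Note: a minimal sketch of the call convention introduced by the new shift_imm
parameter (0 means no shift, a positive value selects LSR by that amount, and
a negative value selects LSL by its absolute value); s, rd, rn and rm here are
placeholders, not taken from this patch:

    /* ADD Xd, Xn, Xm (64-bit, no shift) */
    tcg_out_arith(s, ARITH_ADD, 1, rd, rn, rm, 0);
    /* ADD Xd, Xn, Xm, LSL #3 (negative shift_imm selects LSL) */
    tcg_out_arith(s, ARITH_ADD, 1, rd, rn, rm, -3);
    /* CMP Wn, Wm, LSR #2, emitted as SUBS WZR, Wn, Wm, LSR #2 */
    tcg_out_cmp(s, 0, rn, rm, 2);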

diff --git a/tcg/aarch64/tcg-target.c b/tcg/aarch64/tcg-target.c
index ff626eb..1343d49 100644
--- a/tcg/aarch64/tcg-target.c
+++ b/tcg/aarch64/tcg-target.c
@@ -188,6 +188,7 @@ enum aarch64_ldst_op_type { /* type of operation */
 enum aarch64_arith_opc {
     ARITH_ADD = 0x0b,
     ARITH_SUB = 0x4b,
+    ARITH_SUBS = 0x6b,
     ARITH_AND = 0x0a,
     ARITH_OR = 0x2a,
     ARITH_XOR = 0x4a
@@ -394,12 +395,20 @@ static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
 }
 
 static inline void tcg_out_arith(TCGContext *s, enum aarch64_arith_opc opc,
-                                 int ext, TCGReg rd, TCGReg rn, TCGReg rm)
+                                 int ext, TCGReg rd, TCGReg rn, TCGReg rm,
+                                 int shift_imm)
 {
     /* Using shifted register arithmetic operations */
     /* if extended registry operation (64bit) just OR with 0x80 << 24 */
-    unsigned int base = ext ? (0x80 | opc) << 24 : opc << 24;
-    tcg_out32(s, base | rm << 16 | rn << 5 | rd);
+    unsigned int shift, base = ext ? (0x80 | opc) << 24 : opc << 24;
+    if (shift_imm == 0) {
+        shift = 0;
+    } else if (shift_imm > 0) {
+        shift = shift_imm << 10 | 1 << 22;
+    } else /* (shift_imm < 0) */ {
+        shift = (-shift_imm) << 10;
+    }
+    tcg_out32(s, base | rm << 16 | shift | rn << 5 | rd);
 }
 
 static inline void tcg_out_mul(TCGContext *s, int ext,
@@ -482,11 +491,11 @@ static inline void tcg_out_rotl(TCGContext *s, int ext,
     tcg_out_extr(s, ext, rd, rn, rn, bits - (m & max));
 }
 
-static inline void tcg_out_cmp(TCGContext *s, int ext, TCGReg rn, TCGReg rm)
+static inline void tcg_out_cmp(TCGContext *s, int ext, TCGReg rn, TCGReg rm,
+                               int shift_imm)
 {
     /* Using CMP alias SUBS wzr, Wn, Wm */
-    unsigned int base = ext ? 0xeb00001f : 0x6b00001f;
-    tcg_out32(s, base | rm << 16 | rn << 5);
+    tcg_out_arith(s, ARITH_SUBS, ext, TCG_REG_XZR, rn, rm, shift_imm);
 }
 
 static inline void tcg_out_cset(TCGContext *s, int ext, TCGReg rd, TCGCond c)
@@ -569,6 +578,40 @@ static inline void tcg_out_call(TCGContext *s, tcg_target_long target)
     }
 }
 
+/* encode a logical immediate, mapping user parameter
+   M=set bits pattern length to S=M-1 */
+static inline unsigned int
+aarch64_limm(unsigned int m, unsigned int r)
+{
+    assert(m > 0);
+    return r << 16 | (m - 1) << 10;
+}
+
+/* test a register against an immediate bit pattern made of
+   M set bits rotated right by R.
+   Examples:
+   to test a 32/64 reg against 0x00000007, pass M = 3,  R = 0.
+   to test a 32/64 reg against 0x000000ff, pass M = 8,  R = 0.
+   to test a 32bit reg against 0xff000000, pass M = 8,  R = 8.
+   to test a 32bit reg against 0xff0000ff, pass M = 16, R = 8.
+ */
+static inline void tcg_out_tst(TCGContext *s, int ext, TCGReg rn,
+                               unsigned int m, unsigned int r)
+{
+    /* using TST alias of ANDS XZR, Xn,#bimm64 0x7200001f */
+    unsigned int base = ext ? 0xf240001f : 0x7200001f;
+    tcg_out32(s, base | aarch64_limm(m, r) | rn << 5);
+}
+
+/* and a register with a bit pattern, similarly to TST, no flags change */
+static inline void tcg_out_andi(TCGContext *s, int ext, TCGReg rd, TCGReg rn,
+                                unsigned int m, unsigned int r)
+{
+    /* using AND 0x12000000 */
+    unsigned int base = ext ? 0x92400000 : 0x12000000;
+    tcg_out32(s, base | aarch64_limm(m, r) | rn << 5 | rd);
+}
+
 static inline void tcg_out_ret(TCGContext *s)
 {
     /* emit RET { LR } */
@@ -830,31 +873,31 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_add_i64:
         ext = 1; /* fall through */
     case INDEX_op_add_i32:
-        tcg_out_arith(s, ARITH_ADD, ext, args[0], args[1], args[2]);
+        tcg_out_arith(s, ARITH_ADD, ext, args[0], args[1], args[2], 0);
         break;
 
     case INDEX_op_sub_i64:
         ext = 1; /* fall through */
     case INDEX_op_sub_i32:
-        tcg_out_arith(s, ARITH_SUB, ext, args[0], args[1], args[2]);
+        tcg_out_arith(s, ARITH_SUB, ext, args[0], args[1], args[2], 0);
         break;
 
     case INDEX_op_and_i64:
         ext = 1; /* fall through */
     case INDEX_op_and_i32:
-        tcg_out_arith(s, ARITH_AND, ext, args[0], args[1], args[2]);
+        tcg_out_arith(s, ARITH_AND, ext, args[0], args[1], args[2], 0);
         break;
 
     case INDEX_op_or_i64:
         ext = 1; /* fall through */
     case INDEX_op_or_i32:
-        tcg_out_arith(s, ARITH_OR, ext, args[0], args[1], args[2]);
+        tcg_out_arith(s, ARITH_OR, ext, args[0], args[1], args[2], 0);
         break;
 
     case INDEX_op_xor_i64:
         ext = 1; /* fall through */
     case INDEX_op_xor_i32:
-        tcg_out_arith(s, ARITH_XOR, ext, args[0], args[1], args[2]);
+        tcg_out_arith(s, ARITH_XOR, ext, args[0], args[1], args[2], 0);
         break;
 
     case INDEX_op_mul_i64:
@@ -909,7 +952,8 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         if (const_args[2]) {    /* ROR / EXTR Wd, Wm, Wm, 32 - m */
             tcg_out_rotl(s, ext, args[0], args[1], args[2]);
         } else {
-            tcg_out_arith(s, ARITH_SUB, 0, TCG_REG_TMP, TCG_REG_XZR, args[2]);
+            tcg_out_arith(s, ARITH_SUB, 0,
+                          TCG_REG_TMP, TCG_REG_XZR, args[2], 0);
             tcg_out_shiftrot_reg(s, SRR_ROR, ext,
                                  args[0], args[1], TCG_REG_TMP);
         }
@@ -918,14 +962,14 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_brcond_i64:
         ext = 1; /* fall through */
     case INDEX_op_brcond_i32: /* CMP 0, 1, cond(2), label 3 */
-        tcg_out_cmp(s, ext, args[0], args[1]);
+        tcg_out_cmp(s, ext, args[0], args[1], 0);
         tcg_out_goto_label_cond(s, args[2], args[3]);
         break;
 
     case INDEX_op_setcond_i64:
         ext = 1; /* fall through */
     case INDEX_op_setcond_i32:
-        tcg_out_cmp(s, ext, args[1], args[2]);
+        tcg_out_cmp(s, ext, args[1], args[2], 0);
         tcg_out_cset(s, 0, args[0], args[3]);
         break;
 
-- 
1.8.1
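
As a rough usage sketch for the two new immediate-pattern helpers, mirroring
the examples in the comment above tcg_out_tst (s, rd and rn are placeholders,
and the eventual tlb lookup code later in this series may use different masks):

    /* TST Xn, #0x7 - test the low three bits (M = 3, R = 0) */
    tcg_out_tst(s, 1, rn, 3, 0);
    /* AND Wd, Wn, #0xff - mask the low byte (M = 8, R = 0) */
    tcg_out_andi(s, 0, rd, rn, 8, 0);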