Subject: [PULL 11/29] target/arm: Split gen_add_CC and gen_sub_CC
From: Peter Maydell
Date: Thu, 18 May 2023 13:50:49 +0100

From: Richard Henderson <richard.henderson@linaro.org>

Split out specific 32-bit and 64-bit functions.
These carry the same signature as tcg_gen_add_i64,
and so will be easier to pass as callbacks.

Retain gen_add_CC and gen_sub_CC during conversion.
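
The point of sharing the tcg_gen_add_i64 signature is that a later
decodetree conversion can select the flag-setting or plain operation
with a function pointer. A minimal sketch of that pattern, assuming
the usual translate-a64.c context (do_addsub_imm and its parameters
are illustrative names, not part of this patch):

typedef void ArithTwoOp(TCGv_i64, TCGv_i64, TCGv_i64);

/*
 * Emit "rd = rn <op> imm", where fn is e.g. tcg_gen_add_i64,
 * tcg_gen_sub_i64, gen_add64_CC or gen_sub64_CC.
 */
static bool do_addsub_imm(DisasContext *s, int rd, int rn,
                          uint64_t imm, bool sf, ArithTwoOp *fn)
{
    TCGv_i64 dst = cpu_reg(s, rd);
    TCGv_i64 src = cpu_reg_sp(s, rn);

    fn(dst, src, tcg_constant_i64(imm));
    if (!sf) {
        /* 32-bit ops write a zero-extended 32-bit result. */
        tcg_gen_ext32u_i64(dst, dst);
    }
    return true;
}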
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230512144106.3608981-6-peter.maydell@linaro.org
[PMM: rebased]
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/tcg/translate-a64.c | 149 +++++++++++++++++++--------------
1 file changed, 84 insertions(+), 65 deletions(-)

diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index ffcd05eb38a..7a633abf830 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -682,83 +682,102 @@ static inline void gen_logic_CC(int sf, TCGv_i64 result)
}
/* dest = T0 + T1; compute C, N, V and Z flags */
+static void gen_add64_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
+{
+ TCGv_i64 result, flag, tmp;
+ result = tcg_temp_new_i64();
+ flag = tcg_temp_new_i64();
+ tmp = tcg_temp_new_i64();
+
+ tcg_gen_movi_i64(tmp, 0);
+ tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);
+
+ tcg_gen_extrl_i64_i32(cpu_CF, flag);
+
+ gen_set_NZ64(result);
+
+ tcg_gen_xor_i64(flag, result, t0);
+ tcg_gen_xor_i64(tmp, t0, t1);
+ tcg_gen_andc_i64(flag, flag, tmp);
+ tcg_gen_extrh_i64_i32(cpu_VF, flag);
+
+ tcg_gen_mov_i64(dest, result);
+}
+
+static void gen_add32_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
+{
+ TCGv_i32 t0_32 = tcg_temp_new_i32();
+ TCGv_i32 t1_32 = tcg_temp_new_i32();
+ TCGv_i32 tmp = tcg_temp_new_i32();
+
+ tcg_gen_movi_i32(tmp, 0);
+ tcg_gen_extrl_i64_i32(t0_32, t0);
+ tcg_gen_extrl_i64_i32(t1_32, t1);
+ tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
+ tcg_gen_mov_i32(cpu_ZF, cpu_NF);
+ tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
+ tcg_gen_xor_i32(tmp, t0_32, t1_32);
+ tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
+ tcg_gen_extu_i32_i64(dest, cpu_NF);
+}
+
static void gen_add_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
if (sf) {
- TCGv_i64 result, flag, tmp;
- result = tcg_temp_new_i64();
- flag = tcg_temp_new_i64();
- tmp = tcg_temp_new_i64();
-
- tcg_gen_movi_i64(tmp, 0);
- tcg_gen_add2_i64(result, flag, t0, tmp, t1, tmp);
-
- tcg_gen_extrl_i64_i32(cpu_CF, flag);
-
- gen_set_NZ64(result);
-
- tcg_gen_xor_i64(flag, result, t0);
- tcg_gen_xor_i64(tmp, t0, t1);
- tcg_gen_andc_i64(flag, flag, tmp);
- tcg_gen_extrh_i64_i32(cpu_VF, flag);
-
- tcg_gen_mov_i64(dest, result);
+ gen_add64_CC(dest, t0, t1);
} else {
- /* 32 bit arithmetic */
- TCGv_i32 t0_32 = tcg_temp_new_i32();
- TCGv_i32 t1_32 = tcg_temp_new_i32();
- TCGv_i32 tmp = tcg_temp_new_i32();
-
- tcg_gen_movi_i32(tmp, 0);
- tcg_gen_extrl_i64_i32(t0_32, t0);
- tcg_gen_extrl_i64_i32(t1_32, t1);
- tcg_gen_add2_i32(cpu_NF, cpu_CF, t0_32, tmp, t1_32, tmp);
- tcg_gen_mov_i32(cpu_ZF, cpu_NF);
- tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
- tcg_gen_xor_i32(tmp, t0_32, t1_32);
- tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
- tcg_gen_extu_i32_i64(dest, cpu_NF);
+ gen_add32_CC(dest, t0, t1);
}
}
/* dest = T0 - T1; compute C, N, V and Z flags */
+static void gen_sub64_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
+{
+ /* 64 bit arithmetic */
+ TCGv_i64 result, flag, tmp;
+
+ result = tcg_temp_new_i64();
+ flag = tcg_temp_new_i64();
+ tcg_gen_sub_i64(result, t0, t1);
+
+ gen_set_NZ64(result);
+
+ tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
+ tcg_gen_extrl_i64_i32(cpu_CF, flag);
+
+ tcg_gen_xor_i64(flag, result, t0);
+ tmp = tcg_temp_new_i64();
+ tcg_gen_xor_i64(tmp, t0, t1);
+ tcg_gen_and_i64(flag, flag, tmp);
+ tcg_gen_extrh_i64_i32(cpu_VF, flag);
+ tcg_gen_mov_i64(dest, result);
+}
+
+static void gen_sub32_CC(TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
+{
+ /* 32 bit arithmetic */
+ TCGv_i32 t0_32 = tcg_temp_new_i32();
+ TCGv_i32 t1_32 = tcg_temp_new_i32();
+ TCGv_i32 tmp;
+
+ tcg_gen_extrl_i64_i32(t0_32, t0);
+ tcg_gen_extrl_i64_i32(t1_32, t1);
+ tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
+ tcg_gen_mov_i32(cpu_ZF, cpu_NF);
+ tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
+ tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
+ tmp = tcg_temp_new_i32();
+ tcg_gen_xor_i32(tmp, t0_32, t1_32);
+ tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
+ tcg_gen_extu_i32_i64(dest, cpu_NF);
+}
+
static void gen_sub_CC(int sf, TCGv_i64 dest, TCGv_i64 t0, TCGv_i64 t1)
{
if (sf) {
- /* 64 bit arithmetic */
- TCGv_i64 result, flag, tmp;
-
- result = tcg_temp_new_i64();
- flag = tcg_temp_new_i64();
- tcg_gen_sub_i64(result, t0, t1);
-
- gen_set_NZ64(result);
-
- tcg_gen_setcond_i64(TCG_COND_GEU, flag, t0, t1);
- tcg_gen_extrl_i64_i32(cpu_CF, flag);
-
- tcg_gen_xor_i64(flag, result, t0);
- tmp = tcg_temp_new_i64();
- tcg_gen_xor_i64(tmp, t0, t1);
- tcg_gen_and_i64(flag, flag, tmp);
- tcg_gen_extrh_i64_i32(cpu_VF, flag);
- tcg_gen_mov_i64(dest, result);
+ gen_sub64_CC(dest, t0, t1);
} else {
- /* 32 bit arithmetic */
- TCGv_i32 t0_32 = tcg_temp_new_i32();
- TCGv_i32 t1_32 = tcg_temp_new_i32();
- TCGv_i32 tmp;
-
- tcg_gen_extrl_i64_i32(t0_32, t0);
- tcg_gen_extrl_i64_i32(t1_32, t1);
- tcg_gen_sub_i32(cpu_NF, t0_32, t1_32);
- tcg_gen_mov_i32(cpu_ZF, cpu_NF);
- tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0_32, t1_32);
- tcg_gen_xor_i32(cpu_VF, cpu_NF, t0_32);
- tmp = tcg_temp_new_i32();
- tcg_gen_xor_i32(tmp, t0_32, t1_32);
- tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
- tcg_gen_extu_i32_i64(dest, cpu_NF);
+ gen_sub32_CC(dest, t0, t1);
}
}
--
2.34.1
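
For reference, the flag computations these helpers emit can be checked
against a plain C model. The following standalone sketch (host-side C,
not QEMU code) shows the same N/Z/C/V derivations: the add2-with-zero
trick leaves the carry in the high word, signed overflow for addition
is (result ^ t0) & ~(t0 ^ t1) viewed at the sign bit, and for
subtraction the carry flag means "no borrow", i.e. the TCG_COND_GEU
setcond in gen_sub64_CC:

#include <stdint.h>
#include <stdio.h>

static void add64_flags(uint64_t t0, uint64_t t1)
{
    uint64_t result = t0 + t1;
    int n = result >> 63;                       /* sign of the result */
    int z = (result == 0);
    int c = (result < t0);                      /* unsigned carry out */
    int v = ((result ^ t0) & ~(t0 ^ t1)) >> 63; /* signed overflow */
    printf("add: N=%d Z=%d C=%d V=%d\n", n, z, c, v);
}

static void sub64_flags(uint64_t t0, uint64_t t1)
{
    uint64_t result = t0 - t1;
    int n = result >> 63;
    int z = (result == 0);
    int c = (t0 >= t1);                         /* C set = no borrow */
    int v = ((result ^ t0) & (t0 ^ t1)) >> 63;  /* signed overflow */
    printf("sub: N=%d Z=%d C=%d V=%d\n", n, z, c, v);
}

int main(void)
{
    add64_flags(UINT64_MAX, 1);   /* wraps to 0: Z=1, C=1 */
    add64_flags(INT64_MAX, 1);    /* signed overflow: N=1, V=1 */
    sub64_flags(0, 1);            /* borrow: N=1, C=0 */
    return 0;
}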