[PULL 11/46] tcg/tci: Inline tci_write_reg32 into all callers
From: Richard Henderson
Subject: [PULL 11/46] tcg/tci: Inline tci_write_reg32 into all callers
Date: Fri, 5 Feb 2021 12:56:15 -1000
For a 64-bit TCI, the upper bits of a 32-bit operation are
undefined (much like a native ppc64 32-bit operation). It
simplifies everything if we don't force-extend the result.
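(As a rough standalone sketch, not part of the patch: the helper
names below are simplified stand-ins for the tci accessors. The
invariant relied on is that every 32-bit consumer truncates on
read, so a 32-bit producer may leave the high half of a 64-bit
host register undefined.)

    #include <stdint.h>

    typedef uint64_t tcg_target_ulong;  /* 64-bit host register */

    /* Producer: store a 32-bit result without forcing an
       extension; the upper 32 bits are left undefined, much
       like a native ppc64 32-bit operation. */
    static void write_reg(tcg_target_ulong *regs, unsigned idx,
                          tcg_target_ulong value)
    {
        regs[idx] = value;
    }

    /* Consumer: truncation happens on read, so the undefined
       high half is never observed by a 32-bit operation. */
    static uint32_t read_reg32(const tcg_target_ulong *regs,
                               unsigned idx)
    {
        return (uint32_t)regs[idx];
    }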
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tci.c | 66 +++++++++++++++++++++++++------------------------------
1 file changed, 30 insertions(+), 36 deletions(-)
diff --git a/tcg/tci.c b/tcg/tci.c
index 005d2946c4..39ad00663f 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -117,12 +117,6 @@ tci_write_reg(tcg_target_ulong *regs, TCGReg index, tcg_target_ulong value)
regs[index] = value;
}
-static void
-tci_write_reg32(tcg_target_ulong *regs, TCGReg index, uint32_t value)
-{
- tci_write_reg(regs, index, value);
-}
-
#if TCG_TARGET_REG_BITS == 32
static void tci_write_reg64(tcg_target_ulong *regs, uint32_t high_index,
uint32_t low_index, uint64_t value)
@@ -549,7 +543,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
t1 = tci_read_r32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
condition = *tb_ptr++;
- tci_write_reg32(regs, t0, tci_compare32(t1, t2, condition));
+ tci_write_reg(regs, t0, tci_compare32(t1, t2, condition));
break;
#if TCG_TARGET_REG_BITS == 32
case INDEX_op_setcond2_i32:
@@ -557,7 +551,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tmp64 = tci_read_r64(regs, &tb_ptr);
v64 = tci_read_ri64(regs, &tb_ptr);
condition = *tb_ptr++;
- tci_write_reg32(regs, t0, tci_compare64(tmp64, v64, condition));
+ tci_write_reg(regs, t0, tci_compare64(tmp64, v64, condition));
break;
#elif TCG_TARGET_REG_BITS == 64
case INDEX_op_setcond_i64:
@@ -571,12 +565,12 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
case INDEX_op_mov_i32:
t0 = *tb_ptr++;
t1 = tci_read_r32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, t1);
+ tci_write_reg(regs, t0, t1);
break;
case INDEX_op_tci_movi_i32:
t0 = *tb_ptr++;
t1 = tci_read_i32(&tb_ptr);
- tci_write_reg32(regs, t0, t1);
+ tci_write_reg(regs, t0, t1);
break;
/* Load/store operations (32 bit). */
@@ -603,7 +597,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
t0 = *tb_ptr++;
t1 = tci_read_r(regs, &tb_ptr);
t2 = tci_read_s32(&tb_ptr);
- tci_write_reg32(regs, t0, *(uint32_t *)(t1 + t2));
+ tci_write_reg(regs, t0, *(uint32_t *)(t1 + t2));
break;
case INDEX_op_st8_i32:
t0 = tci_read_r8(regs, &tb_ptr);
@@ -631,44 +625,44 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
t0 = *tb_ptr++;
t1 = tci_read_ri32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, t1 + t2);
+ tci_write_reg(regs, t0, t1 + t2);
break;
case INDEX_op_sub_i32:
t0 = *tb_ptr++;
t1 = tci_read_ri32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, t1 - t2);
+ tci_write_reg(regs, t0, t1 - t2);
break;
case INDEX_op_mul_i32:
t0 = *tb_ptr++;
t1 = tci_read_ri32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, t1 * t2);
+ tci_write_reg(regs, t0, t1 * t2);
break;
#if TCG_TARGET_HAS_div_i32
case INDEX_op_div_i32:
t0 = *tb_ptr++;
t1 = tci_read_ri32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, (int32_t)t1 / (int32_t)t2);
+ tci_write_reg(regs, t0, (int32_t)t1 / (int32_t)t2);
break;
case INDEX_op_divu_i32:
t0 = *tb_ptr++;
t1 = tci_read_ri32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, t1 / t2);
+ tci_write_reg(regs, t0, t1 / t2);
break;
case INDEX_op_rem_i32:
t0 = *tb_ptr++;
t1 = tci_read_ri32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, (int32_t)t1 % (int32_t)t2);
+ tci_write_reg(regs, t0, (int32_t)t1 % (int32_t)t2);
break;
case INDEX_op_remu_i32:
t0 = *tb_ptr++;
t1 = tci_read_ri32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, t1 % t2);
+ tci_write_reg(regs, t0, t1 % t2);
break;
#elif TCG_TARGET_HAS_div2_i32
case INDEX_op_div2_i32:
@@ -680,19 +674,19 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
t0 = *tb_ptr++;
t1 = tci_read_ri32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, t1 & t2);
+ tci_write_reg(regs, t0, t1 & t2);
break;
case INDEX_op_or_i32:
t0 = *tb_ptr++;
t1 = tci_read_ri32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, t1 | t2);
+ tci_write_reg(regs, t0, t1 | t2);
break;
case INDEX_op_xor_i32:
t0 = *tb_ptr++;
t1 = tci_read_ri32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, t1 ^ t2);
+ tci_write_reg(regs, t0, t1 ^ t2);
break;
/* Shift/rotate operations (32 bit). */
@@ -701,32 +695,32 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
t0 = *tb_ptr++;
t1 = tci_read_ri32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, t1 << (t2 & 31));
+ tci_write_reg(regs, t0, t1 << (t2 & 31));
break;
case INDEX_op_shr_i32:
t0 = *tb_ptr++;
t1 = tci_read_ri32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, t1 >> (t2 & 31));
+ tci_write_reg(regs, t0, t1 >> (t2 & 31));
break;
case INDEX_op_sar_i32:
t0 = *tb_ptr++;
t1 = tci_read_ri32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, ((int32_t)t1 >> (t2 & 31)));
+ tci_write_reg(regs, t0, ((int32_t)t1 >> (t2 & 31)));
break;
#if TCG_TARGET_HAS_rot_i32
case INDEX_op_rotl_i32:
t0 = *tb_ptr++;
t1 = tci_read_ri32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, rol32(t1, t2 & 31));
+ tci_write_reg(regs, t0, rol32(t1, t2 & 31));
break;
case INDEX_op_rotr_i32:
t0 = *tb_ptr++;
t1 = tci_read_ri32(regs, &tb_ptr);
t2 = tci_read_ri32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, ror32(t1, t2 & 31));
+ tci_write_reg(regs, t0, ror32(t1, t2 & 31));
break;
#endif
#if TCG_TARGET_HAS_deposit_i32
@@ -737,7 +731,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
tmp16 = *tb_ptr++;
tmp8 = *tb_ptr++;
tmp32 = (((1 << tmp8) - 1) << tmp16);
- tci_write_reg32(regs, t0, (t1 & ~tmp32) | ((t2 << tmp16) & tmp32));
+ tci_write_reg(regs, t0, (t1 & ~tmp32) | ((t2 << tmp16) & tmp32));
break;
#endif
case INDEX_op_brcond_i32:
@@ -789,56 +783,56 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
case INDEX_op_ext8s_i32:
t0 = *tb_ptr++;
t1 = tci_read_r8s(regs, &tb_ptr);
- tci_write_reg32(regs, t0, t1);
+ tci_write_reg(regs, t0, t1);
break;
#endif
#if TCG_TARGET_HAS_ext16s_i32
case INDEX_op_ext16s_i32:
t0 = *tb_ptr++;
t1 = tci_read_r16s(regs, &tb_ptr);
- tci_write_reg32(regs, t0, t1);
+ tci_write_reg(regs, t0, t1);
break;
#endif
#if TCG_TARGET_HAS_ext8u_i32
case INDEX_op_ext8u_i32:
t0 = *tb_ptr++;
t1 = tci_read_r8(regs, &tb_ptr);
- tci_write_reg32(regs, t0, t1);
+ tci_write_reg(regs, t0, t1);
break;
#endif
#if TCG_TARGET_HAS_ext16u_i32
case INDEX_op_ext16u_i32:
t0 = *tb_ptr++;
t1 = tci_read_r16(regs, &tb_ptr);
- tci_write_reg32(regs, t0, t1);
+ tci_write_reg(regs, t0, t1);
break;
#endif
#if TCG_TARGET_HAS_bswap16_i32
case INDEX_op_bswap16_i32:
t0 = *tb_ptr++;
t1 = tci_read_r16(regs, &tb_ptr);
- tci_write_reg32(regs, t0, bswap16(t1));
+ tci_write_reg(regs, t0, bswap16(t1));
break;
#endif
#if TCG_TARGET_HAS_bswap32_i32
case INDEX_op_bswap32_i32:
t0 = *tb_ptr++;
t1 = tci_read_r32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, bswap32(t1));
+ tci_write_reg(regs, t0, bswap32(t1));
break;
#endif
#if TCG_TARGET_HAS_not_i32
case INDEX_op_not_i32:
t0 = *tb_ptr++;
t1 = tci_read_r32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, ~t1);
+ tci_write_reg(regs, t0, ~t1);
break;
#endif
#if TCG_TARGET_HAS_neg_i32
case INDEX_op_neg_i32:
t0 = *tb_ptr++;
t1 = tci_read_r32(regs, &tb_ptr);
- tci_write_reg32(regs, t0, -t1);
+ tci_write_reg(regs, t0, -t1);
break;
#endif
#if TCG_TARGET_REG_BITS == 64
@@ -880,7 +874,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
t0 = *tb_ptr++;
t1 = tci_read_r(regs, &tb_ptr);
t2 = tci_read_s32(&tb_ptr);
- tci_write_reg32(regs, t0, *(uint32_t *)(t1 + t2));
+ tci_write_reg(regs, t0, *(uint32_t *)(t1 + t2));
break;
case INDEX_op_ld32s_i64:
t0 = *tb_ptr++;
--
2.25.1