From: Richard Henderson
Subject: [PATCH v2 34/35] target/alpha: Use TCG_COND_TSTNE for gen_fold_mzero
Date: Sat, 28 Oct 2023 12:45:21 -0700
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
target/alpha/translate.c | 49 +++++++++++++++++++---------------------
1 file changed, 23 insertions(+), 26 deletions(-)
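
Note for reviewers, not part of the commit message: TCG_COND_TSTEQ and
TCG_COND_TSTNE compare (arg1 & arg2) against zero, so the EQ/NE case can
drop the separate andi and test the source directly against INT64_MAX,
i.e. every bit except the sign. Below is a minimal host-side C sketch of
that semantics, assuming the IEEE-754 binary64 layout; the helper name is
illustrative only and does not appear in the patch.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* +0.0 is the all-zero bit pattern and -0.0 sets only the sign bit, so
 * masking with INT64_MAX maps both to zero.  This mirrors what
 * TCG_COND_TSTEQ computes for operands (src, INT64_MAX).  */
static bool is_fp_zero_tsteq(double v)
{
    uint64_t bits;
    memcpy(&bits, &v, sizeof(bits));   /* reinterpret bits, no conversion */
    return (bits & INT64_MAX) == 0;    /* TSTEQ; TSTNE is the negation */
}

int main(void)
{
    assert(is_fp_zero_tsteq(0.0));
    assert(is_fp_zero_tsteq(-0.0));
    assert(!is_fp_zero_tsteq(1.0));
    return 0;
}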
diff --git a/target/alpha/translate.c b/target/alpha/translate.c
index c7daf46de7..c68c2bcd21 100644
--- a/target/alpha/translate.c
+++ b/target/alpha/translate.c
@@ -490,56 +490,53 @@ static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, int ra,
 /* Fold -0.0 for comparison with COND. */
 
-static void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src)
+static TCGv_i64 gen_fold_mzero(TCGCond *pcond, uint64_t *pimm, TCGv_i64 src)
 {
-    uint64_t mzero = 1ull << 63;
+    TCGv_i64 tmp;
 
-    switch (cond) {
+    *pimm = 0;
+    switch (*pcond) {
     case TCG_COND_LE:
     case TCG_COND_GT:
         /* For <= or >, the -0.0 value directly compares the way we want. */
-        tcg_gen_mov_i64(dest, src);
-        break;
+        return src;
 
     case TCG_COND_EQ:
     case TCG_COND_NE:
-        /* For == or !=, we can simply mask off the sign bit and compare. */
-        tcg_gen_andi_i64(dest, src, mzero - 1);
-        break;
+        /* For == or !=, we can compare without the sign bit. */
+        *pcond = *pcond == TCG_COND_EQ ? TCG_COND_TSTEQ : TCG_COND_TSTNE;
+        *pimm = INT64_MAX;
+        return src;
 
     case TCG_COND_GE:
     case TCG_COND_LT:
         /* For >= or <, map -0.0 to +0.0. */
-        tcg_gen_movcond_i64(TCG_COND_NE, dest, src, tcg_constant_i64(mzero),
-                            src, tcg_constant_i64(0));
-        break;
+        tmp = tcg_temp_new_i64();
+        tcg_gen_movcond_i64(TCG_COND_EQ, tmp,
+                            src, tcg_constant_i64(INT64_MIN),
+                            tcg_constant_i64(0), src);
+        return tmp;
 
     default:
-        abort();
+        g_assert_not_reached();
     }
 }
 
 static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra,
                                 int32_t disp)
 {
-    TCGv cmp_tmp = tcg_temp_new();
-    DisasJumpType ret;
-
-    gen_fold_mzero(cond, cmp_tmp, load_fpr(ctx, ra));
-    ret = gen_bcond_internal(ctx, cond, cmp_tmp, 0, disp);
-    return ret;
+    uint64_t imm;
+    TCGv_i64 tmp = gen_fold_mzero(&cond, &imm, load_fpr(ctx, ra));
+    return gen_bcond_internal(ctx, cond, tmp, imm, disp);
 }
 
 static void gen_fcmov(DisasContext *ctx, TCGCond cond, int ra, int rb, int rc)
 {
-    TCGv_i64 va, vb, z;
-
-    z = load_zero(ctx);
-    vb = load_fpr(ctx, rb);
-    va = tcg_temp_new();
-    gen_fold_mzero(cond, va, load_fpr(ctx, ra));
-
-    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc), va, z, vb, load_fpr(ctx, rc));
+    uint64_t imm;
+    TCGv_i64 tmp = gen_fold_mzero(&cond, &imm, load_fpr(ctx, ra));
+    tcg_gen_movcond_i64(cond, dest_fpr(ctx, rc),
+                        tmp, tcg_constant_i64(imm),
+                        load_fpr(ctx, rb), load_fpr(ctx, rc));
 }
 
 #define QUAL_RM_N           0x080   /* Round mode nearest even */
--
2.34.1
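
Aside, not from the patch: the GE/LT path cannot use a test condition, so
it still canonicalises -0.0 (bit pattern INT64_MIN) to +0.0 with a movcond
before the signed integer compare. A hedged host-side analogue of that
select, with an illustrative name not taken from the patch:

#include <assert.h>
#include <stdint.h>

/* Mirror of the new movcond: when the source is the -0.0 pattern (only
 * the sign bit set, i.e. INT64_MIN), substitute +0.0 (all zeros);
 * otherwise pass the source through unchanged.  */
static int64_t fold_mzero_ge_lt(int64_t src)
{
    return src == INT64_MIN ? 0 : src;
}

int main(void)
{
    assert(fold_mzero_ge_lt(INT64_MIN) == 0);  /* -0.0 -> +0.0 */
    assert(fold_mzero_ge_lt(0) == 0);          /* +0.0 unchanged */
    assert(fold_mzero_ge_lt(-1) == -1);        /* other values kept */
    return 0;
}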