[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v2 39/48] tcg/optimize: Expand fold_mulu2_i32 to all 4-arg multiplies
From: Richard Henderson
Subject: [PATCH v2 39/48] tcg/optimize: Expand fold_mulu2_i32 to all 4-arg multiplies
Date: Thu, 7 Oct 2021 12:54:47 -0700
Rename to fold_multiply2, and handle muls2_i32, mulu2_i64,
and muls2_i64.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 44 +++++++++++++++++++++++++++++++++++---------
1 file changed, 35 insertions(+), 9 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index d82d0f15c7..0011ac31ec 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -1400,19 +1400,44 @@ static bool fold_multiply(OptContext *ctx, TCGOp *op)
return false;
}
-static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
+static bool fold_multiply2(OptContext *ctx, TCGOp *op)
{
if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
- uint32_t a = arg_info(op->args[2])->val;
- uint32_t b = arg_info(op->args[3])->val;
- uint64_t r = (uint64_t)a * b;
+ uint64_t a = arg_info(op->args[2])->val;
+ uint64_t b = arg_info(op->args[3])->val;
+ uint64_t h, l;
TCGArg rl, rh;
- TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);
+ TCGOp *op2;
+
+ switch (op->opc) {
+ case INDEX_op_mulu2_i32:
+ l = (uint64_t)(uint32_t)a * (uint32_t)b;
+ h = (int32_t)(l >> 32);
+ l = (int32_t)l;
+ break;
+ case INDEX_op_muls2_i32:
+ l = (int64_t)(int32_t)a * (int32_t)b;
+ h = l >> 32;
+ l = (int32_t)l;
+ break;
+ case INDEX_op_mulu2_i64:
+ mulu64(&l, &h, a, b);
+ break;
+ case INDEX_op_muls2_i64:
+ muls64(&l, &h, a, b);
+ break;
+ default:
+ g_assert_not_reached();
+ }
rl = op->args[0];
rh = op->args[1];
- tcg_opt_gen_movi(ctx, op, rl, (int32_t)r);
- tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(r >> 32));
+
+ /* The proper opcode is supplied by tcg_opt_gen_mov. */
+ op2 = tcg_op_insert_before(ctx->tcg, op, 0);
+
+ tcg_opt_gen_movi(ctx, op, rl, l);
+ tcg_opt_gen_movi(ctx, op2, rh, h);
return true;
}
return false;
@@ -1912,8 +1937,9 @@ void tcg_optimize(TCGContext *s)
CASE_OP_32_64(muluh):
done = fold_multiply(&ctx, op);
break;
- case INDEX_op_mulu2_i32:
- done = fold_mulu2_i32(&ctx, op);
+ CASE_OP_32_64(muls2):
+ CASE_OP_32_64(mulu2):
+ done = fold_multiply2(&ctx, op);
break;
CASE_OP_32_64(nand):
done = fold_nand(&ctx, op);
--
2.25.1
- [PATCH v2 28/48] tcg/optimize: Split out fold_dup, fold_dup2, (continued)
- [PATCH v2 28/48] tcg/optimize: Split out fold_dup, fold_dup2, Richard Henderson, 2021/10/07
- [PATCH v2 35/48] tcg/optimize: Split out fold_sub_to_neg, Richard Henderson, 2021/10/07
- [PATCH v2 06/48] tcg/optimize: Split out init_arguments, Richard Henderson, 2021/10/07
- [PATCH v2 22/48] tcg/optimize: Split out fold_movcond, Richard Henderson, 2021/10/07
- [PATCH v2 25/48] tcg/optimize: Split out fold_deposit, Richard Henderson, 2021/10/07
- [PATCH v2 12/48] tcg/optimize: Split out finish_folding, Richard Henderson, 2021/10/07
- [PATCH v2 33/48] tcg/optimize: Add type to OptContext, Richard Henderson, 2021/10/07
- [PATCH v2 39/48] tcg/optimize: Expand fold_mulu2_i32 to all 4-arg multiplies,
Richard Henderson <=
- [PATCH v2 11/48] tcg/optimize: Return true from tcg_opt_gen_{mov, movi}, Richard Henderson, 2021/10/07
- [PATCH v2 19/48] tcg/optimize: Split out fold_setcond, Richard Henderson, 2021/10/07
- [PATCH v2 21/48] tcg/optimize: Split out fold_addsub2_i32, Richard Henderson, 2021/10/07
- [PATCH v2 23/48] tcg/optimize: Split out fold_extract2, Richard Henderson, 2021/10/07
- [PATCH v2 29/48] tcg/optimize: Split out fold_mov, Richard Henderson, 2021/10/07
- [PATCH v2 20/48] tcg/optimize: Split out fold_mulu2_i32, Richard Henderson, 2021/10/07
- [PATCH v2 36/48] tcg/optimize: Split out fold_xi_to_x, Richard Henderson, 2021/10/07
- [PATCH v2 37/48] tcg/optimize: Split out fold_ix_to_i, Richard Henderson, 2021/10/07