[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PULL 13/56] tcg/optimize: Split out fold_call
From: Richard Henderson
Subject: [PULL 13/56] tcg/optimize: Split out fold_call
Date: Wed, 27 Oct 2021 19:40:48 -0700
Calls are special in that they have a variable number
of arguments, and need to be able to clobber globals.
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 63 ++++++++++++++++++++++++++++++++------------------
1 file changed, 41 insertions(+), 22 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index fad6f5de1f..74b9aa025a 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -624,10 +624,42 @@ static void copy_propagate(OptContext *ctx, TCGOp *op,
}
}
+/*
+ * Fold an INDEX_op_call.  Calls are variadic (TCGOP_CALLO/TCGOP_CALLI
+ * argument counts) and may clobber globals, so they cannot go through
+ * the fixed-arity folding path.  Always returns true: the call is
+ * fully handled here.
+ */
+static bool fold_call(OptContext *ctx, TCGOp *op)
+{
+ TCGContext *s = ctx->tcg;
+ int nb_oargs = TCGOP_CALLO(op);
+ int nb_iargs = TCGOP_CALLI(op);
+ int flags, i;
+
+ init_arguments(ctx, op, nb_oargs + nb_iargs);
+ copy_propagate(ctx, op, nb_oargs, nb_iargs);
+
+ /* If the function reads or writes globals, reset temp data. */
+ flags = tcg_call_flags(op);
+ if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
+ int nb_globals = s->nb_globals;
+
+ /* Only globals already touched in this BB need resetting. */
+ for (i = 0; i < nb_globals; i++) {
+ if (test_bit(i, ctx->temps_used.l)) {
+ reset_ts(&ctx->tcg->temps[i]);
+ }
+ }
+ }
+
+ /* Reset temp data for outputs. */
+ for (i = 0; i < nb_oargs; i++) {
+ reset_temp(op->args[i]);
+ }
+
+ /* Stop optimizing MB across calls. */
+ ctx->prev_mb = NULL;
+ return true;
+}
+
+
/* Propagate constants and copies, fold constant expressions. */
void tcg_optimize(TCGContext *s)
{
- int nb_temps, nb_globals, i;
+ int nb_temps, i;
TCGOp *op, *op_next;
OptContext ctx = { .tcg = s };
@@ -637,8 +669,6 @@ void tcg_optimize(TCGContext *s)
available through the doubly linked circular list. */
nb_temps = s->nb_temps;
- nb_globals = s->nb_globals;
-
for (i = 0; i < nb_temps; ++i) {
s->temps[i].state_ptr = NULL;
}
@@ -647,17 +677,17 @@ void tcg_optimize(TCGContext *s)
uint64_t z_mask, partmask, affected, tmp;
int nb_oargs, nb_iargs;
TCGOpcode opc = op->opc;
- const TCGOpDef *def = &tcg_op_defs[opc];
+ const TCGOpDef *def;
- /* Count the arguments, and initialize the temps that are
- going to be used */
+ /* Calls are special. */
if (opc == INDEX_op_call) {
- nb_oargs = TCGOP_CALLO(op);
- nb_iargs = TCGOP_CALLI(op);
- } else {
- nb_oargs = def->nb_oargs;
- nb_iargs = def->nb_iargs;
+ fold_call(&ctx, op);
+ continue;
}
+
+ def = &tcg_op_defs[opc];
+ nb_oargs = def->nb_oargs;
+ nb_iargs = def->nb_iargs;
init_arguments(&ctx, op, nb_oargs + nb_iargs);
copy_propagate(&ctx, op, nb_oargs, nb_iargs);
@@ -1549,16 +1579,6 @@ void tcg_optimize(TCGContext *s)
if (def->flags & TCG_OPF_BB_END) {
memset(&ctx.temps_used, 0, sizeof(ctx.temps_used));
} else {
- if (opc == INDEX_op_call &&
- !(tcg_call_flags(op)
- & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
- for (i = 0; i < nb_globals; i++) {
- if (test_bit(i, ctx.temps_used.l)) {
- reset_ts(&s->temps[i]);
- }
- }
- }
-
for (i = 0; i < nb_oargs; i++) {
reset_temp(op->args[i]);
/* Save the corresponding known-zero bits mask for the
@@ -1599,7 +1619,6 @@ void tcg_optimize(TCGContext *s)
case INDEX_op_qemu_st_i32:
case INDEX_op_qemu_st8_i32:
case INDEX_op_qemu_st_i64:
- case INDEX_op_call:
/* Opcodes that touch guest memory stop the optimization. */
ctx.prev_mb = NULL;
break;
--
2.25.1
- [PULL 02/56] host-utils: move checks out of divu128/divs128, (continued)
- [PULL 02/56] host-utils: move checks out of divu128/divs128, Richard Henderson, 2021/10/27
- [PULL 03/56] host-utils: move udiv_qrnnd() to host-utils, Richard Henderson, 2021/10/27
- [PULL 01/56] qemu/int128: Add int128_{not,xor}, Richard Henderson, 2021/10/27
- [PULL 04/56] host-utils: add 128-bit quotient support to divu128/divs128, Richard Henderson, 2021/10/27
- [PULL 05/56] host-utils: add unit tests for divu128/divs128, Richard Henderson, 2021/10/27
- [PULL 09/56] tcg/optimize: Change tcg_opt_gen_{mov,movi} interface, Richard Henderson, 2021/10/27
- [PULL 06/56] tcg/optimize: Rename "mask" to "z_mask", Richard Henderson, 2021/10/27
- [PULL 07/56] tcg/optimize: Split out OptContext, Richard Henderson, 2021/10/27
- [PULL 10/56] tcg/optimize: Move prev_mb into OptContext, Richard Henderson, 2021/10/27
- [PULL 12/56] tcg/optimize: Split out copy_propagate, Richard Henderson, 2021/10/27
- [PULL 13/56] tcg/optimize: Split out fold_call,
Richard Henderson <=
- [PULL 18/56] tcg/optimize: Use a boolean to avoid a mass of continues, Richard Henderson, 2021/10/27
- [PULL 15/56] tcg/optimize: Change fail return for do_constant_folding_cond*, Richard Henderson, 2021/10/27
- [PULL 27/56] tcg/optimize: Split out fold_movcond, Richard Henderson, 2021/10/27
- [PULL 11/56] tcg/optimize: Split out init_arguments, Richard Henderson, 2021/10/27
- [PULL 14/56] tcg/optimize: Drop nb_oargs, nb_iargs locals, Richard Henderson, 2021/10/27
- [PULL 16/56] tcg/optimize: Return true from tcg_opt_gen_{mov,movi}, Richard Henderson, 2021/10/27
- [PULL 17/56] tcg/optimize: Split out finish_folding, Richard Henderson, 2021/10/27
- [PULL 25/56] tcg/optimize: Split out fold_mulu2_i32, Richard Henderson, 2021/10/27
- [PULL 19/56] tcg/optimize: Split out fold_mb, fold_qemu_{ld,st}, Richard Henderson, 2021/10/27
- [PULL 20/56] tcg/optimize: Split out fold_const{1,2}, Richard Henderson, 2021/10/27