[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PULL v2 19/60] tcg/optimize: Split out fold_mb, fold_qemu_{ld,st}
From: Richard Henderson
Subject: [PULL v2 19/60] tcg/optimize: Split out fold_mb, fold_qemu_{ld,st}
Date: Thu, 28 Oct 2021 21:32:48 -0700
This puts the separate mb optimization into the same framework
as the others. While fold_qemu_{ld,st} are currently identical,
that won't last as more code gets moved.
Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/optimize.c | 89 +++++++++++++++++++++++++++++---------------------
1 file changed, 51 insertions(+), 38 deletions(-)
diff --git a/tcg/optimize.c b/tcg/optimize.c
index 699476e2f1..159a5a9ee5 100644
--- a/tcg/optimize.c
+++ b/tcg/optimize.c
@@ -692,6 +692,44 @@ static bool fold_call(OptContext *ctx, TCGOp *op)
return true;
}
+static bool fold_mb(OptContext *ctx, TCGOp *op)
+{
+ /* Eliminate duplicate and redundant fence instructions. */
+ if (ctx->prev_mb) {
+ /*
+ * Merge two barriers of the same type into one,
+ * or a weaker barrier into a stronger one,
+ * or two weaker barriers into a stronger one.
+ * mb X; mb Y => mb X|Y
+ * mb; strl => mb; st
+ * ldaq; mb => ld; mb
+ * ldaq; strl => ld; mb; st
+ * Other combinations are also merged into a strong
+ * barrier. This is stricter than specified but for
+ * the purposes of TCG is better than not optimizing.
+ */
+ ctx->prev_mb->args[0] |= op->args[0];
+ tcg_op_remove(ctx->tcg, op);
+ } else {
+ ctx->prev_mb = op;
+ }
+ return true;
+}
+
+static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
+{
+ /* Opcodes that touch guest memory stop the mb optimization. */
+ ctx->prev_mb = NULL;
+ return false;
+}
+
+static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
+{
+ /* Opcodes that touch guest memory stop the mb optimization. */
+ ctx->prev_mb = NULL;
+ return false;
+}
+
/* Propagate constants and copies, fold constant expressions. */
void tcg_optimize(TCGContext *s)
{
@@ -1599,6 +1637,19 @@ void tcg_optimize(TCGContext *s)
}
break;
+ case INDEX_op_mb:
+ done = fold_mb(&ctx, op);
+ break;
+ case INDEX_op_qemu_ld_i32:
+ case INDEX_op_qemu_ld_i64:
+ done = fold_qemu_ld(&ctx, op);
+ break;
+ case INDEX_op_qemu_st_i32:
+ case INDEX_op_qemu_st8_i32:
+ case INDEX_op_qemu_st_i64:
+ done = fold_qemu_st(&ctx, op);
+ break;
+
default:
break;
}
@@ -1606,43 +1657,5 @@ void tcg_optimize(TCGContext *s)
if (!done) {
finish_folding(&ctx, op);
}
-
- /* Eliminate duplicate and redundant fence instructions. */
- if (ctx.prev_mb) {
- switch (opc) {
- case INDEX_op_mb:
- /* Merge two barriers of the same type into one,
- * or a weaker barrier into a stronger one,
- * or two weaker barriers into a stronger one.
- * mb X; mb Y => mb X|Y
- * mb; strl => mb; st
- * ldaq; mb => ld; mb
- * ldaq; strl => ld; mb; st
- * Other combinations are also merged into a strong
- * barrier. This is stricter than specified but for
- * the purposes of TCG is better than not optimizing.
- */
- ctx.prev_mb->args[0] |= op->args[0];
- tcg_op_remove(s, op);
- break;
-
- default:
- /* Opcodes that end the block stop the optimization. */
- if ((def->flags & TCG_OPF_BB_END) == 0) {
- break;
- }
- /* fallthru */
- case INDEX_op_qemu_ld_i32:
- case INDEX_op_qemu_ld_i64:
- case INDEX_op_qemu_st_i32:
- case INDEX_op_qemu_st8_i32:
- case INDEX_op_qemu_st_i64:
- /* Opcodes that touch guest memory stop the optimization. */
- ctx.prev_mb = NULL;
- break;
- }
- } else if (opc == INDEX_op_mb) {
- ctx.prev_mb = op;
- }
}
}
--
2.25.1
- [PULL v2 08/60] tcg/optimize: Remove do_default label, (continued)
- [PULL v2 08/60] tcg/optimize: Remove do_default label, Richard Henderson, 2021/10/29
- [PULL v2 10/60] tcg/optimize: Move prev_mb into OptContext, Richard Henderson, 2021/10/29
- [PULL v2 13/60] tcg/optimize: Split out fold_call, Richard Henderson, 2021/10/29
- [PULL v2 12/60] tcg/optimize: Split out copy_propagate, Richard Henderson, 2021/10/29
- [PULL v2 15/60] tcg/optimize: Change fail return for do_constant_folding_cond*, Richard Henderson, 2021/10/29
- [PULL v2 17/60] tcg/optimize: Split out finish_folding, Richard Henderson, 2021/10/29
- [PULL v2 16/60] tcg/optimize: Return true from tcg_opt_gen_{mov, movi}, Richard Henderson, 2021/10/29
- [PULL v2 18/60] tcg/optimize: Use a boolean to avoid a mass of continues, Richard Henderson, 2021/10/29
- [PULL v2 11/60] tcg/optimize: Split out init_arguments, Richard Henderson, 2021/10/29
- [PULL v2 14/60] tcg/optimize: Drop nb_oargs, nb_iargs locals, Richard Henderson, 2021/10/29
- [PULL v2 19/60] tcg/optimize: Split out fold_mb, fold_qemu_{ld,st},
Richard Henderson <=
- [PULL v2 22/60] tcg/optimize: Split out fold_brcond2, Richard Henderson, 2021/10/29
- [PULL v2 20/60] tcg/optimize: Split out fold_const{1,2}, Richard Henderson, 2021/10/29
- [PULL v2 25/60] tcg/optimize: Split out fold_mulu2_i32, Richard Henderson, 2021/10/29
- [PULL v2 21/60] tcg/optimize: Split out fold_setcond2, Richard Henderson, 2021/10/29
- [PULL v2 24/60] tcg/optimize: Split out fold_setcond, Richard Henderson, 2021/10/29
- [PULL v2 26/60] tcg/optimize: Split out fold_addsub2_i32, Richard Henderson, 2021/10/29
- [PULL v2 27/60] tcg/optimize: Split out fold_movcond, Richard Henderson, 2021/10/29
- [PULL v2 28/60] tcg/optimize: Split out fold_extract2, Richard Henderson, 2021/10/29
- [PULL v2 30/60] tcg/optimize: Split out fold_deposit, Richard Henderson, 2021/10/29
- [PULL v2 32/60] tcg/optimize: Split out fold_bswap, Richard Henderson, 2021/10/29