[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v3 34/70] tcg/tci: Hoist op_size checking into tci_args_*
From: Richard Henderson
Subject: [PATCH v3 34/70] tcg/tci: Hoist op_size checking into tci_args_*
Date: Sun, 7 Feb 2021 18:37:16 -0800
This performs the size check while reading the arguments,
which means that we don't have to arrange for it to be
done after the operation. Which tidies all of the branches.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tci.c | 87 ++++++++++++++++++++++++++++++++++++++++++++++---------
1 file changed, 73 insertions(+), 14 deletions(-)
diff --git a/tcg/tci.c b/tcg/tci.c
index a1846825ea..3dc89ed829 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -24,7 +24,7 @@
#if defined(CONFIG_DEBUG_TCG)
# define tci_assert(cond) assert(cond)
#else
-# define tci_assert(cond) ((void)0)
+# define tci_assert(cond) ((void)(cond))
#endif
#include "qemu-common.h"
@@ -135,146 +135,217 @@ static tcg_target_ulong tci_read_label(const uint8_t **tb_ptr)
* s = signed ldst offset
*/
+static void check_size(const uint8_t *start, const uint8_t **tb_ptr)
+{
+ const uint8_t *old_code_ptr = start - 2;
+ uint8_t op_size = old_code_ptr[1];
+ tci_assert(*tb_ptr == old_code_ptr + op_size);
+}
+
static void tci_args_l(const uint8_t **tb_ptr, void **l0)
{
+ const uint8_t *start = *tb_ptr;
+
*l0 = (void *)tci_read_label(tb_ptr);
+
+ check_size(start, tb_ptr);
}
static void tci_args_rr(const uint8_t **tb_ptr,
TCGReg *r0, TCGReg *r1)
{
+ const uint8_t *start = *tb_ptr;
+
*r0 = tci_read_r(tb_ptr);
*r1 = tci_read_r(tb_ptr);
+
+ check_size(start, tb_ptr);
}
static void tci_args_ri(const uint8_t **tb_ptr,
TCGReg *r0, tcg_target_ulong *i1)
{
+ const uint8_t *start = *tb_ptr;
+
*r0 = tci_read_r(tb_ptr);
*i1 = tci_read_i32(tb_ptr);
+
+ check_size(start, tb_ptr);
}
#if TCG_TARGET_REG_BITS == 64
static void tci_args_rI(const uint8_t **tb_ptr,
TCGReg *r0, tcg_target_ulong *i1)
{
+ const uint8_t *start = *tb_ptr;
+
*r0 = tci_read_r(tb_ptr);
*i1 = tci_read_i(tb_ptr);
+
+ check_size(start, tb_ptr);
}
#endif
static void tci_args_rrm(const uint8_t **tb_ptr,
TCGReg *r0, TCGReg *r1, TCGMemOpIdx *m2)
{
+ const uint8_t *start = *tb_ptr;
+
*r0 = tci_read_r(tb_ptr);
*r1 = tci_read_r(tb_ptr);
*m2 = tci_read_i32(tb_ptr);
+
+ check_size(start, tb_ptr);
}
static void tci_args_rrr(const uint8_t **tb_ptr,
TCGReg *r0, TCGReg *r1, TCGReg *r2)
{
+ const uint8_t *start = *tb_ptr;
+
*r0 = tci_read_r(tb_ptr);
*r1 = tci_read_r(tb_ptr);
*r2 = tci_read_r(tb_ptr);
+
+ check_size(start, tb_ptr);
}
static void tci_args_rrs(const uint8_t **tb_ptr,
TCGReg *r0, TCGReg *r1, int32_t *i2)
{
+ const uint8_t *start = *tb_ptr;
+
*r0 = tci_read_r(tb_ptr);
*r1 = tci_read_r(tb_ptr);
*i2 = tci_read_s32(tb_ptr);
+
+ check_size(start, tb_ptr);
}
static void tci_args_rrcl(const uint8_t **tb_ptr,
TCGReg *r0, TCGReg *r1, TCGCond *c2, void **l3)
{
+ const uint8_t *start = *tb_ptr;
+
*r0 = tci_read_r(tb_ptr);
*r1 = tci_read_r(tb_ptr);
*c2 = tci_read_b(tb_ptr);
*l3 = (void *)tci_read_label(tb_ptr);
+
+ check_size(start, tb_ptr);
}
static void tci_args_rrrc(const uint8_t **tb_ptr,
TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGCond *c3)
{
+ const uint8_t *start = *tb_ptr;
+
*r0 = tci_read_r(tb_ptr);
*r1 = tci_read_r(tb_ptr);
*r2 = tci_read_r(tb_ptr);
*c3 = tci_read_b(tb_ptr);
+
+ check_size(start, tb_ptr);
}
static void tci_args_rrrm(const uint8_t **tb_ptr,
TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGMemOpIdx *m3)
{
+ const uint8_t *start = *tb_ptr;
+
*r0 = tci_read_r(tb_ptr);
*r1 = tci_read_r(tb_ptr);
*r2 = tci_read_r(tb_ptr);
*m3 = tci_read_i32(tb_ptr);
+
+ check_size(start, tb_ptr);
}
static void tci_args_rrrbb(const uint8_t **tb_ptr, TCGReg *r0, TCGReg *r1,
TCGReg *r2, uint8_t *i3, uint8_t *i4)
{
+ const uint8_t *start = *tb_ptr;
+
*r0 = tci_read_r(tb_ptr);
*r1 = tci_read_r(tb_ptr);
*r2 = tci_read_r(tb_ptr);
*i3 = tci_read_b(tb_ptr);
*i4 = tci_read_b(tb_ptr);
+
+ check_size(start, tb_ptr);
}
static void tci_args_rrrrm(const uint8_t **tb_ptr, TCGReg *r0, TCGReg *r1,
TCGReg *r2, TCGReg *r3, TCGMemOpIdx *m4)
{
+ const uint8_t *start = *tb_ptr;
+
*r0 = tci_read_r(tb_ptr);
*r1 = tci_read_r(tb_ptr);
*r2 = tci_read_r(tb_ptr);
*r3 = tci_read_r(tb_ptr);
*m4 = tci_read_i32(tb_ptr);
+
+ check_size(start, tb_ptr);
}
#if TCG_TARGET_REG_BITS == 32
static void tci_args_rrrr(const uint8_t **tb_ptr,
TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3)
{
+ const uint8_t *start = *tb_ptr;
+
*r0 = tci_read_r(tb_ptr);
*r1 = tci_read_r(tb_ptr);
*r2 = tci_read_r(tb_ptr);
*r3 = tci_read_r(tb_ptr);
+
+ check_size(start, tb_ptr);
}
static void tci_args_rrrrcl(const uint8_t **tb_ptr, TCGReg *r0, TCGReg *r1,
TCGReg *r2, TCGReg *r3, TCGCond *c4, void **l5)
{
+ const uint8_t *start = *tb_ptr;
+
*r0 = tci_read_r(tb_ptr);
*r1 = tci_read_r(tb_ptr);
*r2 = tci_read_r(tb_ptr);
*r3 = tci_read_r(tb_ptr);
*c4 = tci_read_b(tb_ptr);
*l5 = (void *)tci_read_label(tb_ptr);
+
+ check_size(start, tb_ptr);
}
static void tci_args_rrrrrc(const uint8_t **tb_ptr, TCGReg *r0, TCGReg *r1,
TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGCond *c5)
{
+ const uint8_t *start = *tb_ptr;
+
*r0 = tci_read_r(tb_ptr);
*r1 = tci_read_r(tb_ptr);
*r2 = tci_read_r(tb_ptr);
*r3 = tci_read_r(tb_ptr);
*r4 = tci_read_r(tb_ptr);
*c5 = tci_read_b(tb_ptr);
+
+ check_size(start, tb_ptr);
}
static void tci_args_rrrrrr(const uint8_t **tb_ptr, TCGReg *r0, TCGReg *r1,
TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGReg *r5)
{
+ const uint8_t *start = *tb_ptr;
+
*r0 = tci_read_r(tb_ptr);
*r1 = tci_read_r(tb_ptr);
*r2 = tci_read_r(tb_ptr);
*r3 = tci_read_r(tb_ptr);
*r4 = tci_read_r(tb_ptr);
*r5 = tci_read_r(tb_ptr);
+
+ check_size(start, tb_ptr);
}
#endif
@@ -440,10 +511,6 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
for (;;) {
TCGOpcode opc = tb_ptr[0];
-#if defined(CONFIG_DEBUG_TCG) && !defined(NDEBUG)
- uint8_t op_size = tb_ptr[1];
- const uint8_t *old_code_ptr = tb_ptr;
-#endif
TCGReg r0, r1, r2, r3;
tcg_target_ulong t1;
TCGCond condition;
@@ -493,7 +560,6 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
break;
case INDEX_op_br:
tci_args_l(&tb_ptr, &ptr);
- tci_assert(tb_ptr == old_code_ptr + op_size);
tb_ptr = ptr;
continue;
case INDEX_op_setcond_i32:
@@ -646,9 +712,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
case INDEX_op_brcond_i32:
tci_args_rrcl(&tb_ptr, &r0, &r1, &condition, &ptr);
if (tci_compare32(regs[r0], regs[r1], condition)) {
- tci_assert(tb_ptr == old_code_ptr + op_size);
tb_ptr = ptr;
- continue;
}
break;
#if TCG_TARGET_REG_BITS == 32
@@ -669,7 +733,6 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
T1 = tci_uint64(regs[r1], regs[r0]);
T2 = tci_uint64(regs[r3], regs[r2]);
if (tci_compare64(T1, T2, condition)) {
- tci_assert(tb_ptr == old_code_ptr + op_size);
tb_ptr = ptr;
continue;
}
@@ -803,9 +866,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
case INDEX_op_brcond_i64:
tci_args_rrcl(&tb_ptr, &r0, &r1, &condition, &ptr);
if (tci_compare64(regs[r0], regs[r1], condition)) {
- tci_assert(tb_ptr == old_code_ptr + op_size);
tb_ptr = ptr;
- continue;
}
break;
case INDEX_op_ext32s_i64:
@@ -834,9 +895,8 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
case INDEX_op_goto_tb:
tci_args_l(&tb_ptr, &ptr);
- tci_assert(tb_ptr == old_code_ptr + op_size);
tb_ptr = *(void **)ptr;
- continue;
+ break;
case INDEX_op_qemu_ld_i32:
if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
@@ -1014,6 +1074,5 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState *env,
default:
g_assert_not_reached();
}
- tci_assert(tb_ptr == old_code_ptr + op_size);
}
}
--
2.25.1
- [PATCH v3 26/70] tcg/tci: Reuse tci_args_l for calls., (continued)
- [PATCH v3 26/70] tcg/tci: Reuse tci_args_l for calls., Richard Henderson, 2021/02/07
- [PATCH v3 23/70] tcg/tci: Split out tci_args_rrrrrc, Richard Henderson, 2021/02/07
- [PATCH v3 28/70] tcg/tci: Reuse tci_args_l for goto_tb, Richard Henderson, 2021/02/07
- [PATCH v3 31/70] tcg/tci: Clean up deposit operations, Richard Henderson, 2021/02/07
- [PATCH v3 27/70] tcg/tci: Reuse tci_args_l for exit_tb, Richard Henderson, 2021/02/07
- [PATCH v3 33/70] tcg/tci: Split out tci_args_{rrm,rrrm,rrrrm}, Richard Henderson, 2021/02/07
- [PATCH v3 39/70] tcg/tci: Improve tcg_target_call_clobber_regs, Richard Henderson, 2021/02/07
- [PATCH v3 38/70] tcg/tci: Use ffi for calls, Richard Henderson, 2021/02/07
- [PATCH v3 25/70] tcg/tci: Split out tci_args_ri and tci_args_rI, Richard Henderson, 2021/02/07
- [PATCH v3 37/70] tcg: Build ffi data structures for helpers, Richard Henderson, 2021/02/07
- [PATCH v3 34/70] tcg/tci: Hoist op_size checking into tci_args_*, Richard Henderson <=
- [PATCH v3 36/70] tcg/tci: Implement the disassembler properly, Richard Henderson, 2021/02/07
- [PATCH v3 29/70] tcg/tci: Split out tci_args_rrrrrr, Richard Henderson, 2021/02/07
- [PATCH v3 35/70] tcg/tci: Remove tci_disas, Richard Henderson, 2021/02/07
- [PATCH v3 32/70] tcg/tci: Reduce qemu_ld/st TCGMemOpIdx operand to 32-bits, Richard Henderson, 2021/02/07
- [PATCH v3 30/70] tcg/tci: Split out tci_args_rrrr, Richard Henderson, 2021/02/07
- [PATCH v3 40/70] tcg/tci: Move call-return regs to end of tcg_target_reg_alloc_order, Richard Henderson, 2021/02/07
- [PATCH v3 41/70] tcg/tci: Push opcode emit into each case, Richard Henderson, 2021/02/07
- [PATCH v3 42/70] tcg/tci: Split out tcg_out_op_rrs, Richard Henderson, 2021/02/07
- [PATCH v3 43/70] tcg/tci: Split out tcg_out_op_l, Richard Henderson, 2021/02/07
- [PATCH v3 44/70] tcg/tci: Split out tcg_out_op_p, Richard Henderson, 2021/02/07