[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v4 34/71] tcg/tci: Hoist op_size checking into tci_args_*
From: |
Richard Henderson |
Subject: |
[PATCH v4 34/71] tcg/tci: Hoist op_size checking into tci_args_* |
Date: |
Wed, 17 Feb 2021 12:19:59 -0800 |
This performs the size check while reading the arguments,
which means that we don't have to arrange for it to be
done after the operation. This tidies all of the branches.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
tcg/tci.c | 87 ++++++++++++++++++++++++++++++++++++++++++++++---------
1 file changed, 73 insertions(+), 14 deletions(-)
diff --git a/tcg/tci.c b/tcg/tci.c
index f6cc5a3ab0..6b63beea28 100644
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -24,7 +24,7 @@
#if defined(CONFIG_DEBUG_TCG)
# define tci_assert(cond) assert(cond)
#else
-# define tci_assert(cond) ((void)0)
+# define tci_assert(cond) ((void)(cond))
#endif
#include "qemu-common.h"
@@ -135,146 +135,217 @@ static tcg_target_ulong tci_read_label(const uint8_t
**tb_ptr)
* s = signed ldst offset
*/
+static void check_size(const uint8_t *start, const uint8_t **tb_ptr)
+{
+ const uint8_t *old_code_ptr = start - 2;
+ uint8_t op_size = old_code_ptr[1];
+ tci_assert(*tb_ptr == old_code_ptr + op_size);
+}
+
static void tci_args_l(const uint8_t **tb_ptr, void **l0)
{
+ const uint8_t *start = *tb_ptr;
+
*l0 = (void *)tci_read_label(tb_ptr);
+
+ check_size(start, tb_ptr);
}
static void tci_args_rr(const uint8_t **tb_ptr,
TCGReg *r0, TCGReg *r1)
{
+ const uint8_t *start = *tb_ptr;
+
*r0 = tci_read_r(tb_ptr);
*r1 = tci_read_r(tb_ptr);
+
+ check_size(start, tb_ptr);
}
static void tci_args_ri(const uint8_t **tb_ptr,
TCGReg *r0, tcg_target_ulong *i1)
{
+ const uint8_t *start = *tb_ptr;
+
*r0 = tci_read_r(tb_ptr);
*i1 = tci_read_i32(tb_ptr);
+
+ check_size(start, tb_ptr);
}
#if TCG_TARGET_REG_BITS == 64
static void tci_args_rI(const uint8_t **tb_ptr,
TCGReg *r0, tcg_target_ulong *i1)
{
+ const uint8_t *start = *tb_ptr;
+
*r0 = tci_read_r(tb_ptr);
*i1 = tci_read_i(tb_ptr);
+
+ check_size(start, tb_ptr);
}
#endif
static void tci_args_rrm(const uint8_t **tb_ptr,
TCGReg *r0, TCGReg *r1, TCGMemOpIdx *m2)
{
+ const uint8_t *start = *tb_ptr;
+
*r0 = tci_read_r(tb_ptr);
*r1 = tci_read_r(tb_ptr);
*m2 = tci_read_i32(tb_ptr);
+
+ check_size(start, tb_ptr);
}
static void tci_args_rrr(const uint8_t **tb_ptr,
TCGReg *r0, TCGReg *r1, TCGReg *r2)
{
+ const uint8_t *start = *tb_ptr;
+
*r0 = tci_read_r(tb_ptr);
*r1 = tci_read_r(tb_ptr);
*r2 = tci_read_r(tb_ptr);
+
+ check_size(start, tb_ptr);
}
static void tci_args_rrs(const uint8_t **tb_ptr,
TCGReg *r0, TCGReg *r1, int32_t *i2)
{
+ const uint8_t *start = *tb_ptr;
+
*r0 = tci_read_r(tb_ptr);
*r1 = tci_read_r(tb_ptr);
*i2 = tci_read_s32(tb_ptr);
+
+ check_size(start, tb_ptr);
}
static void tci_args_rrcl(const uint8_t **tb_ptr,
TCGReg *r0, TCGReg *r1, TCGCond *c2, void **l3)
{
+ const uint8_t *start = *tb_ptr;
+
*r0 = tci_read_r(tb_ptr);
*r1 = tci_read_r(tb_ptr);
*c2 = tci_read_b(tb_ptr);
*l3 = (void *)tci_read_label(tb_ptr);
+
+ check_size(start, tb_ptr);
}
static void tci_args_rrrc(const uint8_t **tb_ptr,
TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGCond *c3)
{
+ const uint8_t *start = *tb_ptr;
+
*r0 = tci_read_r(tb_ptr);
*r1 = tci_read_r(tb_ptr);
*r2 = tci_read_r(tb_ptr);
*c3 = tci_read_b(tb_ptr);
+
+ check_size(start, tb_ptr);
}
static void tci_args_rrrm(const uint8_t **tb_ptr,
TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGMemOpIdx *m3)
{
+ const uint8_t *start = *tb_ptr;
+
*r0 = tci_read_r(tb_ptr);
*r1 = tci_read_r(tb_ptr);
*r2 = tci_read_r(tb_ptr);
*m3 = tci_read_i32(tb_ptr);
+
+ check_size(start, tb_ptr);
}
static void tci_args_rrrbb(const uint8_t **tb_ptr, TCGReg *r0, TCGReg *r1,
TCGReg *r2, uint8_t *i3, uint8_t *i4)
{
+ const uint8_t *start = *tb_ptr;
+
*r0 = tci_read_r(tb_ptr);
*r1 = tci_read_r(tb_ptr);
*r2 = tci_read_r(tb_ptr);
*i3 = tci_read_b(tb_ptr);
*i4 = tci_read_b(tb_ptr);
+
+ check_size(start, tb_ptr);
}
static void tci_args_rrrrm(const uint8_t **tb_ptr, TCGReg *r0, TCGReg *r1,
TCGReg *r2, TCGReg *r3, TCGMemOpIdx *m4)
{
+ const uint8_t *start = *tb_ptr;
+
*r0 = tci_read_r(tb_ptr);
*r1 = tci_read_r(tb_ptr);
*r2 = tci_read_r(tb_ptr);
*r3 = tci_read_r(tb_ptr);
*m4 = tci_read_i32(tb_ptr);
+
+ check_size(start, tb_ptr);
}
#if TCG_TARGET_REG_BITS == 32
static void tci_args_rrrr(const uint8_t **tb_ptr,
TCGReg *r0, TCGReg *r1, TCGReg *r2, TCGReg *r3)
{
+ const uint8_t *start = *tb_ptr;
+
*r0 = tci_read_r(tb_ptr);
*r1 = tci_read_r(tb_ptr);
*r2 = tci_read_r(tb_ptr);
*r3 = tci_read_r(tb_ptr);
+
+ check_size(start, tb_ptr);
}
static void tci_args_rrrrcl(const uint8_t **tb_ptr, TCGReg *r0, TCGReg *r1,
TCGReg *r2, TCGReg *r3, TCGCond *c4, void **l5)
{
+ const uint8_t *start = *tb_ptr;
+
*r0 = tci_read_r(tb_ptr);
*r1 = tci_read_r(tb_ptr);
*r2 = tci_read_r(tb_ptr);
*r3 = tci_read_r(tb_ptr);
*c4 = tci_read_b(tb_ptr);
*l5 = (void *)tci_read_label(tb_ptr);
+
+ check_size(start, tb_ptr);
}
static void tci_args_rrrrrc(const uint8_t **tb_ptr, TCGReg *r0, TCGReg *r1,
TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGCond *c5)
{
+ const uint8_t *start = *tb_ptr;
+
*r0 = tci_read_r(tb_ptr);
*r1 = tci_read_r(tb_ptr);
*r2 = tci_read_r(tb_ptr);
*r3 = tci_read_r(tb_ptr);
*r4 = tci_read_r(tb_ptr);
*c5 = tci_read_b(tb_ptr);
+
+ check_size(start, tb_ptr);
}
static void tci_args_rrrrrr(const uint8_t **tb_ptr, TCGReg *r0, TCGReg *r1,
TCGReg *r2, TCGReg *r3, TCGReg *r4, TCGReg *r5)
{
+ const uint8_t *start = *tb_ptr;
+
*r0 = tci_read_r(tb_ptr);
*r1 = tci_read_r(tb_ptr);
*r2 = tci_read_r(tb_ptr);
*r3 = tci_read_r(tb_ptr);
*r4 = tci_read_r(tb_ptr);
*r5 = tci_read_r(tb_ptr);
+
+ check_size(start, tb_ptr);
}
#endif
@@ -423,10 +494,6 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState
*env,
for (;;) {
TCGOpcode opc = tb_ptr[0];
-#if defined(CONFIG_DEBUG_TCG) && !defined(NDEBUG)
- uint8_t op_size = tb_ptr[1];
- const uint8_t *old_code_ptr = tb_ptr;
-#endif
TCGReg r0, r1, r2, r3;
tcg_target_ulong t1;
TCGCond condition;
@@ -476,7 +543,6 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState
*env,
break;
case INDEX_op_br:
tci_args_l(&tb_ptr, &ptr);
- tci_assert(tb_ptr == old_code_ptr + op_size);
tb_ptr = ptr;
continue;
case INDEX_op_setcond_i32:
@@ -629,9 +695,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState
*env,
case INDEX_op_brcond_i32:
tci_args_rrcl(&tb_ptr, &r0, &r1, &condition, &ptr);
if (tci_compare32(regs[r0], regs[r1], condition)) {
- tci_assert(tb_ptr == old_code_ptr + op_size);
tb_ptr = ptr;
- continue;
}
break;
#if TCG_TARGET_REG_BITS == 32
@@ -652,7 +716,6 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState
*env,
T1 = tci_uint64(regs[r1], regs[r0]);
T2 = tci_uint64(regs[r3], regs[r2]);
if (tci_compare64(T1, T2, condition)) {
- tci_assert(tb_ptr == old_code_ptr + op_size);
tb_ptr = ptr;
continue;
}
@@ -786,9 +849,7 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState
*env,
case INDEX_op_brcond_i64:
tci_args_rrcl(&tb_ptr, &r0, &r1, &condition, &ptr);
if (tci_compare64(regs[r0], regs[r1], condition)) {
- tci_assert(tb_ptr == old_code_ptr + op_size);
tb_ptr = ptr;
- continue;
}
break;
case INDEX_op_ext32s_i64:
@@ -817,9 +878,8 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState
*env,
case INDEX_op_goto_tb:
tci_args_l(&tb_ptr, &ptr);
- tci_assert(tb_ptr == old_code_ptr + op_size);
tb_ptr = *(void **)ptr;
- continue;
+ break;
case INDEX_op_qemu_ld_i32:
if (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS) {
@@ -997,6 +1057,5 @@ uintptr_t QEMU_DISABLE_CFI tcg_qemu_tb_exec(CPUArchState
*env,
default:
g_assert_not_reached();
}
- tci_assert(tb_ptr == old_code_ptr + op_size);
}
}
--
2.25.1
- [PATCH v4 23/71] tcg/tci: Split out tci_args_rrrrrc, (continued)
- [PATCH v4 23/71] tcg/tci: Split out tci_args_rrrrrc, Richard Henderson, 2021/02/17
- [PATCH v4 25/71] tcg/tci: Split out tci_args_ri and tci_args_rI, Richard Henderson, 2021/02/17
- [PATCH v4 31/71] tcg/tci: Clean up deposit operations, Richard Henderson, 2021/02/17
- [PATCH v4 30/71] tcg/tci: Split out tci_args_rrrr, Richard Henderson, 2021/02/17
- [PATCH v4 27/71] tcg/tci: Reuse tci_args_l for exit_tb, Richard Henderson, 2021/02/17
- [PATCH v4 28/71] tcg/tci: Reuse tci_args_l for goto_tb, Richard Henderson, 2021/02/17
- [PATCH v4 32/71] tcg/tci: Reduce qemu_ld/st TCGMemOpIdx operand to 32-bits, Richard Henderson, 2021/02/17
- [PATCH v4 33/71] tcg/tci: Split out tci_args_{rrm,rrrm,rrrrm}, Richard Henderson, 2021/02/17
- [PATCH v4 29/71] tcg/tci: Split out tci_args_rrrrrr, Richard Henderson, 2021/02/17
- [PATCH v4 35/71] tcg/tci: Remove tci_disas, Richard Henderson, 2021/02/17
- [PATCH v4 34/71] tcg/tci: Hoist op_size checking into tci_args_*,
Richard Henderson <=
- [PATCH v4 36/71] tcg/tci: Implement the disassembler properly, Richard Henderson, 2021/02/17
- [PATCH v4 37/71] tcg: Build ffi data structures for helpers, Richard Henderson, 2021/02/17
- [PATCH v4 38/71] tcg/tci: Use ffi for calls, Richard Henderson, 2021/02/17
- [PATCH v4 39/71] tcg/tci: Improve tcg_target_call_clobber_regs, Richard Henderson, 2021/02/17
- [PATCH v4 40/71] tcg/tci: Move call-return regs to end of tcg_target_reg_alloc_order, Richard Henderson, 2021/02/17
- [PATCH v4 41/71] tcg/tci: Push opcode emit into each case, Richard Henderson, 2021/02/17
- [PATCH v4 42/71] tcg/tci: Split out tcg_out_op_rrs, Richard Henderson, 2021/02/17
- [PATCH v4 43/71] tcg/tci: Split out tcg_out_op_l, Richard Henderson, 2021/02/17
- [PATCH v4 44/71] tcg/tci: Split out tcg_out_op_p, Richard Henderson, 2021/02/17
- [PATCH v4 45/71] tcg/tci: Split out tcg_out_op_rr, Richard Henderson, 2021/02/17