[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH v5 13/36] tcg: Add temp allocation for TCGv_i128
From: Richard Henderson
Subject: [PATCH v5 13/36] tcg: Add temp allocation for TCGv_i128
Date: Wed, 25 Jan 2023 18:38:01 -1000
This enables allocation of i128. The type is not yet
usable, as we have not yet added data movement ops.
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/tcg/tcg.h | 32 +++++++++++++++++++++++++
tcg/tcg.c | 60 +++++++++++++++++++++++++++++++++--------------
2 files changed, 74 insertions(+), 18 deletions(-)
diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h
index 8b7e61e7a5..7a8e4bbdd7 100644
--- a/include/tcg/tcg.h
+++ b/include/tcg/tcg.h
@@ -685,6 +685,11 @@ static inline TCGTemp *tcgv_i64_temp(TCGv_i64 v)
return tcgv_i32_temp((TCGv_i32)v);
}
+static inline TCGTemp *tcgv_i128_temp(TCGv_i128 v)
+{
+ return tcgv_i32_temp((TCGv_i32)v);
+}
+
static inline TCGTemp *tcgv_ptr_temp(TCGv_ptr v)
{
return tcgv_i32_temp((TCGv_i32)v);
@@ -705,6 +710,11 @@ static inline TCGArg tcgv_i64_arg(TCGv_i64 v)
return temp_arg(tcgv_i64_temp(v));
}
+static inline TCGArg tcgv_i128_arg(TCGv_i128 v)
+{
+ return temp_arg(tcgv_i128_temp(v));
+}
+
static inline TCGArg tcgv_ptr_arg(TCGv_ptr v)
{
return temp_arg(tcgv_ptr_temp(v));
@@ -726,6 +736,11 @@ static inline TCGv_i64 temp_tcgv_i64(TCGTemp *t)
return (TCGv_i64)temp_tcgv_i32(t);
}
+static inline TCGv_i128 temp_tcgv_i128(TCGTemp *t)
+{
+ return (TCGv_i128)temp_tcgv_i32(t);
+}
+
static inline TCGv_ptr temp_tcgv_ptr(TCGTemp *t)
{
return (TCGv_ptr)temp_tcgv_i32(t);
@@ -851,6 +866,11 @@ static inline void tcg_temp_free_i64(TCGv_i64 arg)
tcg_temp_free_internal(tcgv_i64_temp(arg));
}
+static inline void tcg_temp_free_i128(TCGv_i128 arg)
+{
+ tcg_temp_free_internal(tcgv_i128_temp(arg));
+}
+
static inline void tcg_temp_free_ptr(TCGv_ptr arg)
{
tcg_temp_free_internal(tcgv_ptr_temp(arg));
@@ -899,6 +919,18 @@ static inline TCGv_i64 tcg_temp_local_new_i64(void)
return temp_tcgv_i64(t);
}
+static inline TCGv_i128 tcg_temp_new_i128(void)
+{
+ TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I128, false);
+ return temp_tcgv_i128(t);
+}
+
+static inline TCGv_i128 tcg_temp_local_new_i128(void)
+{
+ TCGTemp *t = tcg_temp_new_internal(TCG_TYPE_I128, true);
+ return temp_tcgv_i128(t);
+}
+
static inline TCGv_ptr tcg_global_mem_new_ptr(TCGv_ptr reg, intptr_t offset,
const char *name)
{
diff --git a/tcg/tcg.c b/tcg/tcg.c
index 63e0753ded..d449bb0864 100644
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -1273,26 +1273,45 @@ TCGTemp *tcg_temp_new_internal(TCGType type, bool temp_local)
tcg_debug_assert(ts->base_type == type);
tcg_debug_assert(ts->kind == kind);
} else {
+ int i, n;
+
+ switch (type) {
+ case TCG_TYPE_I32:
+ case TCG_TYPE_V64:
+ case TCG_TYPE_V128:
+ case TCG_TYPE_V256:
+ n = 1;
+ break;
+ case TCG_TYPE_I64:
+ n = 64 / TCG_TARGET_REG_BITS;
+ break;
+ case TCG_TYPE_I128:
+ n = 128 / TCG_TARGET_REG_BITS;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
ts = tcg_temp_alloc(s);
- if (TCG_TARGET_REG_BITS == 32 && type == TCG_TYPE_I64) {
- TCGTemp *ts2 = tcg_temp_alloc(s);
+ ts->base_type = type;
+ ts->temp_allocated = 1;
+ ts->kind = kind;
- ts->base_type = type;
- ts->type = TCG_TYPE_I32;
- ts->temp_allocated = 1;
- ts->kind = kind;
-
- tcg_debug_assert(ts2 == ts + 1);
- ts2->base_type = TCG_TYPE_I64;
- ts2->type = TCG_TYPE_I32;
- ts2->temp_allocated = 1;
- ts2->temp_subindex = 1;
- ts2->kind = kind;
- } else {
- ts->base_type = type;
+ if (n == 1) {
ts->type = type;
- ts->temp_allocated = 1;
- ts->kind = kind;
+ } else {
+ ts->type = TCG_TYPE_REG;
+
+ for (i = 1; i < n; ++i) {
+ TCGTemp *ts2 = tcg_temp_alloc(s);
+
+ tcg_debug_assert(ts2 == ts + i);
+ ts2->base_type = type;
+ ts2->type = TCG_TYPE_REG;
+ ts2->temp_allocated = 1;
+ ts2->temp_subindex = i;
+ ts2->kind = kind;
+ }
}
}
@@ -3381,9 +3400,14 @@ static void temp_allocate_frame(TCGContext *s, TCGTemp *ts)
case TCG_TYPE_V64:
align = 8;
break;
+ case TCG_TYPE_I128:
case TCG_TYPE_V128:
case TCG_TYPE_V256:
- /* Note that we do not require aligned storage for V256. */
+ /*
+ * Note that we do not require aligned storage for V256,
+ * and that we provide alignment for I128 to match V128,
+ * even if that's above what the host ABI requires.
+ */
align = 16;
break;
default:
--
2.34.1
- Re: [PATCH v5 08/36] include/qemu/int128: Use Int128 structure for TCI, (continued)
- [PATCH v5 10/36] tcg/tci: Fix big-endian return register ordering, Richard Henderson, 2023/01/25
- [PATCH v5 11/36] tcg/tci: Add TCG_TARGET_CALL_{RET,ARG}_I128, Richard Henderson, 2023/01/25
- [PATCH v5 12/36] tcg: Add TCG_TARGET_CALL_{RET,ARG}_I128, Richard Henderson, 2023/01/25
- [PATCH v5 14/36] tcg: Add basic data movement for TCGv_i128, Richard Henderson, 2023/01/25
- [PATCH v5 13/36] tcg: Add temp allocation for TCGv_i128, Richard Henderson <=
- [PATCH v5 15/36] tcg: Add guest load/store primitives for TCGv_i128, Richard Henderson, 2023/01/25
- [PATCH v5 16/36] tcg: Add tcg_gen_{non}atomic_cmpxchg_i128, Richard Henderson, 2023/01/25
- [PATCH v5 17/36] tcg: Split out tcg_gen_nonatomic_cmpxchg_i{32,64}, Richard Henderson, 2023/01/25