[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
Re: [Qemu-devel] [PATCH v2 01/29] tcg: Implement tcg_gen_gvec_3i()
From: Alex Bennée
Subject: Re: [Qemu-devel] [PATCH v2 01/29] tcg: Implement tcg_gen_gvec_3i()
Date: Wed, 01 May 2019 16:23:14 +0100
User-agent: mu4e 1.3.1; emacs 26.1
Richard Henderson <address@hidden> writes:
> From: David Hildenbrand <address@hidden>
>
> Let's add tcg_gen_gvec_3i(), similar to tcg_gen_gvec_2i(), however
> without introducing "gen_helper_gvec_3i *fnoi", as it isn't needed
> for now.
>
> Signed-off-by: David Hildenbrand <address@hidden>
> Message-Id: <address@hidden>
> Signed-off-by: Richard Henderson <address@hidden>
> ---
> tcg/tcg-op-gvec.h | 24 ++++++++
> tcg/tcg-op-gvec.c | 139 ++++++++++++++++++++++++++++++++++++++++++++++
> 2 files changed, 163 insertions(+)
>
> diff --git a/tcg/tcg-op-gvec.h b/tcg/tcg-op-gvec.h
> index 850da32ded..c093243c4c 100644
> --- a/tcg/tcg-op-gvec.h
> +++ b/tcg/tcg-op-gvec.h
> @@ -164,6 +164,27 @@ typedef struct {
> bool load_dest;
> } GVecGen3;
>
> +typedef struct {
> + /*
> + * Expand inline as a 64-bit or 32-bit integer. Only one of these will be
> + * non-NULL.
> + */
> + void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64, int64_t);
> + void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32, int32_t);
> + /* Expand inline with a host vector type. */
> + void (*fniv)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec, int64_t);
> + /* Expand out-of-line helper w/descriptor, data in descriptor. */
> + gen_helper_gvec_3 *fno;
> + /* The opcode, if any, to which this corresponds. */
> + TCGOpcode opc;
> + /* The vector element size, if applicable. */
> + uint8_t vece;
> + /* Prefer i64 to v64. */
> + bool prefer_i64;
> + /* Load dest as a 3rd source operand. */
> + bool load_dest;
> +} GVecGen3i;
> +
> typedef struct {
> /* Expand inline as a 64-bit or 32-bit integer.
> Only one of these will be non-NULL. */
> @@ -193,6 +214,9 @@ void tcg_gen_gvec_2s(uint32_t dofs, uint32_t aofs,
> uint32_t oprsz,
> uint32_t maxsz, TCGv_i64 c, const GVecGen2s *);
> void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
> uint32_t oprsz, uint32_t maxsz, const GVecGen3 *);
> +void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs,
> + uint32_t oprsz, uint32_t maxsz, int64_t c,
> + const GVecGen3i *);
> void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t
> cofs,
> uint32_t oprsz, uint32_t maxsz, const GVecGen4 *);
>
> diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
> index 0996ef0812..f831adb4e7 100644
> --- a/tcg/tcg-op-gvec.c
> +++ b/tcg/tcg-op-gvec.c
> @@ -663,6 +663,29 @@ static void expand_3_i32(uint32_t dofs, uint32_t aofs,
> tcg_temp_free_i32(t0);
> }
>
> +static void expand_3i_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
> + uint32_t oprsz, int32_t c, bool load_dest,
> + void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32, int32_t))
> +{
> + TCGv_i32 t0 = tcg_temp_new_i32();
> + TCGv_i32 t1 = tcg_temp_new_i32();
> + TCGv_i32 t2 = tcg_temp_new_i32();
> + uint32_t i;
> +
> + for (i = 0; i < oprsz; i += 4) {
> + tcg_gen_ld_i32(t0, cpu_env, aofs + i);
> + tcg_gen_ld_i32(t1, cpu_env, bofs + i);
> + if (load_dest) {
> + tcg_gen_ld_i32(t2, cpu_env, dofs + i);
> + }
> + fni(t2, t0, t1, c);
> + tcg_gen_st_i32(t2, cpu_env, dofs + i);
> + }
> + tcg_temp_free_i32(t0);
> + tcg_temp_free_i32(t1);
> + tcg_temp_free_i32(t2);
> +}
> +
> /* Expand OPSZ bytes worth of three-operand operations using i32 elements.
> */
> static void expand_4_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
> uint32_t cofs, uint32_t oprsz, bool write_aofs,
> @@ -770,6 +793,29 @@ static void expand_3_i64(uint32_t dofs, uint32_t aofs,
> tcg_temp_free_i64(t0);
> }
>
> +static void expand_3i_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
> + uint32_t oprsz, int64_t c, bool load_dest,
> + void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64, int64_t))
> +{
> + TCGv_i64 t0 = tcg_temp_new_i64();
> + TCGv_i64 t1 = tcg_temp_new_i64();
> + TCGv_i64 t2 = tcg_temp_new_i64();
> + uint32_t i;
> +
> + for (i = 0; i < oprsz; i += 8) {
> + tcg_gen_ld_i64(t0, cpu_env, aofs + i);
> + tcg_gen_ld_i64(t1, cpu_env, bofs + i);
> + if (load_dest) {
> + tcg_gen_ld_i64(t2, cpu_env, dofs + i);
> + }
> + fni(t2, t0, t1, c);
> + tcg_gen_st_i64(t2, cpu_env, dofs + i);
> + }
> + tcg_temp_free_i64(t0);
> + tcg_temp_free_i64(t1);
> + tcg_temp_free_i64(t2);
> +}
> +
> /* Expand OPSZ bytes worth of three-operand operations using i64 elements.
> */
> static void expand_4_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
> uint32_t cofs, uint32_t oprsz, bool write_aofs,
> @@ -883,6 +929,35 @@ static void expand_3_vec(unsigned vece, uint32_t dofs,
> uint32_t aofs,
> tcg_temp_free_vec(t0);
> }
>
> +/*
> + * Expand OPSZ bytes worth of three-vector operands and an immediate operand
> + * using host vectors.
> + */
> +static void expand_3i_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
> + uint32_t bofs, uint32_t oprsz, uint32_t tysz,
> + TCGType type, int64_t c, bool load_dest,
> + void (*fni)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec,
> + int64_t))
> +{
> + TCGv_vec t0 = tcg_temp_new_vec(type);
> + TCGv_vec t1 = tcg_temp_new_vec(type);
> + TCGv_vec t2 = tcg_temp_new_vec(type);
> + uint32_t i;
> +
> + for (i = 0; i < oprsz; i += tysz) {
> + tcg_gen_ld_vec(t0, cpu_env, aofs + i);
> + tcg_gen_ld_vec(t1, cpu_env, bofs + i);
> + if (load_dest) {
> + tcg_gen_ld_vec(t2, cpu_env, dofs + i);
> + }
> + fni(vece, t2, t0, t1, c);
> + tcg_gen_st_vec(t2, cpu_env, dofs + i);
> + }
> + tcg_temp_free_vec(t0);
> + tcg_temp_free_vec(t1);
> + tcg_temp_free_vec(t2);
> +}
> +
> /* Expand OPSZ bytes worth of four-operand operations using host vectors. */
> static void expand_4_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
> uint32_t bofs, uint32_t cofs, uint32_t oprsz,
> @@ -1174,6 +1249,70 @@ void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs,
> uint32_t bofs,
> }
> }
>
> +/* Expand a vector operation with three vectors and an immediate. */
> +void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs,
> + uint32_t oprsz, uint32_t maxsz, int64_t c,
> + const GVecGen3i *g)
> +{
> + TCGType type;
> + uint32_t some;
> +
> + check_size_align(oprsz, maxsz, dofs | aofs | bofs);
> + check_overlap_3(dofs, aofs, bofs, maxsz);
> +
> + type = 0;
> + if (g->fniv) {
> + type = choose_vector_type(g->opc, g->vece, oprsz, g->prefer_i64);
> + }
> + switch (type) {
> + case TCG_TYPE_V256:
> + /*
> + * Recall that ARM SVE allows vector sizes that are not a
> + * power of 2, but always a multiple of 16. The intent is
> + * that e.g. size == 80 would be expanded with 2x32 + 1x16.
> + */
> + some = QEMU_ALIGN_DOWN(oprsz, 32);
> + expand_3i_vec(g->vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256,
> + c, g->load_dest, g->fniv);
> + if (some == oprsz) {
> + break;
> + }
> + dofs += some;
> + aofs += some;
> + bofs += some;
> + oprsz -= some;
> + maxsz -= some;
> + /* fallthru */
> + case TCG_TYPE_V128:
> + expand_3i_vec(g->vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128,
> + c, g->load_dest, g->fniv);
> + break;
> + case TCG_TYPE_V64:
> + expand_3i_vec(g->vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64,
> + c, g->load_dest, g->fniv);
> + break;
> +
> + case 0:
> + if (g->fni8 && check_size_impl(oprsz, 8)) {
> + expand_3i_i64(dofs, aofs, bofs, oprsz, c, g->load_dest, g->fni8);
> + } else if (g->fni4 && check_size_impl(oprsz, 4)) {
> + expand_3i_i32(dofs, aofs, bofs, oprsz, c, g->load_dest, g->fni4);
> + } else {
> + assert(g->fno != NULL);
> + tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz, maxsz, c, g->fno);
> + return;
> + }
> + break;
> +
> + default:
> + g_assert_not_reached();
> + }
> +
> + if (oprsz < maxsz) {
> + expand_clr(dofs + oprsz, maxsz - oprsz);
> + }
> +}
> +
My only slight concern is this is effectively a c&p of the helper above.
However I can't really see any way to DRY so:
Reviewed-by: Alex Bennée <address@hidden>
> /* Expand a vector four-operand operation. */
> void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t
> cofs,
> uint32_t oprsz, uint32_t maxsz, const GVecGen4 *g)
--
Alex Bennée
- [Qemu-devel] [PATCH v2 00/29] tcg vector improvements, Richard Henderson, 2019/05/01
- [Qemu-devel] [PATCH v2 01/29] tcg: Implement tcg_gen_gvec_3i(), Richard Henderson, 2019/05/01
- Re: [Qemu-devel] [PATCH v2 01/29] tcg: Implement tcg_gen_gvec_3i(), Alex Bennée, 2019/05/01 <= (this message)
- [Qemu-devel] [PATCH v2 02/29] tcg: Do not recreate INDEX_op_neg_vec unless supported, Richard Henderson, 2019/05/01
- [Qemu-devel] [PATCH v2 03/29] tcg: Allow add_vec, sub_vec, neg_vec, not_vec to be expanded, Richard Henderson, 2019/05/01
- [Qemu-devel] [PATCH v2 04/29] tcg: Specify optional vector requirements with a list, Richard Henderson, 2019/05/01
- [Qemu-devel] [PATCH v2 05/29] tcg: Assert fixed_reg is read-only, Richard Henderson, 2019/05/01
- [Qemu-devel] [PATCH v2 06/29] tcg: Return bool success from tcg_out_mov, Richard Henderson, 2019/05/01