[PATCH v3 46/47] target/loongarch: Implement xvld xvst
From: Song Gao
Subject: [PATCH v3 46/47] target/loongarch: Implement xvld xvst
Date: Fri, 14 Jul 2023 16:46:14 +0800
This patch includes:
- XVLD[X], XVST[X];
- XVLDREPL.{B/H/W/D};
- XVSTELM.{B/H/W/D}.
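In short: XVLD/XVST transfer a full 256-bit LASX register to or from memory (implemented below as four 64-bit accesses), XVLDREPL.{B/H/W/D} loads a single element and broadcasts it into every lane, and XVSTELM.{B/H/W/D} stores one selected element. A rough, illustrative C model of the XVLDREPL.W semantics (the xreg256 type and the eight-lane view are assumptions for illustration, not QEMU code):

#include <stdint.h>
#include <string.h>

/* Illustrative 256-bit LASX register, viewed as eight 32-bit lanes. */
typedef struct { uint32_t w[8]; } xreg256;

/* Model of XVLDREPL.W: load one 32-bit element from memory and
   replicate it into every lane of the destination register. */
xreg256 model_xvldrepl_w(const void *addr)
{
    uint32_t elem;
    xreg256 xd;

    memcpy(&elem, addr, sizeof(elem));   /* single element load */
    for (int i = 0; i < 8; i++) {
        xd.w[i] = elem;                  /* broadcast to all lanes */
    }
    return xd;
}

For example, loading from an address holding 0x11223344 leaves 0x11223344 in all eight lanes.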
Signed-off-by: Song Gao <gaosong@loongson.cn>
---
target/loongarch/disas.c | 24 ++++++
target/loongarch/insn_trans/trans_lasx.c.inc | 80 ++++++++++++++++++++
target/loongarch/insn_trans/trans_lsx.c.inc | 54 ++++++-------
target/loongarch/insns.decode | 18 +++++
4 files changed, 149 insertions(+), 27 deletions(-)
diff --git a/target/loongarch/disas.c b/target/loongarch/disas.c
index a518c59772..e5fb362d7f 100644
--- a/target/loongarch/disas.c
+++ b/target/loongarch/disas.c
@@ -1753,6 +1753,16 @@ static void output_vvr_x(DisasContext *ctx, arg_vvr *a, const char *mnemonic)
    output(ctx, mnemonic, "x%d, x%d, r%d", a->vd, a->vj, a->rk);
}
+static void output_vrr_x(DisasContext *ctx, arg_vrr *a, const char *mnemonic)
+{
+    output(ctx, mnemonic, "x%d, r%d, r%d", a->vd, a->rj, a->rk);
+}
+
+static void output_vr_ii_x(DisasContext *ctx, arg_vr_ii *a, const char *mnemonic)
+{
+    output(ctx, mnemonic, "x%d, r%d, 0x%x, 0x%x", a->vd, a->rj, a->imm, a->imm2);
+}
+
INSN_LASX(xvadd_b, vvv)
INSN_LASX(xvadd_h, vvv)
INSN_LASX(xvadd_w, vvv)
@@ -2596,3 +2606,17 @@ INSN_LASX(xvextrins_d, vv_i)
INSN_LASX(xvextrins_w, vv_i)
INSN_LASX(xvextrins_h, vv_i)
INSN_LASX(xvextrins_b, vv_i)
+
+INSN_LASX(xvld, vr_i)
+INSN_LASX(xvst, vr_i)
+INSN_LASX(xvldx, vrr)
+INSN_LASX(xvstx, vrr)
+
+INSN_LASX(xvldrepl_d, vr_i)
+INSN_LASX(xvldrepl_w, vr_i)
+INSN_LASX(xvldrepl_h, vr_i)
+INSN_LASX(xvldrepl_b, vr_i)
+INSN_LASX(xvstelm_d, vr_ii)
+INSN_LASX(xvstelm_w, vr_ii)
+INSN_LASX(xvstelm_h, vr_ii)
+INSN_LASX(xvstelm_b, vr_ii)
diff --git a/target/loongarch/insn_trans/trans_lasx.c.inc b/target/loongarch/insn_trans/trans_lasx.c.inc
index cf53c12543..b8b112d7cc 100644
--- a/target/loongarch/insn_trans/trans_lasx.c.inc
+++ b/target/loongarch/insn_trans/trans_lasx.c.inc
@@ -926,3 +926,83 @@ TRANS(xvextrins_b, gen_vv_i, 32, gen_helper_vextrins_b)
TRANS(xvextrins_h, gen_vv_i, 32, gen_helper_vextrins_h)
TRANS(xvextrins_w, gen_vv_i, 32, gen_helper_vextrins_w)
TRANS(xvextrins_d, gen_vv_i, 32, gen_helper_vextrins_d)
+
+static bool gen_lasx_memory(DisasContext *ctx, arg_vr_i *a,
+                            void (*func)(DisasContext *, int, TCGv))
+{
+    TCGv addr = gpr_src(ctx, a->rj, EXT_NONE);
+    TCGv temp = NULL;
+
+    CHECK_VEC;
+
+    if (a->imm) {
+        temp = tcg_temp_new();
+        tcg_gen_addi_tl(temp, addr, a->imm);
+        addr = temp;
+    }
+
+    func(ctx, a->vd, addr);
+    return true;
+}
+
+static void gen_xvld(DisasContext *ctx, int vreg, TCGv addr)
+{
+    int i;
+    TCGv temp = tcg_temp_new();
+    TCGv dest = tcg_temp_new();
+
+    tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_TEUQ);
+    set_vreg64(dest, vreg, 0);
+
+    for (i = 1; i < 4; i++) {
+        tcg_gen_addi_tl(temp, addr, 8 * i);
+        tcg_gen_qemu_ld_i64(dest, temp, ctx->mem_idx, MO_TEUQ);
+        set_vreg64(dest, vreg, i);
+    }
+}
+
+static void gen_xvst(DisasContext *ctx, int vreg, TCGv addr)
+{
+    int i;
+    TCGv temp = tcg_temp_new();
+    TCGv dest = tcg_temp_new();
+
+    get_vreg64(dest, vreg, 0);
+    tcg_gen_qemu_st_i64(dest, addr, ctx->mem_idx, MO_TEUQ);
+
+    for (i = 1; i < 4; i++) {
+        tcg_gen_addi_tl(temp, addr, 8 * i);
+        get_vreg64(dest, vreg, i);
+        tcg_gen_qemu_st_i64(dest, temp, ctx->mem_idx, MO_TEUQ);
+    }
+}
+
+TRANS(xvld, gen_lasx_memory, gen_xvld)
+TRANS(xvst, gen_lasx_memory, gen_xvst)
+
+static bool gen_lasx_memoryx(DisasContext *ctx, arg_vrr *a,
+                             void (*func)(DisasContext *, int, TCGv))
+{
+    TCGv src1 = gpr_src(ctx, a->rj, EXT_NONE);
+    TCGv src2 = gpr_src(ctx, a->rk, EXT_NONE);
+    TCGv addr = tcg_temp_new();
+
+    CHECK_VEC;
+
+    tcg_gen_add_tl(addr, src1, src2);
+    func(ctx, a->vd, addr);
+
+    return true;
+}
+
+TRANS(xvldx, gen_lasx_memoryx, gen_xvld)
+TRANS(xvstx, gen_lasx_memoryx, gen_xvst)
+
+TRANS(xvldrepl_b, do_vldrepl, 32, MO_8)
+TRANS(xvldrepl_h, do_vldrepl, 32, MO_16)
+TRANS(xvldrepl_w, do_vldrepl, 32, MO_32)
+TRANS(xvldrepl_d, do_vldrepl, 32, MO_64)
+VSTELM(xvstelm_b, MO_8, B)
+VSTELM(xvstelm_h, MO_16, H)
+VSTELM(xvstelm_w, MO_32, W)
+VSTELM(xvstelm_d, MO_64, D)
diff --git a/target/loongarch/insn_trans/trans_lsx.c.inc b/target/loongarch/insn_trans/trans_lsx.c.inc
index d2ea70d8f0..8fa721eab3 100644
--- a/target/loongarch/insn_trans/trans_lsx.c.inc
+++ b/target/loongarch/insn_trans/trans_lsx.c.inc
@@ -4430,33 +4430,33 @@ static bool trans_vstx(DisasContext *ctx, arg_vrr *a)
return true;
}
-#define VLDREPL(NAME, MO) \
-static bool trans_## NAME (DisasContext *ctx, arg_vr_i *a) \
-{ \
-    TCGv addr, temp; \
-    TCGv_i64 val; \
- \
-    CHECK_VEC; \
- \
-    addr = gpr_src(ctx, a->rj, EXT_NONE); \
-    val = tcg_temp_new_i64(); \
- \
-    if (a->imm) { \
-        temp = tcg_temp_new(); \
-        tcg_gen_addi_tl(temp, addr, a->imm); \
-        addr = temp; \
-    } \
- \
-    tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, MO); \
-    tcg_gen_gvec_dup_i64(MO, vec_full_offset(a->vd), 16, ctx->vl/8, val); \
- \
-    return true; \
-}
-
-VLDREPL(vldrepl_b, MO_8)
-VLDREPL(vldrepl_h, MO_16)
-VLDREPL(vldrepl_w, MO_32)
-VLDREPL(vldrepl_d, MO_64)
+static bool do_vldrepl(DisasContext *ctx, arg_vr_i *a,
+                       uint32_t oprsz, MemOp mop)
+{
+    TCGv addr, temp;
+    TCGv_i64 val;
+
+    CHECK_VEC;
+
+    addr = gpr_src(ctx, a->rj, EXT_NONE);
+    val = tcg_temp_new_i64();
+
+    if (a->imm) {
+        temp = tcg_temp_new();
+        tcg_gen_addi_tl(temp, addr, a->imm);
+        addr = temp;
+    }
+
+    tcg_gen_qemu_ld_i64(val, addr, ctx->mem_idx, mop);
+    tcg_gen_gvec_dup_i64(mop, vec_full_offset(a->vd), oprsz, ctx->vl / 8, val);
+
+    return true;
+}
+
+TRANS(vldrepl_b, do_vldrepl, 16, MO_8)
+TRANS(vldrepl_h, do_vldrepl, 16, MO_16)
+TRANS(vldrepl_w, do_vldrepl, 16, MO_32)
+TRANS(vldrepl_d, do_vldrepl, 16, MO_64)
#define VSTELM(NAME, MO, E) \
static bool trans_## NAME (DisasContext *ctx, arg_vr_ii *a) \
diff --git a/target/loongarch/insns.decode b/target/loongarch/insns.decode
index 64b67ee9ac..64b308f9fb 100644
--- a/target/loongarch/insns.decode
+++ b/target/loongarch/insns.decode
@@ -550,6 +550,10 @@ dbcl 0000 00000010 10101 ............... @i15
@vr_i8i2 .... ........ imm2:2 ........ rj:5 vd:5 &vr_ii imm=%i8s2
@vr_i8i3 .... ....... imm2:3 ........ rj:5 vd:5 &vr_ii imm=%i8s1
@vr_i8i4 .... ...... imm2:4 imm:s8 rj:5 vd:5 &vr_ii
+@vr_i8i2x .... ........ imm2:2 ........ rj:5 vd:5 &vr_ii imm=%i8s3
+@vr_i8i3x .... ....... imm2:3 ........ rj:5 vd:5 &vr_ii imm=%i8s2
+@vr_i8i4x .... ...... imm2:4 ........ rj:5 vd:5 &vr_ii imm=%i8s1
+@vr_i8i5x .... ..... imm2:5 imm:s8 rj:5 vd:5 &vr_ii
@vrr .... ........ ..... rk:5 rj:5 vd:5 &vrr
@v_i13 .... ........ .. imm:13 vd:5 &v_i
@@ -2060,3 +2064,17 @@ xvextrins_d 0111 01111000 00 ........ ..... ..... @vv_ui8
xvextrins_w 0111 01111000 01 ........ ..... ..... @vv_ui8
xvextrins_h 0111 01111000 10 ........ ..... ..... @vv_ui8
xvextrins_b 0111 01111000 11 ........ ..... ..... @vv_ui8
+
+xvld 0010 110010 ............ ..... ..... @vr_i12
+xvst 0010 110011 ............ ..... ..... @vr_i12
+xvldx 0011 10000100 10000 ..... ..... ..... @vrr
+xvstx 0011 10000100 11000 ..... ..... ..... @vrr
+
+xvldrepl_d 0011 00100001 0 ......... ..... ..... @vr_i9
+xvldrepl_w 0011 00100010 .......... ..... ..... @vr_i10
+xvldrepl_h 0011 0010010 ........... ..... ..... @vr_i11
+xvldrepl_b 0011 001010 ............ ..... ..... @vr_i12
+xvstelm_d 0011 00110001 .. ........ ..... ..... @vr_i8i2x
+xvstelm_w 0011 0011001 ... ........ ..... ..... @vr_i8i3x
+xvstelm_h 0011 001101 .... ........ ..... ..... @vr_i8i4x
+xvstelm_b 0011 00111 ..... ........ ..... ..... @vr_i8i5x
--
2.39.1