[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH 29/38] target/riscv: RV64 Only SIMD 32-bit Add/Subtract Instructions
From: |
LIU Zhiwei |
Subject: |
[PATCH 29/38] target/riscv: RV64 Only SIMD 32-bit Add/Subtract Instructions |
Date: |
Fri, 12 Feb 2021 23:02:47 +0800 |
Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
---
target/riscv/helper.h | 30 +++
target/riscv/insn32-64.decode | 32 +++
target/riscv/insn_trans/trans_rvp.c.inc | 67 ++++++
target/riscv/packed_helper.c | 278 ++++++++++++++++++++++++
4 files changed, 407 insertions(+)
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index 7b3f41866e..0ade207de6 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -1396,3 +1396,33 @@ DEF_HELPER_3(sra_u, tl, env, tl, tl)
DEF_HELPER_3(bitrev, tl, env, tl, tl)
DEF_HELPER_3(wext, tl, env, i64, tl)
DEF_HELPER_4(bpick, tl, env, tl, tl, tl)
+#ifdef TARGET_RISCV64
+DEF_HELPER_3(radd32, tl, env, tl, tl)
+DEF_HELPER_3(uradd32, tl, env, tl, tl)
+DEF_HELPER_3(kadd32, tl, env, tl, tl)
+DEF_HELPER_3(ukadd32, tl, env, tl, tl)
+DEF_HELPER_3(rsub32, tl, env, tl, tl)
+DEF_HELPER_3(ursub32, tl, env, tl, tl)
+DEF_HELPER_3(ksub32, tl, env, tl, tl)
+DEF_HELPER_3(uksub32, tl, env, tl, tl)
+DEF_HELPER_3(cras32, tl, env, tl, tl)
+DEF_HELPER_3(rcras32, tl, env, tl, tl)
+DEF_HELPER_3(urcras32, tl, env, tl, tl)
+DEF_HELPER_3(kcras32, tl, env, tl, tl)
+DEF_HELPER_3(ukcras32, tl, env, tl, tl)
+DEF_HELPER_3(crsa32, tl, env, tl, tl)
+DEF_HELPER_3(rcrsa32, tl, env, tl, tl)
+DEF_HELPER_3(urcrsa32, tl, env, tl, tl)
+DEF_HELPER_3(kcrsa32, tl, env, tl, tl)
+DEF_HELPER_3(ukcrsa32, tl, env, tl, tl)
+DEF_HELPER_3(stas32, tl, env, tl, tl)
+DEF_HELPER_3(rstas32, tl, env, tl, tl)
+DEF_HELPER_3(urstas32, tl, env, tl, tl)
+DEF_HELPER_3(kstas32, tl, env, tl, tl)
+DEF_HELPER_3(ukstas32, tl, env, tl, tl)
+DEF_HELPER_3(stsa32, tl, env, tl, tl)
+DEF_HELPER_3(rstsa32, tl, env, tl, tl)
+DEF_HELPER_3(urstsa32, tl, env, tl, tl)
+DEF_HELPER_3(kstsa32, tl, env, tl, tl)
+DEF_HELPER_3(ukstsa32, tl, env, tl, tl)
+#endif
diff --git a/target/riscv/insn32-64.decode b/target/riscv/insn32-64.decode
index 1094172210..66eec1a44a 100644
--- a/target/riscv/insn32-64.decode
+++ b/target/riscv/insn32-64.decode
@@ -82,3 +82,35 @@ fmv_d_x 1111001 00000 ..... 000 ..... 1010011 @r2
hlv_wu 0110100 00001 ..... 100 ..... 1110011 @r2
hlv_d 0110110 00000 ..... 100 ..... 1110011 @r2
hsv_d 0110111 ..... ..... 100 00000 1110011 @r2_s
+
+# *** RV64P Standard Extension (in addition to RV32P) ***
+add32 0100000 ..... ..... 010 ..... 1111111 @r
+radd32 0000000 ..... ..... 010 ..... 1111111 @r
+uradd32 0010000 ..... ..... 010 ..... 1111111 @r
+kadd32 0001000 ..... ..... 010 ..... 1111111 @r
+ukadd32 0011000 ..... ..... 010 ..... 1111111 @r
+sub32 0100001 ..... ..... 010 ..... 1111111 @r
+rsub32 0000001 ..... ..... 010 ..... 1111111 @r
+ursub32 0010001 ..... ..... 010 ..... 1111111 @r
+ksub32 0001001 ..... ..... 010 ..... 1111111 @r
+uksub32 0011001 ..... ..... 010 ..... 1111111 @r
+cras32 0100010 ..... ..... 010 ..... 1111111 @r
+rcras32 0000010 ..... ..... 010 ..... 1111111 @r
+urcras32 0010010 ..... ..... 010 ..... 1111111 @r
+kcras32 0001010 ..... ..... 010 ..... 1111111 @r
+ukcras32 0011010 ..... ..... 010 ..... 1111111 @r
+crsa32 0100011 ..... ..... 010 ..... 1111111 @r
+rcrsa32 0000011 ..... ..... 010 ..... 1111111 @r
+urcrsa32 0010011 ..... ..... 010 ..... 1111111 @r
+kcrsa32 0001011 ..... ..... 010 ..... 1111111 @r
+ukcrsa32 0011011 ..... ..... 010 ..... 1111111 @r
+stas32 1111000 ..... ..... 010 ..... 1111111 @r
+rstas32 1011000 ..... ..... 010 ..... 1111111 @r
+urstas32 1101000 ..... ..... 010 ..... 1111111 @r
+kstas32 1100000 ..... ..... 010 ..... 1111111 @r
+ukstas32 1110000 ..... ..... 010 ..... 1111111 @r
+stsa32 1111001 ..... ..... 010 ..... 1111111 @r
+rstsa32 1011001 ..... ..... 010 ..... 1111111 @r
+urstsa32 1101001 ..... ..... 010 ..... 1111111 @r
+kstsa32 1100001 ..... ..... 010 ..... 1111111 @r
+ukstsa32 1110001 ..... ..... 010 ..... 1111111 @r
diff --git a/target/riscv/insn_trans/trans_rvp.c.inc b/target/riscv/insn_trans/trans_rvp.c.inc
index 8c47fd562b..ea673b3aca 100644
--- a/target/riscv/insn_trans/trans_rvp.c.inc
+++ b/target/riscv/insn_trans/trans_rvp.c.inc
@@ -1090,3 +1090,70 @@ static bool trans_msubr32(DisasContext *ctx, arg_r *a)
tcg_temp_free_i32(w3);
return true;
}
+
+#ifdef TARGET_RISCV64
+/*
+ *** RV64 Only Instructions
+ */
+/* (RV64 Only) SIMD 32-bit Add/Subtract Instructions */
+static void tcg_gen_simd_add32(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+
+ tcg_gen_andi_i64(t1, a, ~0xffffffff);
+ tcg_gen_add_i64(t2, a, b);
+ tcg_gen_add_i64(t1, t1, b);
+ tcg_gen_deposit_i64(d, t1, t2, 0, 32);
+
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+}
+
+GEN_RVP_R_INLINE(add32, add, 2, trans_add);
+
+static void tcg_gen_simd_sub32(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+
+ tcg_gen_andi_i64(t1, b, ~0xffffffff);
+ tcg_gen_sub_i64(t2, a, b);
+ tcg_gen_sub_i64(t1, a, t1);
+ tcg_gen_deposit_i64(d, t1, t2, 0, 32);
+
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+}
+
+GEN_RVP_R_INLINE(sub32, sub, 2, trans_sub);
+
+GEN_RVP_R_OOL(radd32);
+GEN_RVP_R_OOL(uradd32);
+GEN_RVP_R_OOL(kadd32);
+GEN_RVP_R_OOL(ukadd32);
+GEN_RVP_R_OOL(rsub32);
+GEN_RVP_R_OOL(ursub32);
+GEN_RVP_R_OOL(ksub32);
+GEN_RVP_R_OOL(uksub32);
+GEN_RVP_R_OOL(cras32);
+GEN_RVP_R_OOL(rcras32);
+GEN_RVP_R_OOL(urcras32);
+GEN_RVP_R_OOL(kcras32);
+GEN_RVP_R_OOL(ukcras32);
+GEN_RVP_R_OOL(crsa32);
+GEN_RVP_R_OOL(rcrsa32);
+GEN_RVP_R_OOL(urcrsa32);
+GEN_RVP_R_OOL(kcrsa32);
+GEN_RVP_R_OOL(ukcrsa32);
+GEN_RVP_R_OOL(stas32);
+GEN_RVP_R_OOL(rstas32);
+GEN_RVP_R_OOL(urstas32);
+GEN_RVP_R_OOL(kstas32);
+GEN_RVP_R_OOL(ukstas32);
+GEN_RVP_R_OOL(stsa32);
+GEN_RVP_R_OOL(rstsa32);
+GEN_RVP_R_OOL(urstsa32);
+GEN_RVP_R_OOL(kstsa32);
+GEN_RVP_R_OOL(ukstsa32);
+#endif
diff --git a/target/riscv/packed_helper.c b/target/riscv/packed_helper.c
index 95e60da70b..bb56933c39 100644
--- a/target/riscv/packed_helper.c
+++ b/target/riscv/packed_helper.c
@@ -2996,3 +2996,281 @@ static inline void do_bpick(CPURISCVState *env, void *vd, void *va,
}
RVPR_ACC(bpick, 1, sizeof(target_ulong));
+
+/*
+ *** RV64 Only Instructions
+ */
+/* (RV64 Only) SIMD 32-bit Add/Subtract Instructions */
+#ifdef TARGET_RISCV64
+static inline void do_radd32(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint16_t i)
+{
+ int32_t *d = vd, *a = va, *b = vb;
+ d[i] = hadd32(a[i], b[i]);
+}
+
+RVPR(radd32, 1, 4);
+
+static inline void do_uradd32(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint16_t i)
+{
+ uint32_t *d = vd, *a = va, *b = vb;
+ d[i] = haddu32(a[i], b[i]);
+}
+
+RVPR(uradd32, 1, 4);
+
+static inline void do_kadd32(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint16_t i)
+{
+ int32_t *d = vd, *a = va, *b = vb;
+ d[i] = sadd32(env, 0, a[i], b[i]);
+}
+
+RVPR(kadd32, 1, 4);
+
+static inline void do_ukadd32(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint16_t i)
+{
+ uint32_t *d = vd, *a = va, *b = vb;
+ d[i] = saddu32(env, 0, a[i], b[i]);
+}
+
+RVPR(ukadd32, 1, 4);
+
+static inline void do_rsub32(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint16_t i)
+{
+ int32_t *d = vd, *a = va, *b = vb;
+ d[i] = hsub32(a[i], b[i]);
+}
+
+RVPR(rsub32, 1, 4);
+
+static inline void do_ursub32(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint16_t i)
+{
+ uint32_t *d = vd, *a = va, *b = vb;
+ d[i] = hsubu64(a[i], b[i]);
+}
+
+RVPR(ursub32, 1, 4);
+
+static inline void do_ksub32(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint16_t i)
+{
+ int32_t *d = vd, *a = va, *b = vb;
+ d[i] = ssub32(env, 0, a[i], b[i]);
+}
+
+RVPR(ksub32, 1, 4);
+
+static inline void do_uksub32(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint16_t i)
+{
+ uint32_t *d = vd, *a = va, *b = vb;
+ d[i] = ssubu32(env, 0, a[i], b[i]);
+}
+
+RVPR(uksub32, 1, 4);
+
+static inline void do_cras32(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint8_t i)
+{
+ uint32_t *d = vd, *a = va, *b = vb;
+ d[H4(i)] = a[H4(i)] - b[H4(i + 1)];
+ d[H4(i + 1)] = a[H4(i + 1)] + b[H4(i)];
+}
+
+RVPR(cras32, 2, 4);
+
+static inline void do_rcras32(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint8_t i)
+{
+ int32_t *d = vd, *a = va, *b = vb;
+ d[H4(i)] = hsub32(a[H4(i)], b[H4(i + 1)]);
+ d[H4(i + 1)] = hadd32(a[H4(i + 1)], b[H4(i)]);
+}
+
+RVPR(rcras32, 2, 4);
+
+static inline void do_urcras32(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint8_t i)
+{
+ uint32_t *d = vd, *a = va, *b = vb;
+ d[H4(i)] = hsubu64(a[H4(i)], b[H4(i + 1)]);
+ d[H4(i + 1)] = haddu32(a[H4(i + 1)], b[H4(i)]);
+}
+
+RVPR(urcras32, 2, 4);
+
+static inline void do_kcras32(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint8_t i)
+{
+ int32_t *d = vd, *a = va, *b = vb;
+ d[H4(i)] = ssub32(env, 0, a[H4(i)], b[H4(i + 1)]);
+ d[H4(i + 1)] = sadd32(env, 0, a[H4(i + 1)], b[H4(i)]);
+}
+
+RVPR(kcras32, 2, 4);
+
+static inline void do_ukcras32(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint8_t i)
+{
+ uint32_t *d = vd, *a = va, *b = vb;
+ d[H4(i)] = ssubu32(env, 0, a[H4(i)], b[H4(i + 1)]);
+ d[H4(i + 1)] = saddu32(env, 0, a[H4(i + 1)], b[H4(i)]);
+}
+
+RVPR(ukcras32, 2, 4);
+
+static inline void do_crsa32(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint8_t i)
+{
+ uint32_t *d = vd, *a = va, *b = vb;
+ d[H4(i)] = a[H4(i)] + b[H4(i + 1)];
+ d[H4(i + 1)] = a[H4(i + 1)] - b[H4(i)];
+}
+
+RVPR(crsa32, 2, 4);
+
+static inline void do_rcrsa32(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint8_t i)
+{
+ int32_t *d = vd, *a = va, *b = vb;
+ d[H4(i)] = hadd32(a[H4(i)], b[H4(i + 1)]);
+ d[H4(i + 1)] = hsub32(a[H4(i + 1)], b[H4(i)]);
+}
+
+RVPR(rcrsa32, 2, 4);
+
+static inline void do_urcrsa32(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint8_t i)
+{
+ uint32_t *d = vd, *a = va, *b = vb;
+ d[H4(i)] = haddu32(a[H4(i)], b[H4(i + 1)]);
+ d[H4(i + 1)] = hsubu64(a[H4(i + 1)], b[H4(i)]);
+}
+
+RVPR(urcrsa32, 2, 4);
+
+static inline void do_kcrsa32(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint8_t i)
+{
+ int32_t *d = vd, *a = va, *b = vb;
+ d[H4(i)] = sadd32(env, 0, a[H4(i)], b[H4(i + 1)]);
+ d[H4(i + 1)] = ssub32(env, 0, a[H4(i + 1)], b[H4(i)]);
+}
+
+RVPR(kcrsa32, 2, 4);
+
+static inline void do_ukcrsa32(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint8_t i)
+{
+ uint32_t *d = vd, *a = va, *b = vb;
+ d[H4(i)] = saddu32(env, 0, a[H4(i)], b[H4(i + 1)]);
+ d[H4(i + 1)] = ssubu32(env, 0, a[H4(i + 1)], b[H4(i)]);
+}
+
+RVPR(ukcrsa32, 2, 4);
+
+static inline void do_stas32(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint8_t i)
+{
+ int32_t *d = vd, *a = va, *b = vb;
+ d[H4(i)] = a[H4(i)] - b[H4(i)];
+ d[H4(i + 1)] = a[H4(i + 1)] + b[H4(i + 1)];
+}
+
+RVPR(stas32, 2, 4);
+
+static inline void do_rstas32(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint8_t i)
+{
+ int32_t *d = vd, *a = va, *b = vb;
+ d[H4(i)] = hsub32(a[H4(i)], b[H4(i)]);
+ d[H4(i + 1)] = hadd32(a[H4(i + 1)], b[H4(i + 1)]);
+}
+
+RVPR(rstas32, 2, 4);
+
+static inline void do_urstas32(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint8_t i)
+{
+ uint32_t *d = vd, *a = va, *b = vb;
+ d[H4(i)] = hsubu64(a[H4(i)], b[H4(i)]);
+ d[H4(i + 1)] = haddu32(a[H4(i + 1)], b[H4(i + 1)]);
+}
+
+RVPR(urstas32, 2, 4);
+
+static inline void do_kstas32(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint8_t i)
+{
+ int32_t *d = vd, *a = va, *b = vb;
+ d[H4(i)] = ssub32(env, 0, a[H4(i)], b[H4(i)]);
+ d[H4(i + 1)] = sadd32(env, 0, a[H4(i + 1)], b[H4(i + 1)]);
+}
+
+RVPR(kstas32, 2, 4);
+
+static inline void do_ukstas32(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint8_t i)
+{
+ uint32_t *d = vd, *a = va, *b = vb;
+ d[H4(i)] = ssubu32(env, 0, a[H4(i)], b[H4(i)]);
+ d[H4(i + 1)] = saddu32(env, 0, a[H4(i + 1)], b[H4(i + 1)]);
+}
+
+RVPR(ukstas32, 2, 4);
+
+static inline void do_stsa32(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint8_t i)
+{
+ uint32_t *d = vd, *a = va, *b = vb;
+ d[H4(i)] = a[H4(i)] + b[H4(i)];
+ d[H4(i + 1)] = a[H4(i + 1)] - b[H4(i + 1)];
+}
+
+RVPR(stsa32, 2, 4);
+
+static inline void do_rstsa32(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint8_t i)
+{
+ int32_t *d = vd, *a = va, *b = vb;
+ d[H4(i)] = hadd32(a[H4(i)], b[H4(i)]);
+ d[H4(i + 1)] = hsub32(a[H4(i + 1)], b[H4(i + 1)]);
+}
+
+RVPR(rstsa32, 2, 4);
+
+static inline void do_urstsa32(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint8_t i)
+{
+ uint32_t *d = vd, *a = va, *b = vb;
+ d[H4(i)] = haddu32(a[H4(i)], b[H4(i)]);
+ d[H4(i + 1)] = hsubu64(a[H4(i + 1)], b[H4(i + 1)]);
+}
+
+RVPR(urstsa32, 2, 4);
+
+static inline void do_kstsa32(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint8_t i)
+{
+ int32_t *d = vd, *a = va, *b = vb;
+ d[H4(i)] = sadd32(env, 0, a[H4(i)], b[H4(i)]);
+ d[H4(i + 1)] = ssub32(env, 0, a[H4(i + 1)], b[H4(i + 1)]);
+}
+
+RVPR(kstsa32, 2, 4);
+
+static inline void do_ukstsa32(CPURISCVState *env, void *vd, void *va,
+ void *vb, uint8_t i)
+{
+ uint32_t *d = vd, *a = va, *b = vb;
+ d[H4(i)] = saddu32(env, 0, a[H4(i)], b[H4(i)]);
+ d[H4(i + 1)] = ssubu32(env, 0, a[H4(i + 1)], b[H4(i + 1)]);
+}
+
+RVPR(ukstsa32, 2, 4);
+#endif
--
2.17.1
- [PATCH 19/38] target/riscv: Signed 16-bit Multiply 64-bit Add/Subtract Instructions, (continued)
- [PATCH 19/38] target/riscv: Signed 16-bit Multiply 64-bit Add/Subtract Instructions, LIU Zhiwei, 2021/02/12
- [PATCH 20/38] target/riscv: Partial-SIMD Miscellaneous Instructions, LIU Zhiwei, 2021/02/12
- [PATCH 21/38] target/riscv: 8-bit Multiply with 32-bit Add Instructions, LIU Zhiwei, 2021/02/12
- [PATCH 22/38] target/riscv: 64-bit Add/Subtract Instructions, LIU Zhiwei, 2021/02/12
- [PATCH 23/38] target/riscv: 32-bit Multiply 64-bit Add/Subtract Instructions, LIU Zhiwei, 2021/02/12
- [PATCH 24/38] target/riscv: Signed 16-bit Multiply with 64-bit Add/Subtract Instructions, LIU Zhiwei, 2021/02/12
- [PATCH 25/38] target/riscv: Non-SIMD Q15 saturation ALU Instructions, LIU Zhiwei, 2021/02/12
- [PATCH 26/38] target/riscv: Non-SIMD Q31 saturation ALU Instructions, LIU Zhiwei, 2021/02/12
- [PATCH 27/38] target/riscv: 32-bit Computation Instructions, LIU Zhiwei, 2021/02/12
- [PATCH 28/38] target/riscv: Non-SIMD Miscellaneous Instructions, LIU Zhiwei, 2021/02/12
- [PATCH 29/38] target/riscv: RV64 Only SIMD 32-bit Add/Subtract Instructions,
LIU Zhiwei <=
- [PATCH 30/38] target/riscv: RV64 Only SIMD 32-bit Shift Instructions, LIU Zhiwei, 2021/02/12
- [PATCH 31/38] target/riscv: RV64 Only SIMD 32-bit Miscellaneous Instructions, LIU Zhiwei, 2021/02/12
- [PATCH 32/38] target/riscv: RV64 Only SIMD Q15 saturating Multiply Instructions, LIU Zhiwei, 2021/02/12
- [PATCH 33/38] target/riscv: RV64 Only 32-bit Multiply Instructions, LIU Zhiwei, 2021/02/12
- [PATCH 34/38] target/riscv: RV64 Only 32-bit Multiply & Add Instructions, LIU Zhiwei, 2021/02/12
- [PATCH 35/38] target/riscv: RV64 Only 32-bit Parallel Multiply & Add Instructions, LIU Zhiwei, 2021/02/12
- [PATCH 36/38] target/riscv: RV64 Only Non-SIMD 32-bit Shift Instructions, LIU Zhiwei, 2021/02/12
- [PATCH 37/38] target/riscv: RV64 Only 32-bit Packing Instructions, LIU Zhiwei, 2021/02/12
- [PATCH 38/38] target/riscv: configure and turn on packed extension from command line, LIU Zhiwei, 2021/02/12