From: LIU Zhiwei
Subject: [PATCH 23/38] target/riscv: 32-bit Multiply 64-bit Add/Subtract Instructions
Date: Fri, 12 Feb 2021 23:02:41 +0800
Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
---
target/riscv/helper.h | 9 ++
target/riscv/insn32.decode | 9 ++
target/riscv/insn_trans/trans_rvp.c.inc | 63 ++++++++++
target/riscv/packed_helper.c | 155 ++++++++++++++++++++++++
4 files changed, 236 insertions(+)
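
For context, each of these instructions multiplies 32-bit elements of rs1 and
rs2 and accumulates the products into a 64-bit value: the even/odd register
pair rd/rd+1 on RV32, rd itself on RV64. A rough reference model of the plain
signed variant (smar64) follows; it is not part of this patch and all names
are local to the sketch:

    #include <stdint.h>

    /*
     * Illustrative model of smar64: multiply each 32-bit lane of rs1 by
     * the matching lane of rs2 and add every product to the 64-bit
     * accumulator. One lane on RV32 (xlen == 32), two on RV64.
     */
    static int64_t smar64_model(uint64_t rs1, uint64_t rs2,
                                int64_t acc, int xlen)
    {
        for (int i = 0; i < xlen / 32; i++) {
            int32_t a = (int32_t)(rs1 >> (32 * i));
            int32_t b = (int32_t)(rs2 >> (32 * i));
            acc += (int64_t)a * b;    /* widening 32x32 -> 64 multiply */
        }
        return acc;
    }

The smsr64/umar64/umsr64 variants subtract instead of add or use unsigned
lanes; the k/uk variants saturate on overflow and set vxsat.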
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index cce4c8cbcc..4d89417287 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -1338,3 +1338,12 @@ DEF_HELPER_3(rsub64, i64, env, i64, i64)
DEF_HELPER_3(ursub64, i64, env, i64, i64)
DEF_HELPER_3(ksub64, i64, env, i64, i64)
DEF_HELPER_3(uksub64, i64, env, i64, i64)
+
+DEF_HELPER_4(smar64, i64, env, tl, tl, i64)
+DEF_HELPER_4(smsr64, i64, env, tl, tl, i64)
+DEF_HELPER_4(umar64, i64, env, tl, tl, i64)
+DEF_HELPER_4(umsr64, i64, env, tl, tl, i64)
+DEF_HELPER_4(kmar64, i64, env, tl, tl, i64)
+DEF_HELPER_4(kmsr64, i64, env, tl, tl, i64)
+DEF_HELPER_4(ukmar64, i64, env, tl, tl, i64)
+DEF_HELPER_4(ukmsr64, i64, env, tl, tl, i64)
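
Each DEF_HELPER_4 line declares an out-of-line helper taking the CPU state,
the two source registers and the current 64-bit accumulator, and returning
the new 64-bit value. Roughly (the exact expansion depends on the DEF_HELPER
machinery), the first line corresponds to a prototype like:

    uint64_t helper_smar64(CPURISCVState *env, target_ulong rs1,
                           target_ulong rs2, uint64_t acc);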
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
index b52e1c1142..60b8b3617b 100644
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -808,3 +808,12 @@ rsub64 1000001 ..... ..... 001 ..... 1111111 @r
ursub64 1010001 ..... ..... 001 ..... 1111111 @r
ksub64 1001001 ..... ..... 001 ..... 1111111 @r
uksub64 1011001 ..... ..... 001 ..... 1111111 @r
+
+smar64 1000010 ..... ..... 001 ..... 1111111 @r
+smsr64 1000011 ..... ..... 001 ..... 1111111 @r
+umar64 1010010 ..... ..... 001 ..... 1111111 @r
+umsr64 1010011 ..... ..... 001 ..... 1111111 @r
+kmar64 1001010 ..... ..... 001 ..... 1111111 @r
+kmsr64 1001011 ..... ..... 001 ..... 1111111 @r
+ukmar64 1011010 ..... ..... 001 ..... 1111111 @r
+ukmsr64 1011011 ..... ..... 001 ..... 1111111 @r
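
All eight patterns share funct3 = 001 and the major opcode 1111111, differing
only in funct7 (bits 31:25). An illustrative encoder for smar64, hand-built
from the pattern above (not part of the patch):

    /* Hand-assemble smar64 rd, rs1, rs2 per the R-type layout. */
    static uint32_t encode_smar64(uint32_t rd, uint32_t rs1, uint32_t rs2)
    {
        return (0x42u << 25)    /* funct7 = 1000010 */
             | (rs2  << 20)
             | (rs1  << 15)
             | (0x1u << 12)     /* funct3 = 001 */
             | (rd   <<  7)
             |  0x7fu;          /* opcode = 1111111 */
    }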
diff --git a/target/riscv/insn_trans/trans_rvp.c.inc b/target/riscv/insn_trans/trans_rvp.c.inc
index 94e5e09425..3e62024aac 100644
--- a/target/riscv/insn_trans/trans_rvp.c.inc
+++ b/target/riscv/insn_trans/trans_rvp.c.inc
@@ -742,3 +742,66 @@ GEN_RVP_R_D64_S64_S64_OOL(rsub64);
GEN_RVP_R_D64_S64_S64_OOL(ursub64);
GEN_RVP_R_D64_S64_S64_OOL(ksub64);
GEN_RVP_R_D64_S64_S64_OOL(uksub64);
+
+/* 32-bit Multiply with 64-bit Add/Subtract Instructions */
+
+/* Function to accumulate into the 64-bit destination register */
+static bool
+r_d64_acc_ool(DisasContext *ctx, arg_r *a,
+ void (* fn)(TCGv_i64, TCGv_ptr, TCGv, TCGv, TCGv_i64))
+{
+#ifdef TARGET_RISCV64
+ return r_acc_ool(ctx, a, fn);
+#else
+ TCGv_i32 src1, src2;
+ TCGv_i64 dst, src3;
+ TCGv_i32 d0, d1;
+
+ if (!has_ext(ctx, RVP) || !ctx->ext_p64) {
+ return false;
+ }
+
+ src1 = tcg_temp_new_i32();
+ src2 = tcg_temp_new_i32();
+ src3 = tcg_temp_new_i64();
+ dst = tcg_temp_new_i64();
+ d0 = tcg_temp_new_i32();
+ d1 = tcg_temp_new_i32();
+
+ gen_get_gpr(src1, a->rs1);
+ gen_get_gpr(src2, a->rs2);
+ gen_get_gpr(d0, a->rd);
+ gen_get_gpr(d1, a->rd + 1);
+ tcg_gen_concat_i32_i64(src3, d0, d1);
+
+ fn(dst, cpu_env, src1, src2, src3);
+
+ tcg_gen_extrl_i64_i32(d0, dst);
+ tcg_gen_extrh_i64_i32(d1, dst);
+ gen_set_gpr(a->rd, d0);
+ gen_set_gpr(a->rd + 1, d1);
+
+ tcg_temp_free_i32(d0);
+ tcg_temp_free_i32(d1);
+ tcg_temp_free_i32(src1);
+ tcg_temp_free_i32(src2);
+ tcg_temp_free_i64(src3);
+ tcg_temp_free_i64(dst);
+ return true;
+#endif
+}
+
+#define GEN_RVP_R_D64_ACC_OOL(NAME) \
+static bool trans_##NAME(DisasContext *s, arg_r *a) \
+{ \
+ return r_d64_acc_ool(s, a, gen_helper_##NAME); \
+}
+
+GEN_RVP_R_D64_ACC_OOL(smar64);
+GEN_RVP_R_D64_ACC_OOL(smsr64);
+GEN_RVP_R_D64_ACC_OOL(umar64);
+GEN_RVP_R_D64_ACC_OOL(umsr64);
+GEN_RVP_R_D64_ACC_OOL(kmar64);
+GEN_RVP_R_D64_ACC_OOL(kmsr64);
+GEN_RVP_R_D64_ACC_OOL(ukmar64);
+GEN_RVP_R_D64_ACC_OOL(ukmsr64);
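
On RV32 the 64-bit accumulator lives in the even/odd pair rd/rd+1 with rd
holding the low half, which is what the concat/extract calls above implement.
The same mapping in plain C, as a sketch (gpr[] is a hypothetical guest
register file, not a QEMU structure):

    #include <stdint.h>

    /* rd holds the low 32 bits of the accumulator, rd + 1 the high 32. */
    static uint64_t read_pair(const uint32_t *gpr, int rd)
    {
        return ((uint64_t)gpr[rd + 1] << 32) | gpr[rd];
    }

    static void write_pair(uint32_t *gpr, int rd, uint64_t val)
    {
        gpr[rd] = (uint32_t)val;
        gpr[rd + 1] = (uint32_t)(val >> 32);
    }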
diff --git a/target/riscv/packed_helper.c b/target/riscv/packed_helper.c
index 0629c5178b..3cbe9e51cc 100644
--- a/target/riscv/packed_helper.c
+++ b/target/riscv/packed_helper.c
@@ -2229,3 +2229,158 @@ static inline void do_uksub64(CPURISCVState *env, void *vd, void *va,
}
RVPR64_64_64(uksub64, 1, 8);
+
+/* 32-bit Multiply with 64-bit Add/Subtract Instructions */
+static inline uint64_t
+rvpr64_acc(CPURISCVState *env, target_ulong a,
+ target_ulong b, uint64_t c,
+ uint8_t step, uint8_t size, PackedFn4i *fn)
+{
+ int i, passes = sizeof(target_ulong) / size;
+ uint64_t result = 0;
+
+ for (i = 0; i < passes; i += step) {
+ fn(env, &result, &a, &b, &c, i);
+ }
+ return result;
+}
+
+#define RVPR64_ACC(NAME, STEP, SIZE) \
+uint64_t HELPER(NAME)(CPURISCVState *env, target_ulong a, \
+ target_ulong b, uint64_t c) \
+{ \
+ return rvpr64_acc(env, a, b, c, STEP, SIZE, (PackedFn4i *)do_##NAME);\
+}
+
+static inline void do_smar64(CPURISCVState *env, void *vd, void *va,
+ void *vb, void *vc, uint8_t i)
+{
+ int32_t *a = va, *b = vb;
+ int64_t *d = vd, *c = vc;
+ if (i == 0) {
+ *d = *c;
+ }
+ *d += (int64_t)a[H4(i)] * b[H4(i)];
+}
+
+RVPR64_ACC(smar64, 1, 4);
+
+static inline void do_smsr64(CPURISCVState *env, void *vd, void *va,
+ void *vb, void *vc, uint8_t i)
+{
+ int32_t *a = va, *b = vb;
+ int64_t *d = vd, *c = vc;
+ if (i == 0) {
+ *d = *c;
+ }
+ *d -= (int64_t)a[H4(i)] * b[H4(i)];
+}
+
+RVPR64_ACC(smsr64, 1, 4);
+
+static inline void do_umar64(CPURISCVState *env, void *vd, void *va,
+ void *vb, void *vc, uint8_t i)
+{
+ uint32_t *a = va, *b = vb;
+ uint64_t *d = vd, *c = vc;
+ if (i == 0) {
+ *d = *c;
+ }
+ *d += (uint64_t)a[H4(i)] * b[H4(i)];
+}
+
+RVPR64_ACC(umar64, 1, 4);
+
+static inline void do_umsr64(CPURISCVState *env, void *vd, void *va,
+ void *vb, void *vc, uint8_t i)
+{
+ uint32_t *a = va, *b = vb;
+ uint64_t *d = vd, *c = vc;
+ if (i == 0) {
+ *d = *c;
+ }
+ *d -= (uint64_t)a[H4(i)] * b[H4(i)];
+}
+
+RVPR64_ACC(umsr64, 1, 4);
+
+static inline void do_kmar64(CPURISCVState *env, void *vd, void *va,
+ void *vb, void *vc, uint8_t i)
+{
+ int32_t *a = va, *b = vb;
+ int64_t *d = vd, *c = vc;
+ int64_t m0 = (int64_t)a[H4(i)] * b[H4(i)];
+#ifdef TARGET_RISCV64
+ int64_t m1 = (int64_t)a[H4(i + 1)] * b[H4(i + 1)];
+ if (a[H4(i)] == INT32_MIN && b[H4(i)] == INT32_MIN &&
+ a[H4(i + 1)] == INT32_MIN && b[H4(i + 1)] == INT32_MIN) {
+ if (*c >= 0) {
+ *d = INT64_MAX;
+ env->vxsat = 1;
+ } else {
+ *d = sadd64(env, 0, *c + m0, m1);
+ }
+ } else {
+ *d = sadd64(env, 0, *c, m0 + m1);
+ }
+#else
+ *d = sadd64(env, 0, *c, m0);
+#endif
+}
+
+RVPR64_ACC(kmar64, 1, sizeof(target_ulong));
+
+static inline void do_kmsr64(CPURISCVState *env, void *vd, void *va,
+ void *vb, void *vc, uint8_t i)
+{
+ int32_t *a = va, *b = vb;
+ int64_t *d = vd, *c = vc;
+
+ int64_t m0 = (int64_t)a[H4(i)] * b[H4(i)];
+#ifdef TARGET_RISCV64
+ int64_t m1 = (int64_t)a[H4(i + 1)] * b[H4(i + 1)];
+ if (a[H4(i)] == INT32_MIN && b[H4(i)] == INT32_MIN &&
+ a[H4(i + 1)] == INT32_MIN && b[H4(i + 1)] == INT32_MIN) {
+ if (*c <= 0) {
+ *d = INT64_MIN;
+ env->vxsat = 1;
+ } else {
+ *d = ssub64(env, 0, *c - m0, m1);
+ }
+ } else {
+ *d = ssub64(env, 0, *c, m0 + m1);
+ }
+#else
+ *d = ssub64(env, 0, *c, m0);
+#endif
+}
+
+RVPR64_ACC(kmsr64, 1, sizeof(target_ulong));
+
+static inline void do_ukmar64(CPURISCVState *env, void *vd, void *va,
+ void *vb, void *vc, uint8_t i)
+{
+ uint32_t *a = va, *b = vb;
+ uint64_t *d = vd, *c = vc;
+
+ if (i == 0) {
+ *d = *c;
+ }
+ *d = saddu64(env, 0, *d, (uint64_t)a[H4(i)] * b[H4(i)]);
+}
+
+RVPR64_ACC(ukmar64, 1, 4);
+
+static inline void do_ukmsr64(CPURISCVState *env, void *vd, void *va,
+ void *vb, void *vc, uint8_t i)
+{
+ uint32_t *a = va, *b = vb;
+ uint64_t *d = vd, *c = vc;
+
+ if (i == 0) {
+ *d = *c;
+ }
+ *d = ssubu64(env, 0, *d, (uint64_t)a[H4(i)] * b[H4(i)]);
+}
+
+RVPR64_ACC(ukmsr64, 1, 4);
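
A note on the special case in do_kmar64/do_kmsr64: on RV64 the only input
pattern whose product sum cannot itself be formed in 64 bits is
INT32_MIN * INT32_MIN in both lanes, since each product is 2^62 and their sum
2^63 exceeds INT64_MAX. A quick check of that bound (standalone sketch, not
part of the patch):

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        /* Each INT32_MIN * INT32_MIN product is exactly 2^62. */
        int64_t m = (int64_t)INT32_MIN * INT32_MIN;
        assert(m == INT64_C(1) << 62);
        /* m + m would be 2^63 > INT64_MAX, so kmar64 must saturate
         * (and set vxsat) whenever the accumulator is non-negative;
         * a negative accumulator absorbs one product first. */
        return 0;
    }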
--
2.17.1