[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PATCH 51/55] target/arm: Implement MVE VADC, VSBC
From: |
Peter Maydell |
Subject: |
[PATCH 51/55] target/arm: Implement MVE VADC, VSBC |
Date: |
Mon, 7 Jun 2021 17:58:17 +0100 |
Implement the MVE VADC and VSBC insns. These perform an
add-with-carry or subtract-with-carry of the 32-bit elements in each
lane of the input vectors, where the carry-out of each add is the
carry-in of the next. The initial carry input is either 1 or is from
FPSCR.C; the carry out at the end is written back to FPSCR.C.
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/helper-mve.h | 3 ++
target/arm/mve.decode | 6 ++++
target/arm/mve_helper.c | 30 +++++++++++++++++
target/arm/translate-mve.c | 69 ++++++++++++++++++++++++++++++++++++++
4 files changed, 108 insertions(+)
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index cd2cc6252f8..686e5d9a39b 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -248,6 +248,9 @@ DEF_HELPER_FLAGS_4(mve_vrhaddub, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
DEF_HELPER_FLAGS_4(mve_vrhadduh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
DEF_HELPER_FLAGS_4(mve_vrhadduw, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_5(mve_vadc, TCG_CALL_NO_WG, i32, env, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(mve_vsbc, TCG_CALL_NO_WG, i32, env, ptr, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_4(mve_vadd_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
DEF_HELPER_FLAGS_4(mve_vadd_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index 6b969902df0..6a4aae7a1fc 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -30,6 +30,7 @@
&1op qd qm size
&2op qd qm qn size
&2scalar qd qn rm size
+&vadc qd qm qn i
@vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
# Note that both Rn and Qd are 3 bits only (no D bit)
@@ -42,6 +43,8 @@
@2op_sz28 .... .... .... .... .... .... .... .... &2op qd=%qd qm=%qm qn=%qn \
size=%size_28
+@vadc .... .... .... .... ... i:1 .... .... .... &vadc qd=%qd qm=%qm qn=%qn
+
# The _rev suffix indicates that Vn and Vm are reversed. This is
# the case for shifts. In the Arm ARM these insns are documented
# with the Vm and Vn fields in their usual places, but in the
@@ -160,6 +163,9 @@ VQDMULLT 111 . 1110 0 . 11 ... 0 ... 1 1111 . 0 . 0 ... 1 @2op_sz28
VRHADD_S 111 0 1111 0 . .. ... 0 ... 0 0001 . 1 . 0 ... 0 @2op
VRHADD_U 111 1 1111 0 . .. ... 0 ... 0 0001 . 1 . 0 ... 0 @2op
+VADC 1110 1110 0 . 11 ... 0 ... . 1111 . 0 . 0 ... 0 @vadc
+VSBC 1111 1110 0 . 11 ... 0 ... . 1111 . 0 . 0 ... 0 @vadc
+
# Vector miscellaneous
VCLS 1111 1111 1 . 11 .. 00 ... 0 0100 01 . 0 ... 0 @1op
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index c9434479604..e07f12c8389 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -580,6 +580,36 @@ DO_2OP_U(vrshlu, DO_VRSHLU)
DO_2OP_S(vrhadds, DO_RHADD_S)
DO_2OP_U(vrhaddu, DO_RHADD_U)
+/*
+ * DO_VADC: emit a helper for VADC or VSBC.  OP is the helper-name
+ * suffix; INV is applied to each element of op2 before the addition
+ * (empty for VADC, DO_NOT i.e. bitwise invert for VSBC).  The four
+ * 32-bit lanes are chained: the carry-out of each lane's add is the
+ * carry-in of the next.  nzcv carries FPSCR.NZCV in; the (possibly
+ * updated) value is returned for the translator to write back.
+ */
+#define DO_VADC(OP, INV)                                                \
+    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,         \
+                                    void *vn, void *vm, uint32_t nzcv)  \
+    {                                                                   \
+        uint32_t *d = vd, *n = vn, *m = vm;                             \
+        uint16_t mask = mve_element_mask(env);                          \
+        unsigned e;                                                     \
+        int carry = (nzcv & FPCR_C) ? 1 : 0;                            \
+        /* If we do no additions at all the flags are preserved */      \
+        bool updates_flags = (mask & 0x1111) != 0;                      \
+        for (e = 0; e < 16 / 4; e++, mask >>= 4) {                      \
+            uint64_t r = (uint64_t)n[H4(e)] + INV(m[H4(e)]) + carry;    \
+            /* Only an active lane advances the carry chain */          \
+            if (mask & 1) {                                             \
+                carry = r >> 32;                                        \
+            }                                                           \
+            uint64_t bytemask = mask_to_bytemask4(mask);                \
+            d[H4(e)] &= ~bytemask;                                      \
+            d[H4(e)] |= (r & bytemask);                                 \
+        }                                                               \
+        mve_advance_vpt(env);                                           \
+        if (updates_flags) {                                            \
+            nzcv = carry ? FPCR_C : 0;                                  \
+        }                                                               \
+        return nzcv;                                                    \
+    }
+
+/* VSBC differs only in inverting op2 before the addition */
+DO_VADC(vadc, )
+DO_VADC(vsbc, DO_NOT)
+
+
 static inline int32_t do_sat_bhw(int64_t val, int64_t min, int64_t max, bool *s)
{
if (val > max) {
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index 9a88583385f..2ed499a6de2 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -33,6 +33,7 @@ typedef void MVEGenOneOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
typedef void MVEGenTwoOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr);
typedef void MVEGenTwoOpScalarFn(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
 typedef void MVEGenDualAccOpFn(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_ptr,
                                TCGv_i64);
+typedef void MVEGenADCFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr,
+                         TCGv_i32);
/* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
static inline long mve_qreg_offset(unsigned reg)
@@ -737,3 +738,71 @@ static bool trans_VPST(DisasContext *s, arg_VPST *a)
}
return true;
}
+
+static bool do_vadc(DisasContext *s, arg_vadc *a, MVEGenADCFn fn,
+                    uint32_t fixed_carry)
+{
+    /*
+     * VADC and VSBC: these perform an add-with-carry or subtract-with-carry
+     * of the 32-bit elements in each lane of the input vectors, where the
+     * carry-out of each add is the carry-in of the next. The initial carry
+     * input is either fixed (for the I variant: 0 for VADCI, 1 for VSBCI,
+     * passed in as fixed_carry) or is from FPSCR.C; the carry out at the
+     * end is written back to FPSCR.C.
+     */
+
+    TCGv_ptr qd, qn, qm;
+    TCGv_i32 nzcv, fpscr;
+
+    if (!dc_isar_feature(aa32_mve, s)) {
+        return false;
+    }
+    /* MVE provides only Q0..Q7; reject out-of-range register numbers */
+    if (a->qd > 7 || a->qn > 7 || a->qm > 7 || !fn) {
+        return false;
+    }
+    if (!mve_eci_check(s)) {
+        return true;
+    }
+    if (!vfp_access_check(s)) {
+        return true;
+    }
+
+    /*
+     * This insn is subject to beat-wise execution. Partial execution
+     * of an I=1 (initial carry input fixed) insn which does not
+     * execute the first beat must start with the current FPSCR.NZCV
+     * value, not the fixed constant input.
+     */
+    if (a->i && !mve_skip_first_beat(s)) {
+        /* Carry input is 0 (VADCI) or 1 (VSBCI), NZV zeroed */
+        nzcv = tcg_const_i32(fixed_carry);
+    } else {
+        /* Carry input from existing NZCV flag values */
+        nzcv = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
+        tcg_gen_andi_i32(nzcv, nzcv, FPCR_NZCV_MASK);
+    }
+    qd = mve_qreg_ptr(a->qd);
+    qn = mve_qreg_ptr(a->qn);
+    qm = mve_qreg_ptr(a->qm);
+    /* The helper takes nzcv in and returns the updated value into nzcv */
+    fn(nzcv, cpu_env, qd, qn, qm, nzcv);
+    /* Merge the new NZCV into FPSCR, preserving all the other bits */
+    fpscr = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
+    tcg_gen_andi_i32(fpscr, fpscr, ~FPCR_NZCV_MASK);
+    tcg_gen_or_i32(fpscr, fpscr, nzcv);
+    store_cpu_field(fpscr, vfp.xregs[ARM_VFP_FPSCR]);
+    tcg_temp_free_i32(nzcv);
+    tcg_temp_free_ptr(qd);
+    tcg_temp_free_ptr(qn);
+    tcg_temp_free_ptr(qm);
+    mve_update_eci(s);
+    return true;
+}
+
+/* VADC: carry in from FPSCR.C; the I=1 form (VADCI) fixes carry in to 0 */
+static bool trans_VADC(DisasContext *s, arg_vadc *a)
+{
+    return do_vadc(s, a, gen_helper_mve_vadc, 0);
+}
+
+/* VSBC: as VADC but inverting op2; VSBCI fixes carry in to 1 (FPCR_C) */
+static bool trans_VSBC(DisasContext *s, arg_vadc *a)
+{
+    return do_vadc(s, a, gen_helper_mve_vsbc, FPCR_C);
+}
--
2.20.1
- [PATCH 41/55] target/arm: Implement MVE VQDMULH, VQRDMULH (vector), (continued)
- [PATCH 41/55] target/arm: Implement MVE VQDMULH, VQRDMULH (vector), Peter Maydell, 2021/06/07
- [PATCH 44/55] target/arm: Implement MVE VQRSHL, Peter Maydell, 2021/06/07
- [PATCH 33/55] target/arm: Implement MVE VADD (scalar), Peter Maydell, 2021/06/07
- [PATCH 45/55] target/arm: Implement MVE VSHL insn, Peter Maydell, 2021/06/07
- [PATCH 46/55] target/arm: Implement MVE VRSHL, Peter Maydell, 2021/06/07
- [PATCH 51/55] target/arm: Implement MVE VADC, VSBC,
Peter Maydell <=
- [PATCH 35/55] target/arm: Implement MVE VHADD, VHSUB (scalar), Peter Maydell, 2021/06/07
- [PATCH 37/55] target/arm: Implement MVE VPST, Peter Maydell, 2021/06/07
- [PATCH 29/55] target/arm: Implement MVE VMLALDAV, Peter Maydell, 2021/06/07
- [PATCH 27/55] target/arm: Implement MVE VHADD, VHSUB, Peter Maydell, 2021/06/07