[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[PULL 086/114] target/arm: Macroize helper_gvec_{s,u}dot_{b,h}
From: Peter Maydell
Subject: [PULL 086/114] target/arm: Macroize helper_gvec_{s,u}dot_{b,h}
Date: Tue, 25 May 2021 16:02:56 +0100
From: Richard Henderson <richard.henderson@linaro.org>
We're about to add more variations on this theme.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-65-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
target/arm/vec_helper.c | 82 ++++++++++-------------------------------
1 file changed, 20 insertions(+), 62 deletions(-)
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index 8b7269d8e1e..cddf095c74a 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -543,73 +543,31 @@ void HELPER(sve2_sqrdmulh_idx_d)(void *vd, void *vn, void *vm, uint32_t desc)
/* Integer 8 and 16-bit dot-product.
*
* Note that for the loops herein, host endianness does not matter
- * with respect to the ordering of data within the 64-bit lanes.
+ * with respect to the ordering of data within the quad-width lanes.
* All elements are treated equally, no matter where they are.
*/
-void HELPER(gvec_sdot_b)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
-{
- intptr_t i, opr_sz = simd_oprsz(desc);
- int32_t *d = vd, *a = va;
- int8_t *n = vn, *m = vm;
-
- for (i = 0; i < opr_sz / 4; ++i) {
- d[i] = (a[i] +
- n[i * 4 + 0] * m[i * 4 + 0] +
- n[i * 4 + 1] * m[i * 4 + 1] +
- n[i * 4 + 2] * m[i * 4 + 2] +
- n[i * 4 + 3] * m[i * 4 + 3]);
- }
- clear_tail(d, opr_sz, simd_maxsz(desc));
+#define DO_DOT(NAME, TYPED, TYPEN, TYPEM) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
+{ \
+ intptr_t i, opr_sz = simd_oprsz(desc); \
+ TYPED *d = vd, *a = va; \
+ TYPEN *n = vn; \
+ TYPEM *m = vm; \
+ for (i = 0; i < opr_sz / sizeof(TYPED); ++i) { \
+ d[i] = (a[i] + \
+ (TYPED)n[i * 4 + 0] * m[i * 4 + 0] + \
+ (TYPED)n[i * 4 + 1] * m[i * 4 + 1] + \
+ (TYPED)n[i * 4 + 2] * m[i * 4 + 2] + \
+ (TYPED)n[i * 4 + 3] * m[i * 4 + 3]); \
+ } \
+ clear_tail(d, opr_sz, simd_maxsz(desc)); \
}
-void HELPER(gvec_udot_b)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
-{
- intptr_t i, opr_sz = simd_oprsz(desc);
- uint32_t *d = vd, *a = va;
- uint8_t *n = vn, *m = vm;
-
- for (i = 0; i < opr_sz / 4; ++i) {
- d[i] = (a[i] +
- n[i * 4 + 0] * m[i * 4 + 0] +
- n[i * 4 + 1] * m[i * 4 + 1] +
- n[i * 4 + 2] * m[i * 4 + 2] +
- n[i * 4 + 3] * m[i * 4 + 3]);
- }
- clear_tail(d, opr_sz, simd_maxsz(desc));
-}
-
-void HELPER(gvec_sdot_h)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
-{
- intptr_t i, opr_sz = simd_oprsz(desc);
- int64_t *d = vd, *a = va;
- int16_t *n = vn, *m = vm;
-
- for (i = 0; i < opr_sz / 8; ++i) {
- d[i] = (a[i] +
- (int64_t)n[i * 4 + 0] * m[i * 4 + 0] +
- (int64_t)n[i * 4 + 1] * m[i * 4 + 1] +
- (int64_t)n[i * 4 + 2] * m[i * 4 + 2] +
- (int64_t)n[i * 4 + 3] * m[i * 4 + 3]);
- }
- clear_tail(d, opr_sz, simd_maxsz(desc));
-}
-
-void HELPER(gvec_udot_h)(void *vd, void *vn, void *vm, void *va, uint32_t desc)
-{
- intptr_t i, opr_sz = simd_oprsz(desc);
- uint64_t *d = vd, *a = va;
- uint16_t *n = vn, *m = vm;
-
- for (i = 0; i < opr_sz / 8; ++i) {
- d[i] = (a[i] +
- (uint64_t)n[i * 4 + 0] * m[i * 4 + 0] +
- (uint64_t)n[i * 4 + 1] * m[i * 4 + 1] +
- (uint64_t)n[i * 4 + 2] * m[i * 4 + 2] +
- (uint64_t)n[i * 4 + 3] * m[i * 4 + 3]);
- }
- clear_tail(d, opr_sz, simd_maxsz(desc));
-}
+DO_DOT(gvec_sdot_b, int32_t, int8_t, int8_t)
+DO_DOT(gvec_udot_b, uint32_t, uint8_t, uint8_t)
+DO_DOT(gvec_sdot_h, int64_t, int16_t, int16_t)
+DO_DOT(gvec_udot_h, uint64_t, uint16_t, uint16_t)
void HELPER(gvec_sdot_idx_b)(void *vd, void *vn, void *vm,
void *va, uint32_t desc)
--
2.20.1
- [PULL 072/114] target/arm: Pass separate addend to FCMLA helpers, (continued)
- [PULL 072/114] target/arm: Pass separate addend to FCMLA helpers, Peter Maydell, 2021/05/25
- [PULL 076/114] target/arm: Implement SVE2 integer multiply-add (indexed), Peter Maydell, 2021/05/25
- [PULL 077/114] target/arm: Implement SVE2 saturating multiply-add high (indexed), Peter Maydell, 2021/05/25
- [PULL 081/114] target/arm: Implement SVE2 saturating multiply high (indexed), Peter Maydell, 2021/05/25
- [PULL 082/114] target/arm: Implement SVE2 multiply-add long (indexed), Peter Maydell, 2021/05/25
- [PULL 075/114] target/arm: Implement SVE2 integer multiply (indexed), Peter Maydell, 2021/05/25
- [PULL 079/114] target/arm: Implement SVE2 saturating multiply (indexed), Peter Maydell, 2021/05/25
- [PULL 078/114] target/arm: Implement SVE2 saturating multiply-add (indexed), Peter Maydell, 2021/05/25
- [PULL 083/114] target/arm: Implement SVE2 integer multiply long (indexed), Peter Maydell, 2021/05/25
- [PULL 085/114] target/arm: Implement SVE2 complex integer dot product, Peter Maydell, 2021/05/25
- [PULL 086/114] target/arm: Macroize helper_gvec_{s,u}dot_{b,h}, Peter Maydell <=
- [PULL 080/114] target/arm: Implement SVE2 signed saturating doubling multiply high, Peter Maydell, 2021/05/25
- [PULL 090/114] target/arm: Implement SVE2 crypto unary operations, Peter Maydell, 2021/05/25
- [PULL 073/114] target/arm: Split out formats for 2 vectors + 1 index, Peter Maydell, 2021/05/25
- [PULL 088/114] target/arm: Implement SVE mixed sign dot product (indexed), Peter Maydell, 2021/05/25
- [PULL 087/114] target/arm: Macroize helper_gvec_{s,u}dot_idx_{b,h}, Peter Maydell, 2021/05/25
- [PULL 093/114] target/arm: Implement SVE2 TBL, TBX, Peter Maydell, 2021/05/25
- [PULL 089/114] target/arm: Implement SVE mixed sign dot product, Peter Maydell, 2021/05/25
- [PULL 091/114] target/arm: Implement SVE2 crypto destructive binary operations, Peter Maydell, 2021/05/25
- [PULL 092/114] target/arm: Implement SVE2 crypto constructive binary operations, Peter Maydell, 2021/05/25
- [PULL 094/114] target/arm: Implement SVE2 FCVTNT, Peter Maydell, 2021/05/25