[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[Qemu-devel] [PATCH v3 10/15] hardfloat: support float32/64 addition and subtraction
From: |
Emilio G. Cota |
Subject: |
[Qemu-devel] [PATCH v3 10/15] hardfloat: support float32/64 addition and subtraction |
Date: |
Wed, 4 Apr 2018 19:11:10 -0400 |
Performance results (single and double precision) for fp-bench:
1. Intel(R) Core(TM) i7-6700K CPU @ 4.00GHz
- before:
add-single: 135.07 MFlops
add-double: 131.60 MFlops
sub-single: 130.04 MFlops
sub-double: 133.01 MFlops
- after:
add-single: 443.04 MFlops
add-double: 301.95 MFlops
sub-single: 411.36 MFlops
sub-double: 293.15 MFlops
2. ARM Aarch64 A57 @ 2.4GHz
- before:
add-single: 44.79 MFlops
add-double: 49.20 MFlops
sub-single: 44.55 MFlops
sub-double: 49.06 MFlops
- after:
add-single: 93.28 MFlops
add-double: 88.27 MFlops
sub-single: 91.47 MFlops
sub-double: 88.27 MFlops
3. IBM POWER8E @ 2.1 GHz
- before:
add-single: 72.59 MFlops
add-double: 72.27 MFlops
sub-single: 75.33 MFlops
sub-double: 70.54 MFlops
- after:
add-single: 112.95 MFlops
add-double: 201.11 MFlops
sub-single: 116.80 MFlops
sub-double: 188.72 MFlops
Note that the IBM and ARM machines benefit from having
HARDFLOAT_2F{32,64}_USE_FP set to 0. Otherwise their performance
can suffer significantly:
- IBM Power8:
add-single: [1] 54.94 vs [0] 116.37 MFlops
add-double: [1] 58.92 vs [0] 201.44 MFlops
- Aarch64 A57:
add-single: [1] 80.72 vs [0] 93.24 MFlops
add-double: [1] 82.10 vs [0] 88.18 MFlops
On the Intel machine, having 2F64 set to 1 pays off, but it
doesn't for 2F32:
- Intel i7-6700K:
add-single: [1] 285.79 vs [0] 426.70 MFlops
add-double: [1] 302.15 vs [0] 278.82 MFlops
Signed-off-by: Emilio G. Cota <address@hidden>
---
fpu/softfloat.c | 106 +++++++++++++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 98 insertions(+), 8 deletions(-)
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index 956b938..ca0b8ab 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -1080,8 +1080,8 @@ float16 __attribute__((flatten)) float16_add(float16 a, float16 b,
return float16_round_pack_canonical(pr, status);
}
-float32 __attribute__((flatten)) float32_add(float32 a, float32 b,
- float_status *status)
+static float32 QEMU_SOFTFLOAT_ATTR
+soft_float32_add(float32 a, float32 b, float_status *status)
{
FloatParts pa = float32_unpack_canonical(a, status);
FloatParts pb = float32_unpack_canonical(b, status);
@@ -1090,8 +1090,8 @@ float32 __attribute__((flatten)) float32_add(float32 a, float32 b,
return float32_round_pack_canonical(pr, status);
}
-float64 __attribute__((flatten)) float64_add(float64 a, float64 b,
- float_status *status)
+static float64 QEMU_SOFTFLOAT_ATTR
+soft_float64_add(float64 a, float64 b, float_status *status)
{
FloatParts pa = float64_unpack_canonical(a, status);
FloatParts pb = float64_unpack_canonical(b, status);
@@ -1110,8 +1110,8 @@ float16 __attribute__((flatten)) float16_sub(float16 a, float16 b,
return float16_round_pack_canonical(pr, status);
}
-float32 __attribute__((flatten)) float32_sub(float32 a, float32 b,
- float_status *status)
+static float32 QEMU_SOFTFLOAT_ATTR
+soft_float32_sub(float32 a, float32 b, float_status *status)
{
FloatParts pa = float32_unpack_canonical(a, status);
FloatParts pb = float32_unpack_canonical(b, status);
@@ -1120,8 +1120,8 @@ float32 __attribute__((flatten)) float32_sub(float32 a, float32 b,
return float32_round_pack_canonical(pr, status);
}
-float64 __attribute__((flatten)) float64_sub(float64 a, float64 b,
- float_status *status)
+static float64 QEMU_SOFTFLOAT_ATTR
+soft_float64_sub(float64 a, float64 b, float_status *status)
{
FloatParts pa = float64_unpack_canonical(a, status);
FloatParts pb = float64_unpack_canonical(b, status);
@@ -1130,6 +1130,96 @@ float64 __attribute__((flatten)) float64_sub(float64 a, float64 b,
return float64_round_pack_canonical(pr, status);
}
+static float float_add(float a, float b)
+{
+ return a + b;
+}
+
+static float float_sub(float a, float b)
+{
+ return a - b;
+}
+
+static double double_add(double a, double b)
+{
+ return a + b;
+}
+
+static double double_sub(double a, double b)
+{
+ return a - b;
+}
+
+static bool f32_addsub_post(float32 a, float32 b, const struct float_status *s)
+{
+ return !(float32_is_zero(a) && float32_is_zero(b));
+}
+
+static bool
+float_addsub_post(float a, float b, const struct float_status *s)
+{
+ return !(fpclassify(a) == FP_ZERO && fpclassify(b) == FP_ZERO);
+}
+
+static bool f64_addsub_post(float64 a, float64 b, const struct float_status *s)
+{
+ return !(float64_is_zero(a) && float64_is_zero(b));
+}
+
+static bool
+double_addsub_post(double a, double b, const struct float_status *s)
+{
+ return !(fpclassify(a) == FP_ZERO && fpclassify(b) == FP_ZERO);
+}
+
+static float32 float32_addsub(float32 a, float32 b, float_status *s,
+ float_op2_func_t hard, f32_op2_func_t soft)
+{
+ if (QEMU_HARDFLOAT_2F32_USE_FP) {
+ return float_gen2(a, b, s, hard, soft, float_is_zon2, float_addsub_post,
+ NULL, NULL);
+ } else {
+ return f32_gen2(a, b, s, hard, soft, f32_is_zon2, f32_addsub_post,
+ NULL, NULL);
+ }
+}
+
+static float64 float64_addsub(float64 a, float64 b, float_status *s,
+ double_op2_func_t hard, f64_op2_func_t soft)
+{
+ if (QEMU_HARDFLOAT_2F64_USE_FP) {
+ return double_gen2(a, b, s, hard, soft, double_is_zon2,
+ double_addsub_post, NULL, NULL);
+ } else {
+ return f64_gen2(a, b, s, hard, soft, f64_is_zon2, f64_addsub_post,
+ NULL, NULL);
+ }
+}
+
+float32 __attribute__((flatten))
+float32_add(float32 a, float32 b, float_status *s)
+{
+ return float32_addsub(a, b, s, float_add, soft_float32_add);
+}
+
+float32 __attribute__((flatten))
+float32_sub(float32 a, float32 b, float_status *s)
+{
+ return float32_addsub(a, b, s, float_sub, soft_float32_sub);
+}
+
+float64 __attribute__((flatten))
+float64_add(float64 a, float64 b, float_status *s)
+{
+ return float64_addsub(a, b, s, double_add, soft_float64_add);
+}
+
+float64 __attribute__((flatten))
+float64_sub(float64 a, float64 b, float_status *s)
+{
+ return float64_addsub(a, b, s, double_sub, soft_float64_sub);
+}
+
/*
* Returns the result of multiplying the floating-point values `a' and
* `b'. The operation is performed according to the IEC/IEEE Standard
--
2.7.4
- Re: [Qemu-devel] [PATCH v3 04/15] softfloat: add float{32, 64}_is_{de, }normal, (continued)
- [Qemu-devel] [PATCH v3 03/15] fp-test: add muladd variants, Emilio G. Cota, 2018/04/04
- [Qemu-devel] [PATCH v3 05/15] target/tricore: use float32_is_denormal, Emilio G. Cota, 2018/04/04
- [Qemu-devel] [PATCH v3 07/15] softfloat: rename canonicalize to sf_canonicalize, Emilio G. Cota, 2018/04/04
- [Qemu-devel] [PATCH v3 06/15] tests/fp: add fp-bench, a collection of simple floating point microbenchmarks, Emilio G. Cota, 2018/04/04
- [Qemu-devel] [PATCH v3 08/15] softfloat: add float{32, 64}_is_zero_or_normal, Emilio G. Cota, 2018/04/04
- [Qemu-devel] [PATCH v3 09/15] fpu: introduce hardfloat, Emilio G. Cota, 2018/04/04
- [Qemu-devel] [PATCH v3 11/15] hardfloat: support float32/64 multiplication, Emilio G. Cota, 2018/04/04
- [Qemu-devel] [PATCH v3 10/15] hardfloat: support float32/64 addition and subtraction, Emilio G. Cota <=
- [Qemu-devel] [PATCH v3 12/15] hardfloat: support float32/64 division, Emilio G. Cota, 2018/04/04
- [Qemu-devel] [PATCH v3 15/15] hardfloat: support float32/64 comparison, Emilio G. Cota, 2018/04/04
- [Qemu-devel] [PATCH v3 14/15] hardfloat: support float32/64 square root, Emilio G. Cota, 2018/04/04
- [Qemu-devel] [PATCH v3 13/15] hardfloat: support float32/64 fused multiply-add, Emilio G. Cota, 2018/04/04
- Re: [Qemu-devel] [PATCH v3 00/15] fp-test + hardfloat, no-reply, 2018/04/04