Re: [PATCH 29/72] softfloat: Move pick_nan to softfloat-parts.c.inc


From: David Hildenbrand
Subject: Re: [PATCH 29/72] softfloat: Move pick_nan to softfloat-parts.c.inc
Date: Wed, 12 May 2021 20:16:33 +0200
User-agent: Mozilla/5.0 (X11; Linux x86_64; rv:78.0) Gecko/20100101 Thunderbird/78.8.1

On 08.05.21 03:47, Richard Henderson wrote:
At the same time, convert to pointers, rename to parts$N_pick_nan
and define a macro for parts_pick_nan using QEMU_GENERIC.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
  fpu/softfloat.c           | 62 ++++++++++++++++++++++-----------------
  fpu/softfloat-parts.c.inc | 25 ++++++++++++++++
  2 files changed, 60 insertions(+), 27 deletions(-)

diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index 5b26696078..77efaedeaa 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -713,10 +713,39 @@ static void parts128_return_nan(FloatParts128 *a, float_status *s);
#define parts_return_nan(P, S) PARTS_GENERIC_64_128(return_nan, P)(P, S)
+static FloatParts64 *parts64_pick_nan(FloatParts64 *a, FloatParts64 *b,
+                                      float_status *s);
+static FloatParts128 *parts128_pick_nan(FloatParts128 *a, FloatParts128 *b,
+                                        float_status *s);
+
+#define parts_pick_nan(A, B, S)    PARTS_GENERIC_64_128(pick_nan, A)(A, B, S)
+
  /*
   * Helper functions for softfloat-parts.c.inc, per-size operations.
   */
+#define FRAC_GENERIC_64_128(NAME, P) \
+    QEMU_GENERIC(P, (FloatParts128 *, frac128_##NAME), frac64_##NAME)
+
+static int frac64_cmp(FloatParts64 *a, FloatParts64 *b)
+{
+    return a->frac == b->frac ? 0 : a->frac < b->frac ? -1 : 1;
+}
+
+static int frac128_cmp(FloatParts128 *a, FloatParts128 *b)
+{
+    uint64_t ta = a->frac_hi, tb = b->frac_hi;
+    if (ta == tb) {
+        ta = a->frac_lo, tb = b->frac_lo;
+        if (ta == tb) {
+            return 0;
+        }
+    }
+    return ta < tb ? -1 : 1;
+}
+
+#define frac_cmp(A, B)  FRAC_GENERIC_64_128(cmp, A)(A, B)
+
  static void frac128_shl(FloatParts128 *a, int c)
  {
      shift128Left(a->frac_hi, a->frac_lo, c, &a->frac_hi, &a->frac_lo);
@@ -918,27 +947,6 @@ static FloatParts64 round_canonical(FloatParts64 p, float_status *s,
      return p;
  }
-static FloatParts64 pick_nan(FloatParts64 a, FloatParts64 b, float_status *s)
-{
-    if (is_snan(a.cls) || is_snan(b.cls)) {
-        float_raise(float_flag_invalid, s);
-    }
-
-    if (s->default_nan_mode) {
-        parts_default_nan(&a, s);
-    } else {
-        if (pickNaN(a.cls, b.cls,
-                    a.frac > b.frac ||
-                    (a.frac == b.frac && a.sign < b.sign), s)) {
-            a = b;
-        }
-        if (is_snan(a.cls)) {
-            parts_silence_nan(&a, s);
-        }
-    }
-    return a;
-}
-
  static FloatParts64 pick_nan_muladd(FloatParts64 a, FloatParts64 b, FloatParts64 c,
                                    bool inf_zero, float_status *s)
  {
@@ -1106,7 +1114,7 @@ static FloatParts64 addsub_floats(FloatParts64 a, FloatParts64 b, bool subtract,
              return a;
          }
          if (is_nan(a.cls) || is_nan(b.cls)) {
-            return pick_nan(a, b, s);
+            return *parts_pick_nan(&a, &b, s);
          }
          if (a.cls == float_class_inf) {
              if (b.cls == float_class_inf) {
@@ -1144,7 +1152,7 @@ static FloatParts64 addsub_floats(FloatParts64 a, FloatParts64 b, bool subtract,
              return a;
          }
          if (is_nan(a.cls) || is_nan(b.cls)) {
-            return pick_nan(a, b, s);
+            return *parts_pick_nan(&a, &b, s);
          }
          if (a.cls == float_class_inf || b.cls == float_class_zero) {
              return a;
@@ -1360,7 +1368,7 @@ static FloatParts64 mul_floats(FloatParts64 a, FloatParts64 b, float_status *s)
      }
      /* handle all the NaN cases */
      if (is_nan(a.cls) || is_nan(b.cls)) {
-        return pick_nan(a, b, s);
+        return *parts_pick_nan(&a, &b, s);
      }
      /* Inf * Zero == NaN */
      if ((a.cls == float_class_inf && b.cls == float_class_zero) ||
@@ -1887,7 +1895,7 @@ static FloatParts64 div_floats(FloatParts64 a, FloatParts64 b, float_status *s)
      }
      /* handle all the NaN cases */
      if (is_nan(a.cls) || is_nan(b.cls)) {
-        return pick_nan(a, b, s);
+        return *parts_pick_nan(&a, &b, s);
      }
      /* 0/0 or Inf/Inf */
      if (a.cls == b.cls
@@ -3295,14 +3303,14 @@ static FloatParts64 minmax_floats(FloatParts64 a, FloatParts64 b, bool ismin,
               * the invalid exception is raised.
               */
              if (is_snan(a.cls) || is_snan(b.cls)) {
-                return pick_nan(a, b, s);
+                return *parts_pick_nan(&a, &b, s);
              } else if (is_nan(a.cls) && !is_nan(b.cls)) {
                  return b;
              } else if (is_nan(b.cls) && !is_nan(a.cls)) {
                  return a;
              }
          }
-        return pick_nan(a, b, s);
+        return *parts_pick_nan(&a, &b, s);
      } else {
          int a_exp, b_exp;
diff --git a/fpu/softfloat-parts.c.inc b/fpu/softfloat-parts.c.inc
index 2a3075d6fe..11a71650f7 100644
--- a/fpu/softfloat-parts.c.inc
+++ b/fpu/softfloat-parts.c.inc
@@ -35,3 +35,28 @@ static void partsN(return_nan)(FloatPartsN *a, float_status *s)
          g_assert_not_reached();
      }
  }
+
+static FloatPartsN *partsN(pick_nan)(FloatPartsN *a, FloatPartsN *b,
+                                     float_status *s)
+{
+    if (is_snan(a->cls) || is_snan(b->cls)) {
+        float_raise(float_flag_invalid, s);
+    }
+
+    if (s->default_nan_mode) {
+        parts_default_nan(a, s);
+    } else {
+        int cmp = frac_cmp(a, b);
+        if (cmp == 0) {
+            cmp = a->sign < b->sign;
+        }
+
+        if (pickNaN(a->cls, b->cls, cmp > 0, s)) {
+            a = b;
+        }
+        if (is_snan(a->cls)) {
+            parts_silence_nan(a, s);
+        }
+    }
+    return a;
+}
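
The parts_pick_nan(A, B, S) macro added above selects parts64_pick_nan or parts128_pick_nan from the static type of its first argument. As a minimal, self-contained sketch of that style of type-generic dispatch (the Parts64/Parts128 types and pick helpers below are invented for illustration and are not the QEMU definitions; QEMU_GENERIC itself may be implemented differently):

/*
 * Illustrative sketch only: _Generic-based dispatch in the spirit of
 * QEMU_GENERIC / PARTS_GENERIC_64_128.  All names here are invented.
 */
#include <stdio.h>

typedef struct { unsigned long long frac; } Parts64;
typedef struct { unsigned long long frac_hi, frac_lo; } Parts128;

static Parts64 *pick64(Parts64 *a, Parts64 *b)
{
    return a->frac >= b->frac ? a : b;
}

static Parts128 *pick128(Parts128 *a, Parts128 *b)
{
    return a->frac_hi >= b->frac_hi ? a : b;
}

/* Dispatch on the static type of A, as parts_pick_nan(A, B, S) does. */
#define pick(A, B) \
    _Generic((A), Parts128 *: pick128, default: pick64)(A, B)

int main(void)
{
    Parts64 a = { 3 }, b = { 5 };
    Parts128 c = { 7, 0 }, d = { 2, 0 };

    printf("%llu\n", pick(&a, &b)->frac);     /* 5, via pick64  */
    printf("%llu\n", pick(&c, &d)->frac_hi);  /* 7, via pick128 */
    return 0;
}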


I find the "*parts_pick_nan(&a, &b, s);" part a little ugly to read, but as long as this function isn't inlined, passing by value instead wouldn't give the compiler much to optimize, so

Reviewed-by: David Hildenbrand <david@redhat.com>
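
To illustrate the point about the dereference (a toy sketch with an invented Parts type and pick() helper, not the softfloat code): the out-of-line callee hands back a pointer to whichever operand it chose, so the caller's single dereference is the only struct copy, whereas a by-value version of a non-inlined function would copy both operands into the call and the result back out.

/*
 * Toy illustration only -- invented Parts type and helper, not QEMU code.
 * Shows the "*pick(&a, &b)" pattern discussed above.
 */
#include <stdio.h>

typedef struct { unsigned long long frac; int sign; } Parts;

/* Out-of-line: returns a pointer to one operand, copies nothing. */
static Parts *pick(Parts *a, Parts *b)
{
    return a->frac >= b->frac ? a : b;
}

int main(void)
{
    Parts a = { 7, 0 }, b = { 9, 1 };
    Parts r = *pick(&a, &b);             /* one struct copy, at the caller */
    printf("%llu %d\n", r.frac, r.sign); /* prints: 9 1 */
    return 0;
}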

--
Thanks,

David / dhildenb



