[lmi-commits] [lmi] master 02dde17 13/23: Avoid catastrophic cancellation


From: Greg Chicares
Subject: [lmi-commits] [lmi] master 02dde17 13/23: Avoid catastrophic cancellation
Date: Tue, 27 Jul 2021 21:59:52 -0400 (EDT)

branch: master
commit 02dde17c87515225084b4d846e39633fb6b9eba6
Author: Gregory W. Chicares <gchicares@sbcglobal.net>
Commit: Gregory W. Chicares <gchicares@sbcglobal.net>

    Avoid catastrophic cancellation
    
    The added unit test (find an arbitrary number in [-1.0e300, 1.0e300],
    using the arithmetic mean for bisection) is interesting, though it
    doesn't elicit "the full catastrophe" that has been seen when using
    binary64_midpoint() instead of the arithmetic mean.
---
 zero.hpp      | 44 ++++++++++++++++++++++++++++++++++++--------
 zero_test.cpp | 19 ++++++++++++++-----
 2 files changed, 50 insertions(+), 13 deletions(-)

diff --git a/zero.hpp b/zero.hpp
index bbe3e9c..9222a46 100644
--- a/zero.hpp
+++ b/zero.hpp
@@ -502,6 +502,7 @@ root_type lmi_root
             }
         double tol = 2.0 * epsilon * std::fabs(b) + t;
         double m = 0.5 * (c - b);
+        double n = std::midpoint(b, c); // "next" iterate
         if(0.0 == fb || std::fabs(m) <= tol) // Note 2.
             {
             if
@@ -526,12 +527,12 @@ root_type lmi_root
         if(std::fabs(e) < tol)
             {
             impetus = dithering_near_root;
-            d = e = m;
+            d = e = n - b;
             }
         else if(std::fabs(fa) <= std::fabs(fb))
             {
             impetus = secant_out_of_bounds;
-            d = e = m;
+            d = e = n - b;
             }
         else
             {
@@ -583,6 +584,7 @@ root_type lmi_root
             if(k0 && k1)
                 {
                 d = p / q;
+                n = b + p / q;
                 }
             else
                 {
@@ -591,14 +593,14 @@ root_type lmi_root
                     : k1 ? guarantee_linear_convergence
                     :      pis_aller
                     ;
-                d = e = m;
+                d = e = n - b;
                 }
             }
         a = b;
         fa = fb;
         if(tol < std::fabs(d))
             {
-            b += d;
+            b = n;
             }
         else if(0.0 < m)
             {
@@ -666,6 +668,26 @@ root_type decimal_root
 }
 
 /// An instrumented translation of Brent's reference implementation.
+///
+/// Deviation from the original ALGOL:
+///
+/// The ALGOL original calculates and stores a correction term (called
+/// 'i' on page 49 of AfMWD, but 'd' in the ALGOL) for bisection as
+/// well as for other interpolation techniques, then adds it to 'b'
+/// when appropriate. This can lead to a catastrophic cancellation,
+/// as in this actual example:
+///   -1.02311777153193876348e+49 b
+///   -0.0106034417457945805141   c
+///   -3.18454409903526645858e+23 binary64_midpoint(c, b)
+///    1.02311777153193876348e+49 binary64_midpoint(c, b) - b
+///    0.0                   b + (binary64_midpoint(c, b) - b)
+/// which iterates to a new point outside the known [c,b] bounds. Even
+/// though no such drastic example has been seen with the arithmetic
+/// mean that Brent uses, less drastic examples occur in unit tests.
+/// The catastrophic cancellation is conditionally avoided by storing
+/// the next iterate in new variable 'n' (for "next") whenever 'd' is
+/// calculated, and then assigning it directly to 'b' instead of
+/// incrementing 'b' by 'd'.
 
 template<typename FunctionalType>
 double brent_zero
@@ -693,7 +715,7 @@ double brent_zero
         << '\n'
         ;
 
-    double c, d, e, fa, fb, fc, tol, m, p, q, r, s;
+    double c, d, e, fa, fb, fc, tol, m, n, p, q, r, s;
 
     auto expatiate = [&]()
         {
@@ -743,18 +765,19 @@ double brent_zero
         }
     tol = 2.0 * DBL_EPSILON * std::fabs(b) + t;
     m = 0.5 * (c - b);
+    n = std::midpoint(b, c);
     if(tol < std::fabs(m) && 0.0 != fb)
         {
         // See if a bisection is forced.
         if(std::fabs(e) < tol)
             {
             impetus = dithering_near_root;
-            d = e = m;
+            d = e = n - b;
             }
         else if(std::fabs(fa) <= std::fabs(fb))
             {
             impetus = secant_out_of_bounds;
-            d = e = m;
+            d = e = n - b;
             }
         else
             {
@@ -790,6 +813,7 @@ double brent_zero
             if(k0 && k1)
                 {
                 d = p / q;
+                n = b + p / q;
                 }
             else
                 {
@@ -798,13 +822,17 @@ double brent_zero
                     : k1 ? guarantee_linear_convergence
                     :      pis_aller
                     ;
-                d = e = m;
+                d = e = n - b;
                 }
             }
         a = b; fa = fb;
         if(tol < std::fabs(d))
             {
+#if 0  // See "catastrophic cancellation" above.
             b += d;
+#else  // 1
+            b = n;
+#endif // 1
             }
         else if(0.0 < m)
             {
diff --git a/zero_test.cpp b/zero_test.cpp
index be3dbd9..6e4f47e 100644
--- a/zero_test.cpp
+++ b/zero_test.cpp
@@ -824,15 +824,15 @@ void test_various_functions()
 //  test_a_function        (f01, root_01, -1.0, 4.0, 0.5 * 1.0e-19, __LINE__);
 //  test_a_decimal_function(f01, root_01, -1.0, 4.0, 18, __LINE__, 168);
 //  test_a_function        (f01, root_01, -1.0, 4.0, 0.5 * 1.0e-18, __LINE__);
-    test_a_decimal_function(f01, root_01, -1.0, 4.0, 17, __LINE__, 163);
+    test_a_decimal_function(f01, root_01, -1.0, 4.0, 17, __LINE__, 167);
     test_a_function        (f01, root_01, -1.0, 4.0, 0.5 * 1.0e-17, __LINE__);
-    test_a_decimal_function(f01, root_01, -1.0, 4.0, 16, __LINE__, 156);
+    test_a_decimal_function(f01, root_01, -1.0, 4.0, 16, __LINE__, 155);
     test_a_function        (f01, root_01, -1.0, 4.0, 0.5 * 1.0e-16, __LINE__);
-    test_a_decimal_function(f01, root_01, -1.0, 4.0, 15, __LINE__, 142);
+    test_a_decimal_function(f01, root_01, -1.0, 4.0, 15, __LINE__, 143);
     test_a_function        (f01, root_01, -1.0, 4.0, 0.5 * 1.0e-15, __LINE__);
     test_a_decimal_function(f01, root_01, -1.0, 4.0, 14, __LINE__, 128);
     test_a_function        (f01, root_01, -1.0, 4.0, 0.5 * 1.0e-14, __LINE__);
-    test_a_decimal_function(f01, root_01, -1.0, 4.0, 12, __LINE__, 112);
+    test_a_decimal_function(f01, root_01, -1.0, 4.0, 12, __LINE__, 117);
     test_a_function        (f01, root_01, -1.0, 4.0, 0.5 * 1.0e-12, __LINE__);
 
     auto f02 = [](double x) {return std::pow(x - 1.7, 17.0);};
@@ -929,7 +929,7 @@ void test_hodgepodge()
     // rather than a theoretical maximum. Perhaps they'll always
     // succeed, because floating-point behavior is determinate;
     // but small variations betoken no catastrophe.
-    LMI_TEST(169 <= r.n_eval && r.n_eval <= 173); // weak
+    LMI_TEST(168 <= r.n_eval && r.n_eval <= 172); // weak
 
     d = brent_zero(eq_2_1, -100.0, 100.0, 0.5);
     zeta = -100.0;
@@ -990,6 +990,15 @@ void test_hodgepodge()
     LMI_TEST(r.n_eval <= 3023);
     // Here, decimal_root() always chooses the bisection technique.
     LMI_TEST_EQUAL(55, r.n_eval); // weak
+
+    std::ostringstream oss;
+    r = lmi_root(signum_offset, -1.0e300, 1.0e300, 5.0e-19, oss);
+    LMI_TEST(root_is_valid == r.validity);
+    LMI_TEST(materially_equal(-1.0 / 3.0, r.root));
+    LMI_TEST(r.n_eval <= 3023);
+    LMI_TEST_EQUAL(1052, r.n_eval); // weak
+    // Display this to investigate further:
+//  std::cout << oss.str() << std::endl;
 }
 
 void test_former_rounding_problem()

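For experimenting outside the lmi tree, here is a rough self-contained
analogue (not the actual zero_test.cpp code) of the scenario the new unit
test exercises: bisection via std::midpoint() over [-1.0e300, 1.0e300].
The sign function below merely stands in for zero_test.cpp's signum_offset,
whose definition is not shown in this commit, and the iteration count should
land in the same ballpark as the 1052 evaluations the new test reports,
though not necessarily match it exactly.

    // bisect_demo.cpp: bisection over a huge bracket (compile with -std=c++20).
    #include <cfloat>
    #include <cmath>
    #include <cstdio>
    #include <numeric>   // std::midpoint

    int main()
    {
        double const target = -1.0 / 3.0;
        // Stand-in for zero_test.cpp's signum_offset: sign of (x - target).
        auto f = [target](double x) {return (target < x) - (x < target);};
        double b = -1.0e300;
        double c =  1.0e300;
        double const t = 5.0e-19; // absolute tolerance, as in the new test
        int fb = f(b);
        int n_iter = 0;
        while(2.0 * DBL_EPSILON * std::fabs(b) + t < std::fabs(c - b))
            {
            double const n = std::midpoint(b, c); // no overflow; stays inside [b, c]
            int const fn = f(n);
            ++n_iter;
            if(fn == fb) {b = n; fb = fn;} else {c = n;} // keep the sign change bracketed
            }
        std::printf("root ~%.17g after %d bisections\n", std::midpoint(b, c), n_iter);
        return 0;
    }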

