(PC-12xx~14xx) qthsh Tanh-Sinh quadrature
03-28-2021, 02:02 PM (This post was last modified: 04-26-2022 12:23 AM by robve.)
Post: #1
(PC-12xx~14xx) qthsh Tanh-Sinh quadrature
TANH-SINH QUADRATURE and COMBINED TANH-SINH, EXP-SINH, SINH-SINH QUADRATURES
Estimate definite integral over open interval with improved and optimized Tanh-Sinh quadrature

Credit: Michalski & Mosig Tanh-Sinh rule, improved in this thread and in the accompanying technical paper
For details, see the technical paper: https://www.genivia.com/files/qthsh.pdf
See also: https://newtonexcelbach.com/2020/10/29/n...ure-v-5-0/

For SHARP PC-12xx to 14xx

373 bytes BASIC image (PC-1350) "qthsh" optimized Tanh-Sinh
936 bytes BASIC image (PC-1350) "quad" optimized combined Tanh-Sinh, Exp-Sinh, Sinh-Sinh

Precision: 1E-9 (adjustable)
Levels: N=6 (adjustable)

Example:
300 "F1" Y=4/(1+X*X): RETURN
RUN
f=F1
a=0
b=1
3.141592653
310 "F2" Y=ATN(1/X): RETURN
RADIAN
RUN
f=F2
a=0
b=1
1.131971754
320 "F3" Y=COS X*LN X: RETURN
RADIAN
RUN
f=F3
a=0
b=1
-9.460830706E-01
330 "F4" Y=X^-.8: RETURN
RUN
f=F4
a=0
b=1
5


Code:
' TANH-SINH QUADRATURE
' Estimate definite integral over open interval with improved and optimized Tanh-Sinh quadrature
' For SHARP PC-12xx to 14xx by Robert van Engelen
' Credit:
'   Michalski & Mosig Tanh-Sinh rule
' See also:
'   https://www.genivia.com/files/qthsh.pdf
'   https://newtonexcelbach.com/2020/10/29/numerical-integration-with-tanh-sinh-quadrature-v-5-0/

' Functions to integrate are defined with labels "F1", "F2", ... and should return Y given X

' Algorithm:
'   double qthsh(double (*f)(double), double a, double b, double n, double eps) {
'     double c = (a+b)/2; // center (mean)
'     double d = (b-a)/2; // half distance
'     double s = f(c);
'     double e, v, h = 2;
'     int k = 0;
'     if (n <= 0) // use default levels n=6
'       n = 6; // 6 is optimal, 7 just as good taking longer
'     if (eps <= 0) // use default eps=1E-9
'       eps = 1E-9;
'     do {
'       double p = 0, q, fp = 0, fm = 0, t, eh;
'       h /= 2;
'       eh = exp(h);
'       t = eh;
'       if (k > 0)
'         eh *= eh;
'       do {
'         double u = exp(1/t-t);      // = exp(-2*sinh(j*h)) = 1/exp(sinh(j*h))^2
'         double r = 2*u/(1+u);       // = 1 - tanh(sinh(j*h))
'         double w = (t+1/t)*r/(1+u); // = cosh(j*h)/cosh(sinh(j*h))^2
'         double x = d*r;
'         if (a+x > a) {              // if too close to a then reuse previous fp
'           double y = f(a+x);
'           if (isfinite(y))
'             fp = y;                 // if f(x) is finite, add to local sum
'         }
'         if (b-x < b) {              // if too close to b then reuse previous fm
'           double y = f(b-x);
'           if (isfinite(y))
'             fm = y;                 // if f(x) is finite, add to local sum
'         }
'         q = w*(fp+fm);
'         p += q;
'         t *= eh;
'       } while (fabs(q) > eps*fabs(p));
'       v = s-p;
'       s += p;
'       ++k;
'     } while (fabs(v) > 10*eps*fabs(s) && k <= n);
'     e = fabs(v)/(fabs(s)+eps);
'     return d*s*h; // result with estimated relative error e
'   }

' VARIABLES
'  A,B     range
'  F$      function label to integrate
'  Y       result with error E
'  E       estimated relative error of the result
'  N       levels (up to 6 or 7)
'  C       (a+b)/2 center
'  D       (b-a)/2 half distance
'  H       step size h=2^-k
'  K       level counter
'  P,Q,S   quadrature sums
'  T       exp(j*h)
'  L       fp=f(a+x)
'  M       fm=f(b-x)
'  J,R,U,X scratch

100 "QTHSH" E=1E-9,N=6: INPUT "f=F";F$: F$="F"+F$
110 INPUT "a=";A
120 INPUT "b=";B
' init
130 C=(A+B)/2,D=(B-A)/2,X=C: GOSUB F$: S=Y,H=1,K=0
' outer loop
140 J=1,P=0,L=0,M=0
' inner loop
150 T=EXP(J*H),U=EXP(1/T-T),R=2*U/(1+U)
160 X=A+D*R: IF X>A GOSUB F$: L=Y
170 X=B-D*R: IF X<B GOSUB F$: M=Y
180 Q=(T+1/T)*R/(1+U)*(L+M),P=P+Q,J=J+1+(K>0)
190 IF ABS Q>E*ABS P GOTO 150
' exit inner loop
200 X=S-P,S=S+P,K=K+1
210 IF ABS X>10*E*ABS S IF K<=N LET H=H/2: GOTO 140
' exit outer loop, output result (and relative error estimate if >E)
220 Y=D*S*H,U=ABS X/(ABS S+E)
230 IF U>E LET E=U: PRINT Y,E: END
240 E=U: PRINT Y: END
' For machines with ON ERROR GOTO such as PC-1475, update the following:
' 100 "QTHSH" ON ERROR GOTO 290: E=1E-9,N=6: INPUT "f=F";F$: F$="F"+F$
' 160 X=A+D*R,Y=L: IF X>A GOSUB F$: L=Y
' 170 X=B-D*R,Y=M: IF X<B GOSUB F$: M=Y
' 290 IF ERN=2 RETURN

Code:
' COMBINED TANH-SINH, EXP-SINH, SINH-SINH QUADRATURE
' Estimate integral over open interval with improved Tanh-Sinh, Exp-Sinh and Sinh-Sinh quadratures
' For SHARP PC-12xx to 14xx by Robert van Engelen
' Credit:
'   Michalski & Mosig Tanh-Sinh rule
' See also:
'   https://www.genivia.com/files/qthsh.pdf
'   https://newtonexcelbach.com/2020/10/29/numerical-integration-with-tanh-sinh-quadrature-v-5-0/

' Functions to integrate are defined with labels "F1", "F2", ... and should return Y given X

' Algorithm:
'   double quad(double (*f)(double), double a, double b, double n, double eps) {
'     double c = 0, d = 1, s, sign = 1, e, v, h = 2;
'     int k = 0, mode = 0; // Tanh-Sinh = 0, Exp-Sinh = 1, Sinh-Sinh = 2
'     if (b < a) { // swap bounds
'       v = b;
'       b = a;
'       a = v;
'       sign = -1;
'     }
'     if (isfinite(a) && isfinite(b)) {
'       c = (a+b)/2;
'       d = (b-a)/2;
'       v = c;
'     }
'     else if (isfinite(a)) {
'       mode = 1; // Exp-Sinh
'       c = a;
'       v = a+d;
'     }
'     else if (isfinite(b)) {
'       mode = 1; // Exp-Sinh
'       c = b;
'       v = b-d;
'       d = -d;
'       sign = -sign;
'     }
'     else {
'       mode = 2; // Sinh-Sinh
'       v = 0;
'     }
'     s = f(v);
'     do {
'       double p = 0, q, fp = 0, fm = 0, t, eh;
'       h /= 2;
'       t = eh = exp(h);
'       if (k > 0)
'         eh *= eh;
'       if (mode == 0) {                // Tanh-Sinh
'         do {
'           double u = exp(1/t-t);      // = exp(-2*sinh(j*h)) = 1/exp(sinh(j*h))^2
'           double r = 2*u/(1+u);       // = 1 - tanh(sinh(j*h))
'           double w = (t+1/t)*r/(1+u); // = cosh(j*h)/cosh(sinh(j*h))^2
'           double x = d*r;
'           if (a+x > a) {              // if too close to a then reuse previous fp
'             double y = f(a+x);
'             if (isfinite(y))
'               fp = y;                 // if f(x) is finite, add to local sum
'           }
'           if (b-x < b) {              // if too close to b then reuse previous fm
'             double y = f(b-x);
'             if (isfinite(y))
'               fm = y;                 // if f(x) is finite, add to local sum
'           }
'           q = w*(fp+fm);
'           p += q;
'           t *= eh;
'         } while (fabs(q) > eps*fabs(p));
'       }
'       else {
'         t /= 2;
'         do {
'           double r = exp(t-.25/t); // = exp(sinh(j*h))
'           double x, y, w = r;
'           q = 0;
'           if (mode == 1) {         // Exp-Sinh
'             x = c + d/r;
'             if (x == c)            // if x hit the finite endpoint then break
'               break;
'             y = f(x);
'             if (isfinite(y))       // if f(x) is finite, add to local sum
'               q += y/w;
'           }
'           else {                   // Sinh-Sinh
'             r = (r-1/r)/2;         // = sinh(sinh(j*h))
'             w = (w+1/w)/2;         // = cosh(sinh(j*h))
'             x = c - d*r;
'             y = f(x);
'             if (isfinite(y))       // if f(x) is finite, add to local sum
'               q += y*w;
'           }
'           x = c + d*r;
'           y = f(x);
'           if (isfinite(y))         // if f(x) is finite, add to local sum
'             q += y*w;
'           q *= t+.25/t;            // q *= cosh(j*h)
'           p += q;
'           t *= eh;
'         } while (fabs(q) > eps*fabs(p));
'       }
'       v = s-p;
'       s += p;
'       ++k;
'     } while (fabs(v) > 10*eps*fabs(s) && k <= n);
'     e = fabs(v)/(fabs(s)+eps);
'     return sign*d*s*h; // result with estimated relative error e
'   }

' VARIABLES
'  A,B     range, -9E99 and 9E99 are -inf and +inf
'  F$      function label to integrate
'  Y       result with error E
'  E       estimated relative error of the result
'  N       levels (up to 6 or 7)
'  C       center
'  D       half distance (Tanh-Sinh) or D=1 (Exp-Sinh)
'  G       sign
'  H       step size h=2^-k
'  K       level counter
'  P,Q,S   quadrature sums
'  T       exp(j*h)
'  L       f(x) point
'  M       f(x) point
'  J,R,U,X scratch

100 "QUAD" E=1E-9,N=6: INPUT "f=F";F$: F$="F"+F$
110 INPUT "a=";A
120 INPUT "b=";B
' init and swap bounds if b<a
130 D=1,G=1,H=1,K=0: IF B<A LET X=A,A=B,B=X,G=-1
140 IF ABS A<9E99 IF ABS B<9E99 GOTO 180
150 IF ABS A<9E99 LET C=A,X=A+D: GOTO 300
160 IF ABS B<9E99 LET C=B,X=B-D,D=-D,G=-G: GOTO 300
170 GOTO 400
' Tanh-Sinh
180 C=(A+B)/2,D=(B-A)/2,X=C: GOSUB F$: S=Y
' outer loop
190 J=1,P=0,L=0,M=0
' inner loop
200 T=EXP(J*H),U=EXP(1/T-T),R=2*U/(1+U)
210 X=A+D*R: IF X>A GOSUB F$: L=Y
220 X=B-D*R: IF X<B GOSUB F$: M=Y
230 Q=(T+1/T)*R/(1+U)*(L+M),P=P+Q,J=J+1+(K>0)
240 IF ABS Q>E*ABS P GOTO 200
' exit inner loop
250 X=S-P,S=S+P,K=K+1
260 IF ABS X>10*E*ABS S IF K<=N LET H=H/2: GOTO 190
' exit, output result (and relative error estimate if >E)
270 Y=G*D*S*H,U=ABS X/(ABS S+E)
280 IF U>E LET E=U: PRINT Y,E: END
290 E=U: PRINT Y: END
' Exp-Sinh
300 GOSUB F$: S=Y
' outer loop
310 J=1,P=0
' inner loop
320 T=EXP(J*H)/2,U=EXP(T-.25/T)
330 X=C+D/U: IF X=C GOTO 370
340 GOSUB F$: L=Y/U,X=C+D*U: GOSUB F$: M=Y*U
350 Q=(T+.25/T)*(L+M),P=P+Q,J=J+1+(K>0)
360 IF ABS Q>E*ABS P GOTO 320
' exit inner loop
370 X=S-P,S=S+P,K=K+1
380 IF ABS X>10*E*ABS S IF K<=N LET H=H/2: GOTO 310
390 GOTO 270
' Sinh-Sinh
400 X=0: GOSUB F$: S=Y
' outer loop
410 J=1,P=0
' inner loop
420 T=EXP(J*H)/2,U=EXP(T-.25/T)/2,R=U-.25/U
430 X=-R: GOSUB F$: L=Y,X=R: GOSUB F$: M=Y
440 Q=(T+.25/T)*(U+.25/U)*(L+M),P=P+Q,J=J+1+(K>0)
450 IF ABS Q>E*ABS P GOTO 420
' exit inner loop
460 X=S-P,S=S+P,K=K+1
470 IF ABS X>10*E*ABS S IF K<=N LET H=H/2: GOTO 410
480 GOTO 270
' For machines with ON ERROR GOTO such as PC-1475, update the following:
' 100 "QUAD" ON ERROR GOTO 490: E=1E-9,N=6: INPUT "f=F";F$: F$="F"+F$
' 210 X=A+D*R,Y=L: IF X>A GOSUB F$: L=Y
' 220 X=B-D*R,Y=M: IF X<B GOSUB F$: M=Y
' 490 IF ERN=2 RETURN

Edit: major update with the latest version.

"I count on old friends to remain rational"
03-28-2021, 06:59 PM (This post was last modified: 03-28-2021 07:00 PM by Albert Chan.)
Post: #2
RE: (PC-12xx~14xx) qthsh Tanh-Sinh quadrature
Some issues I noticed for Tanh-Sinh quadrature.

Let x = tanh(pi/2*sinh(t))

∫(f(x), x=0 .. 1) = pi/2 * ∫(f(tanh(pi/2*sinh(t))) * (cosh(t)/cosh(pi*sinh(t)/2)^2), t = 0 .. inf)

Numerically, t does not need to go too high (certainly not inf)

lua> function ts(t) return tanh(pi/2*sinh(t)) end
lua> ts(2.8), ts(3), ts(3.2)
0.9999999999866902       0.999999999999957       1

At t=3, with double precision, we are very close to evaluating the end-point, f(1)
If f is already u-transformed, say, from f0, t is even smaller (to avoid touching f0(1))

∫(f0(x), x=0..1) = ∫(6*u*(1-u) * f0(u*u*(3-2*u)), u=0..1)       // RHS integrand = f(u)

lua> function u(u) return u*u*(3-2*u) end
lua> u(ts(2.2)), u(ts(2.4)), u(ts(2.6))
0.9999999999917426       0.9999999999999855       1

So, what is the big deal ?
If f(1) = ∞, u-transformation might make the algorithm fail.

>>> from mpmath import *
>>> f = lambda x: 1/sqrt(-ln(x))
>>> quadts(f, [0,1], error=True)
(mpf('1.7724538503750329'), mpf('1.0e-10'))
>>>
>>> uf = lambda u: 6*u*(1-u) * f(u*u*(3-2*u))
>>> quadts(uf, [0,1], error=True)
Traceback (most recent call last):
...
ZeroDivisionError
03-29-2021, 01:10 AM
Post: #3
RE: (PC-12xx~14xx) qthsh Tanh-Sinh quadrature
(03-28-2021 06:59 PM)Albert Chan Wrote:  Some issues I noticed for Tanh-Sinh quadrature.

Thanks for sharing your findings! It's great to play with these methods to see their practical strengths and weaknesses.

There are generally known limitations to the applicability of the method (see e.g. Boost Math Tanh-Sinh for a decent explanation), as well as issues that arise in practice when using it. But I am sure I don't need to tell you that.

The implementations in C and BASIC I wrote are based on several other implementations (see links). I have not optimized the code or tried to fix problems that I already saw in it early on (more about this further below).

(03-28-2021 06:59 PM)Albert Chan Wrote:  >>> from mpmath import *
>>> f = lambda x: 1/sqrt(-ln(x))

The crash with this function does not happen in the C and BASIC versions I've worked on (here is the output with k, error, and points for debugging):

k=3 err=2.29154e-08 pts=32
qthsh = 1.77245384129412


However, it does happen for the same function after simply reversing the x range over [0,1]:

double f(double x) { return 1/sqrt(-log(1-x)); }

This crashes at k=2:

k=2 err=nan pts=17
int qthsh = -inf


While prepping my implementation, I compared several existing implementations of the method to look for details such as this. I noticed the following conditions, which I've adopted in the exact same way to avoid hitting the endpoints:

if (a+x > a)
  fp = f(a+x);
if (b-x < b)
  fm = f(b-x);


Do you spot the problem too?





If a=0 (or b=0), then the test a+x > a (or b-x < b) passes for arbitrarily small x, so nothing prevents evaluating f arbitrarily close to that endpoint.

On the other hand, if a=1 or b=1 then this "fix" works the way it was intended.
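To make this concrete, here is a tiny C check (my addition, not from the original post) showing that the guard's effect depends entirely on the magnitude of the endpoint:

Code:
#include <stdio.h>

int main(void) {
  double x = 1e-300;
  /* a = 0: the test a+x > a passes for arbitrarily tiny x, so f would still be
     evaluated essentially at the endpoint */
  printf("a=0: %d\n", 0.0 + x > 0.0);   /* prints 1 */
  /* a = 1: once x drops below one ulp of 1, a+x rounds back to a and the guard
     kicks in as intended */
  printf("a=1: %d\n", 1.0 + x > 1.0);   /* prints 0 */
  return 0;
}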

This happens in the "best Tanh-Sinh implementation on the planet".

Further, perhaps in the Python and Lua implementations there is no guard at all, since your case failed either way?

Fortunately, we can simply modify the code to something like this:

if (1+x > 1) {
  fp = f(a+x);
  fm = f(b-x);
}


Hopefully the C/C++ compiler does not optimize the condition to x > 0, which is algebraically identical but numerically destroys the benefit of the test (because x > 0 is always true here). This type of optimization was a serious problem in the past, when compilers wrongly assumed that floating point arithmetic obeys the same algebraic laws as real arithmetic, such as associativity. Modern C/C++ compilers should not mess this up.

With this simple change I propose, it works as expected:

k=3 err=2.29154e-08 pts=29
int qthsh = 1.77245384129412


I can't think of a problem with this change, though the value 1 could be replaced by something smaller, such as eps or MachEps. It depends on how close you allow x to approach the endpoints.

I also noticed that in the public implementations of the method with this incomplete fix, fp and fm are simply the old f(a+x) and f(b-x) values, i.e. values at neighboring points closer to the center of the x range. I believe that should be fine, though I doubt this was researched thoroughly. It has an ad-hoc stink to it.

I wonder how many of these public Tanh-Sinh implementations are numerically flawed.

- Rob

"I count on old friends to remain rational"
03-29-2021, 02:28 AM (This post was last modified: 03-29-2021 05:00 PM by Albert Chan.)
Post: #4
RE: (PC-12xx~14xx) qthsh Tanh-Sinh quadrature
(03-29-2021 01:10 AM)robve Wrote:  
(03-28-2021 06:59 PM)Albert Chan Wrote:  >>> from mpmath import *
>>> f = lambda x: 1/sqrt(-ln(x))

The crash with this function does not happen in the C and BASIC versions I've worked on ...

∫(f(x), x=0..1) did not fail in Python mpmath either.

The problem is uf = lambda u: 6*u*(1-u) * f(u*u*(3-2*u))

∫(uf(u), u=0..1) will touch f(x=1) = ∞, much earlier than expected.

>>> utox = lambda u: u*u*(3-2*u)
>>> utox(0.9999999962747)
0.99999999999999989
>>> utox(0.9999999962748)
1.0

Update: we can fix uf(u) so that it never evaluates the end-points.

>>> def uf(u):                   # u = 0 .. 1
...           x = u*u*(3-2*u)
...           if x==0 or x==1: return 0
...           return 6*u*(1-u) * f(x)
...
>>> quadts(uf, [0,1])
mpf('1.7724538508925098')

Trivia: if u is slightly outside the valid range, x will still be inside [0, 1]

utox(ε) = ε²*(3-2ε) ≥ 0, even if ε is negative
utox(1-ε) = (1-ε)²*(3-2*(1-ε)) = 1 - ε²*(3-2ε) ≤ 1, even if ε is negative
03-29-2021, 07:42 PM
Post: #5
RE: (PC-12xx~14xx) qthsh Tanh-Sinh quadrature
Albert, thanks for sharing additional examples and details.

Let me first say that I'm still "playing around" with this method using several implementations to compare (but I do not have much time beyond weekends.)

Tanh-Sinh also crashes in the Boost Math library for \( \int_0^1 1/\sqrt{-\log(1-x)} \,dx \) with the Abort message:

libc++abi.dylib: terminating with uncaught exception of type boost::wrapexcept<boost::math::evaluation_error>: Error in function tanh_sinh<double>::integrate: The tanh_sinh quadrature evaluated your function at a singular point and got -inf. Please narrow the bounds of integration or check your function for singularities.
Abort


but it does not crash for \( \int_0^1 1/\sqrt{-\log x}\,dx \), which is the symmetric equivalent(!); i.e. x approaching 0 is more problematic than x approaching 1. But the reason is probably different, since there is no explicit guard against evaluating at a+x or b-x when x is too small (IMHO a practice that can have problems of its own: ignoring function values may produce inaccurate error estimates and less precise integrals).

(03-29-2021 02:28 AM)Albert Chan Wrote:  The problem is uf = lambda u: 6*u*(1-u) * f(u*u*(3-2*u))

∫(uf(u), u=0..1) will touch f(x=1) = ∞, much earlier than expected.

This function works in
Boost Math: Tanh-sinh=0.5 err=3.66374e-14 points=74
C implementation in this post: qthsh=0.5 k=4 err=1.55431e-15 points=57

The Boost Math Tanh-Sinh is a rather straightforward implementation. By comparison, the WP 43S code is more clever, but neither is always robust to functions that have problems in close proximity to the endpoints.

The Tanh-Sinh transformation does place more points close to the endpoints and performs particularly well for U-shaped or L-shaped functions on the domain of integration. Functions that are sensitive to errors in close proximity to the endpoints may not be usable with this method, depending on how many levels are (internally) used and whether the implementation has guards that attempt to prevent this (if there are any at all). The "fix" in the code I looked at is likely a response to this problem (but it still fails).

(03-29-2021 02:28 AM)Albert Chan Wrote:  ∫(f(x), x=0..1) did not fail in Python mpmath either.

A quick look at mpmath's quadrature internals reveals some interesting details. The implementation produces a list of (w,x) with weights w and nodes x to sum, see
260     class TanhSinh(QuadratureRule):

The mpf precision is set extra high:

311     def calc_nodes(self, degree, prec, verbose=False):
...
329        extra = 20
330        ctx.prec += extra


After producing the Tanh-Sinh points in calc_nodes, the function to integrate is evaluated at all x in the node list, regardless of how close x is to the endpoints:

308        S += self.ctx.fdot((w,f(x)) for (x,w) in nodes)

where fdot multiplies the weight w with f(x) represented in mpf, e.g. when w and x are mpf reals:

892     def fdot(ctx, A, B=None, conjugate=False):
944        real.append(mpf_mul(a._mpf_, b._mpf_))
...
972        s = mpf_sum(real, prec, rnd)
...
976        s = ctx.make_mpf(s)
977        return s


It looks like there is no error in this case due to higher precision. More luck than wisdom perhaps?

Let me add that Boost Math Tanh-Sinh does appear to offer a fix by passing the distance to the endpoint to the integrator:

sqrt(x / (1 - x * x))

This is an example of an integral that has all its area close to a non-zero endpoint; the problem here is that the function being integrated returns "garbage" values for x very close to 1. We can easily fix this issue by passing a 2-argument functor to the integrator: the second argument gives the distance to the nearest endpoint, and we can use that information to return accurate values and thus fix the integral calculation.

This is as stated in the Boost table of example cases spanning simple to problematic.

And, of course:

* As always, there are caveats: For instance, if the function you want to integrate is not holomorphic on the unit disk,
* then the rapid convergence will be spoiled. In this case, a more appropriate quadrature is (say) Romberg, which does not
* require the function to be holomorphic, only differentiable up to some order.

"I count on old friends to remain rational"
03-30-2021, 02:00 AM
Post: #6
RE: (PC-12xx~14xx) qthsh Tanh-Sinh quadrature
Albert, in the Tanh-Sinh programs that I based my C and BASIC implementations on, I found several issues that can be corrected and optimized. I'd love to get your feedback on those. I also "played around" a bit with the Python mpmath Tanh-Sinh implementation, which is terrible (slower than Gauss-Legendre for several test cases when it should be faster, and it produces inaccurate results; perhaps more about that later...)

a. The Tanh-Sinh code I looked at attempts to prevent evaluating the function at the points a and b. However, as I explained in my previous reply, these checks do not work reliably. I can think of at least two possible options to fix this, though it might also be fine to leave the guards out entirely, because it is known and understood that Tanh-Sinh may fail for certain functions with singularities at the endpoints.

Option 1: changing the inner loop termination condition from this:

      if (r == 0 || r == 1)
        break;
      x = d*r;
      if (a+x > a)
        fp = f(a+x);
      if (b-x < b)
        fm = f(b-x);


to the following lines of code, which check 1+r == 1 (i.e. |r| < MachEps) instead of r == 0 and drop the unnecessary ad-hoc guards that reused the old fp and fm values (i.e. previous points); I did not find any justification for that reuse, and I seriously doubt it is okay, since it may affect the result and the error estimation:

      if (1+r == 1 || r == 1)
        break;
      x = d*r;
      fp = f(a+x);
      fm = f(b-x);


This prevents x from getting too close to the endpoints a and b. The change takes the relative proximity of x to a and b into account, scaled by the size of the integration domain d=(b-a)/2; note that x=d*r.

Option 2: the first option can still fail if d<<1 while a and b are large, i.e. when |a-b|<<1 and a,b>>1 (or when the center c>>1). So:

      if (d+r == d || r == 1)
        break;
      x = d*r;
      if (c+x == c)
        break;
      fp = f(a+x);
      fm = f(b-x);


These are two possible options to reduce the chances of evaluating points too close to a and b.

b. The code I looked at lacks some optimizations to further speed up the code, especially when exp() is expensive to compute, e.g. in software on BASIC pocket computers and calculators.

We can apply two "loop strength reductions", which do not negatively affect precision. The first strength reduction I already applied to the BASIC program. It is a change from:

double h;
int k = 0;
...
do {
    double p = 0, q, v;
    int j = 1;
    h = pow(2, -k); // h=1,1/2,1/4,1/8,...
    do {
      double t = exp(h*j);
      ...
      j += 1+(k>0);
    } while (fabs(q) > fabs(p*eps));


to the strength reduced form to remove pow(2, -k):

double h = 2;
int k = 0;
...
do {
    double p = 0, q, v;
    int j = 1;
    h /= 2;
    do {
      double t = exp(h*j);
      ...
      j += 1+(k>0);
    } while (fabs(q) > fabs(p*eps));
...
} while (e >= tol && k <= n);
...
return d*s*h; // we need the final h here


Then we apply a second loop strength reduction by observing that exp(h*j)=exp(h)^j then strength-reduce the power ^j away to obtain:

double h = 2;
int k = 0;
...
do {
    double p = 0, q, v;
    double exph, exphj;
    h /= 2;
    exph = exp(h);
    exphj = exph;
    if (k > 0)
      exph *= exph;
    do {
      double t = exphj;
      ...
      exphj *= exph;
    } while (fabs(q) > fabs(p*eps));
...
} while (e >= tol && k <= n);
...
return d*s*h; // we need the final h here


Note that exp(h) in the outer loop takes only one value per level, up to k=6 or k=7 levels, namely \( \{e, e^{1/2}, e^{1/4}, e^{1/8}, e^{1/16}, e^{1/32}, e^{1/64} \} \) for n=6, and these can be precomputed to speed this up even more (see the sketch below).
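As an illustration of that remark, here is a minimal C sketch (my addition; the table and function names are mine, and it assumes the default n=6 levels): tabulate the seven exp(h) values once, then let the outer loop read them from the table instead of calling exp(h) at every level.

Code:
#include <math.h>

#define MAX_LEVELS 7              /* levels k = 0..6 (default n = 6) */
static double exp_h[MAX_LEVELS];  /* exp_h[k] = exp(2^-k) = e, e^(1/2), ..., e^(1/64) */

static void init_exp_table(void) {
  double h = 1;
  for (int k = 0; k < MAX_LEVELS; ++k, h /= 2)
    exp_h[k] = exp(h);
}

/* in the outer loop, "exph = exp_h[k];" then replaces "exph = exp(h);";
   the inner loop (exphj *= exph) is unchanged */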

I experimented with these changes to verify them and everything looks good. More testing is necessary and additional improvements might be possible. More to come...

- Rob

"I count on old friends to remain rational"
04-01-2021, 04:27 PM (This post was last modified: 04-02-2021 12:30 AM by robve.)
Post: #7
RE: (PC-12xx~14xx) qthsh Tanh-Sinh quadrature
If anyone is interested, I uploaded a draft article here that observes and addresses the following issues in an existing (said to be "the best") implementation of Tanh-Sinh:
- fix division by zero in e=fabs(v/s-1), which is not safe to execute, e.g. integrating f(x)=x-.5 causes floating point exception;
- fix underflow in the inner loop condition fabs(q)>fabs(p*eps), e.g. integrating f(x)=sin(pi*x*40) evaluates too many (315) points (versus 15);
- by comparison, Python mpmath Tanh-Sinh implementation's error estimation is not as reliable as can be;
- performance improvements to speed up qthsh Tanh-Sinh in C and BASIC, runs at least 10% faster on a pocket computer;
- permit ±inf singularities close to one or both of the endpoints.

This is work in progress and things may change; e.g. the number of functions tested so far is limited to 21, but those already reveal some interesting pros and cons, successes and failures, of the method.

I hope this is useful to someone. Any suggestions and comments are much appreciated.

- Rob

"I count on old friends to remain rational"
04-02-2021, 11:05 AM (This post was last modified: 04-02-2021 12:16 PM by emece67.)
Post: #8
RE: (PC-12xx~14xx) qthsh Tanh-Sinh quadrature
Hi,

A question, the integration interval in your calculations, is always [0, 1]?

Regards.


EDIT: I answer myself: yes.
04-02-2021, 02:46 PM (This post was last modified: 04-02-2021 07:35 PM by robve.)
Post: #9
RE: (PC-12xx~14xx) qthsh Tanh-Sinh quadrature
(04-02-2021 11:05 AM)emece67 Wrote:  A question, the integration interval in your calculations, is always [0, 1]?

Yes. These are all definite Tanh-Sinh integrals. I have not looked at unbounded domains, for which Sinh-Sinh and Exp-Sinh are applicable. Functions that are typically integrated over non-[0,1] finite domains are normalized to the [0,1] domain in experiments. It is simpler that way to conduct the tests and for comparisons. By implication, this work also applies to non-[0,1] finite domains.

I've updated the draft article today with the latest improvements and corrections to Tanh-Sinh. Once this is done I will update the original post with the list of improvements and the final code in a couple of days.

- Rob

Edit: clarification needed: I am not updating the original post and code until the work is done in a couple of days

"I count on old friends to remain rational"
04-02-2021, 03:16 PM
Post: #10
RE: (PC-12xx~14xx) qthsh Tanh-Sinh quadrature
https://www.genivia.com/files/qthsh.pdf, page 10 Wrote:  mpmath: mp.dps=15, points=53, est.err=1e-14 (incorrect, actual is about 1e-9)

I was unable to reproduce the above result for ∫(x**3 - 2*x*x + x, x = 0 .. 1)

>>> def f(x): f.n += 1; return x*x*x-2*x*x+x
...
>>> f.n = 0
>>> quadts(f, [0,1], error=True), f.n
((mpf('0.083333333333333329'), mpf('1.0e-22')), 53)

The integral looks too simple to trip Tanh-Sinh quadrature.
With Gauss-Legendre quadrature, it took even fewer points.

>>> f.n = 0
>>> quadgl(f, [0,1], error=True), f.n
((mpf('0.083333333333333329'), mpf('9.26442286059391e-23')), 9)

Both algorithms return the correct quadrature: 1/4 - 2/3 + 1/2 = 1/12
04-02-2021, 03:40 PM
Post: #11
RE: (PC-12xx~14xx) qthsh Tanh-Sinh quadrature
(04-02-2021 03:16 PM)Albert Chan Wrote:  
https://www.genivia.com/files/qthsh.pdf, page 10 Wrote:  mpmath: mp.dps=15, points=53, est.err=1e-14 (incorrect, actual is about 1e-9)

I was unable to produce above result, for ∫(x**3 - 2x*x + x, x = 0 .. 1)

See the number in the table in the article: (53,1E-14#), which is about the same as what you found. I neglected to update the title with the correct number. To be fair, the relative error is about 1E-17~1E-16.

The mpmath error estimation is a bit of a mystery to me, given the numbers and the way it is estimated in the code. It seems too high or too low in several cases.

- Rob

"I count on old friends to remain rational"
04-02-2021, 04:20 PM
Post: #12
RE: (PC-12xx~14xx) qthsh Tanh-Sinh quadrature
(04-02-2021 02:46 PM)robve Wrote:  I've updated the draft article today with the latest improvements and corrections to Tanh-Sinh. Once this is done I will update the original post with the list of improvements and the final code.

I'll also post my original Python code later (from which I derived the wp-34s code).

I then used a battery of 140 integrals (now 161, after adding your 21 functions) for tests. Maybe this set is useful to you.

Regards.
04-02-2021, 05:04 PM
Post: #13
RE: (PC-12xx~14xx) qthsh Tanh-Sinh quadrature
(04-02-2021 04:20 PM)emece67 Wrote:  Then I used a battery of 140 integrals (now 161, after adding your 21 functions) for tests. Maybe this set is useful to you.

Thanks for contributing. That would be nice to look at. Eventually I may run the 1200 integrals in the VB spreadsheet. However, it is not about the number of functions per se (though the more the better); the variety matters too, i.e. include non-holomorphic functions, periodic functions, step functions, etc. Testing these can help to determine if the error estimate is usable, not only accurate. Tanh-Sinh is also not "symmetric", meaning that integrating 1/sqrt(-log(x)) over [0,1] should give the same result as integrating 1/sqrt(-log(1-x)). Except for mpmath, that is not the case with the VB article code and Boost Math, because the singularities occur earlier than expected and turn out to be fatal.

I've used the default mpmath Tanh-Sinh parameters with mp.dps=15 in order to compare to C/C++ double fp. Levels is 6 (default) and eps is 1e-9.

Some observations:

- mpmath Tanh-Sinh appears to run up to 427 evaluations, never more with these settings, and often more than necessary. Something might be amiss with the convergence test?

- mpmath Tanh-Sinh sometimes produces error estimates that are a mystery when compared to the other Tanh-Sinh implementations. I could be mistaken, but the error estimate is performed in the same way as for Gauss-Legendre in the code. Both methods produce node sets which are used for the quadrature and the error estimation.

- Boost Math Tanh-Sinh explodes the number of points close to the zero endpoint (that's also one reason to include the zero endpoint in the tests: to compare behaviors, not only the integrals and error estimates). For example, f(x)=4/(1+x*x) produces 147 points, with a large number close to the 0 and 1 endpoints. See my draft article's graph of this function with Boost Math; many of the points are very close to the 0 endpoint:

1.69367010239342e-108
7.55408323550392e-123
4.13900004783521e-139
1.55080149097904e-157
2.04586734643622e-178
4.47908407247427e-202
6.93948438714987e-229
2.89628710646752e-259

Of course, we expect points to be distributed towards the endpoints with Tanh-Sinh, but this is far more than necessary. The weights there are minuscule and negligible.

PS. Just to clarify: I am not really biased toward any integration method. They all have strengths and weaknesses that can favor one over the other depending on what the goal is (e.g. CPU time versus accuracy). The best integration results are obtained by applying a transformation when necessary, change variable when helpful, consider integration domain splitting, and then select the method to apply that works best for the integrand. A usable error estimate is just as important (if not more) than the integration result, unless we seek a rough estimate (e.g. 1e-3) that is good enough.

- Rob

"I count on old friends to remain rational"
04-02-2021, 07:12 PM
Post: #14
RE: (PC-12xx~14xx) qthsh Tanh-Sinh quadrature
(03-29-2021 07:42 PM)robve Wrote:  sqrt(x / (1 - x * x))

This is an example of an integral that has all its area close to a non-zero endpoint; the problem here is that the function being integrated returns "garbage" values for x very close to 1. We can easily fix this issue by passing a 2-argument functor to the integrator: the second argument gives the distance to the nearest endpoint, and we can use that information to return accurate values and thus fix the integral calculation.

Can you explain what that meant ? Perhaps an example ?

To avoid garbage for x close to 1, I would have rewritten the integrand to put the infinity at x=0.
(y=1-x², dy = -2x dx, would have solved it exactly. But, no cheating ...)

∫(x/√(1-x²), x=0..1) = ∫((1-x)/√(x*(2-x)), x=0..1)

With the infinity moved to x=0, we can add many, many more points.

Since e^x grows faster than x^n, e^-x shrinks faster than x^-n, for finite n.

∫(f(x), x=0 .. 1) = ∫(f(e^-y)*e^-y, y=0 .. inf)

Code:
def e_transform(f, exp=exp):    # interval [0,1] -> [0,inf]
    def ef(z):
        try: return f(z)*z
        except: return 0
    return lambda y: ef(exp(-y))

>>> f = lambda x: (1-x)/sqrt(x*(2-x))
>>> ef = e_transform(f)
>>> quadts(f, [0, 1])
0.999999999624821
>>> quadts(ef, [0, inf])
1.0

Similar results, with Gauss-Legendre quadrature:

>>> quadgl(f, [0, 1])
0.993619917982059
>>> quadgl(ef, [0, inf])
1.0
04-02-2021, 08:14 PM
Post: #15
RE: (PC-12xx~14xx) qthsh Tanh-Sinh quadrature
(04-02-2021 07:12 PM)Albert Chan Wrote:  
(03-29-2021 07:42 PM)robve Wrote:  sqrt(x / (1 - x * x))

This is an example of an integral that has all its area close to a non-zero endpoint; the problem here is that the function being integrated returns "garbage" values for x very close to 1. We can easily fix this issue by passing a 2-argument functor to the integrator: the second argument gives the distance to the nearest endpoint, and we can use that information to return accurate values and thus fix the integral calculation.

Can you explain what that meant ? Perhaps an example ?

What is meant is that when x is close to 1, so-called "catastrophic cancellation" occurs, with loss of precision. Tanh-Sinh then accumulates inaccurate values at such points.

I picked two Boost Math Tanh-Sinh points very close to 1 for this function to illustrate:

x=0.999999999999991 x^2=0.999999999999982 1-x^2=1.77635683940025e-14
x=0.999999999999999 x^2=0.999999999999999 1-x^2=1.33226762955019e-15

The first point, for example, has only about 3~4 digits remaining in IEEE 754 double precision after the subtraction 1-x^2 with x^2=0.999999999999982, so roughly 1.77e-14 < 1-x^2 < 1.78e-14. This means that x/(1-x*x) is accurate to only 3~4 digits while being fairly large, about 5.6e13. This happens for a lot of points.

The "garbage" is not my wording; see Boost Tanh-Sinh. There could be a problem if the points integrated are mostly in the neighborhood of 1 for this function. I am thinking that there are better examples to demonstrate this.
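For what it's worth, here is a minimal C sketch (my addition) of the effect the Boost documentation alludes to: once the node is delivered as x = 1 - r, the fine detail of the small distance r has been rounded away, so 1-x*x inherits that rounding, whereas computing with the distance r directly via 1-x^2 = r*(2-r) keeps its precision.

Code:
#include <math.h>
#include <stdio.h>

int main(void) {
  double r = 1e-15;   /* distance of the node to the endpoint 1 */
  double x = 1 - r;   /* the node as an integrator would hand it to f(x) */
  /* the two results agree to only about 3~4 digits for this r */
  printf("from x alone : %.17g\n", sqrt(x / (1 - x*x)));      /* ~2.2370e7 */
  printf("from distance: %.17g\n", sqrt(x / (r * (2 - r))));  /* ~2.2361e7 */
  return 0;
}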

- Rob

"I count on old friends to remain rational"
04-02-2021, 09:07 PM
Post: #16
RE: (PC-12xx~14xx) qthsh Tanh-Sinh quadrature
I also noticed other issues in the VB code in the Article, which I have not mentioned in my writeup, such as computing the relative error with:
errval = Abs(2 * sslast / ss - 1)
However, the distributive law does not apply to floating point arithmetic. This should be:
errval = Abs((2 * sslast - ss) / ss)
When 2*sslast ~ ss this is important.

However, this is a bit pointless to say, because both pieces of code may cause a division by zero when the updated quadrature sum ss is zero. A function such as f(x)=x-.5 over [0,1] fails to integrate. It can get worse, because a division by zero produces errval=NaN. NaN is then compared to a threshold value. Comparisons to NaN are always false; even the test x==x is false, which by the way is a neat way to detect that x is NaN. NaN comparisons in loop conditions can lead to early termination (probably good) or non-termination (probably bad). In the VB code there is a WHILE instead of an UNTIL, so NaN is "good" instead of "bad":
Loop While errval >= tol And k <= maxlevel
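For comparison, the qthsh code at the top of this thread avoids both of these particular pitfalls; here is a minimal C restatement of that idea (the function names are mine, the expressions are the ones already shown above):

Code:
#include <math.h>

/* convergence test without a division: no 0/0, so no NaN reaches the comparison */
int converged(double v, double s, double eps) {
  return fabs(v) <= 10 * eps * fabs(s);
}

/* error estimate with eps added to the denominator, so s == 0
   (e.g. f(x) = x - .5 over [0,1]) cannot cause a division by zero */
double rel_error(double v, double s, double eps) {
  return fabs(v) / (fabs(s) + eps);
}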

These kinds of issues happen more often than we realize (so I'm hopefully not embarrassing anyone by pointing this out, because it happens a lot). Because FP is generally hard to use, a while ago I wrote a couple of lecture notes on the subject for graduate students. It's the usual fare on FP and SIMD, and there is much more that I would like to add, in particular the reading material "What Every Computer Scientist Should Know About Floating-Point Arithmetic".

If you're into this kind of low-level detail and there is not much to do because of the pandemic, then perhaps this is a text to get excited about!

- Rob

"I count on old friends to remain rational"
04-03-2021, 01:40 AM (This post was last modified: 04-03-2021 01:14 PM by Albert Chan.)
Post: #17
RE: (PC-12xx~14xx) qthsh Tanh-Sinh quadrature
(04-02-2021 02:46 PM)robve Wrote:  Functions that are typically integrated over non-[0,1] finite domains are normalized to the [0,1] domain in experiments.
It is simpler that way to conduct the tests and for comparisons.

Here is a link to convert integral limits to [0, 1]: http://fmnt.info/blog/20180818_infinite-integrals.html

Example, my previous post e-transform, without changing integral limits.
Code:
def e_transform2(f, exp=exp):  # interval [0,1] -> [0,1]
    def ef(z):
        try: return f(z)*z
        except: return 0
    return lambda t: ef(exp(1-1/t))/(t*t)

>>> f = lambda x: x**-0.99
>>> ef2 = e_transform2(f)

>>> quad(f, [0,1])
mpf('35.65470993432163')
>>> quad(ef2, [0,1])
mpf('99.999999999999915')

Comment: although it is convenient to have a unit interval, the integral may be harder to compute.
Conversion of [0,∞] -> [0,1] compresses the curve, possibly making it spiky.

Cas> f(x) := x^-0.99
Cas> ef1(t) := f(e^-t)*e^-t;     // [0,1] -> [0,inf]
Cas> ef2(t) := ef1(1/t-1)/(t*t); // [0,inf] -> [0,1]

ef1(t) = exp(-0.01*t), no spike, thus easy to integrate.
ef2(t) has a spike at t = 0.005

Cas> ef2(0.007) → 4939.99121224
Cas> ef2(0.005) → 5467.81701782
Cas> ef2(0.003) → 4003.61366011
Cas> ef2(0.001) → undef
Cas> limit(ef2(t), t=exact(0.001)) → 1000000*exp(98901/100)/exp(999)

Numerically, ef2(0.001) will be hit with overflow, ∞/∞ → undef (*)

(*) mpmath does not overflow, nor underflow

>>> 1000000*exp(98901/100)/exp(999)
mpf('45.856206642206899')

Thus, mpmath can easily plot the shape of the spike (Cas cannot):
>>> plot(ef2, [0, 0.01])
04-03-2021, 03:17 AM (This post was last modified: 04-03-2021 01:13 PM by emece67.)
Post: #18
RE: (PC-12xx~14xx) qthsh Tanh-Sinh quadrature
Hi,

These are the results for my Python script:
Code:
   TNFE err_r err_t
 1 0047 8e-12 0e+00
 2 0049 5e-12 2e-15
 3 0049 1e-08 8e-15
 4 0049 2e-13 2e-15
 5 0049 7e-12 3e-15
 6 0049 5e-08 6e-08
 7 0049 2e-12 2e-12
 8 0025 5e-07 2e-08
 9 0025 5e-07 2e-08
10 0197 6e-04 6e-03
11 0197 6e-04 5e-03
12 0049 8e-08 1e-07
13 0197 5e-04 4e-03
14 0025 4e-07 2e-08
15 0025 4e-07 2e-08
16 0137 2e-15 0e+00
17 0049 7e-10 1e-11
18 0049 2e-12 3e-15
19 0197 3e-03 2e-03
20 0197 1e-02 1e-02
21 0197 1e-02 1e-02

where err_r is the reported error and err_t is the true error. TNFE is the total number of function evaluations. The only adjustable parameter is mp.dps (set to 15, seems to work, at least, with values 8~128). It worked as (I) expected, except for functions 10~11... they hurt!

The script is currently undergoing some refactoring, so I expect to upload it to GitHub in a few days.

Regards.


EDIT: results modified as a result of code refactoring.
04-03-2021, 01:38 PM (This post was last modified: 02-02-2022 08:10 AM by emece67.)
Post: #19
RE: (PC-12xx~14xx) qthsh Tanh-Sinh quadrature
My original Python code for the double-exponential quadrature is now on GitHub.

It has suffered some refactoring from its original shape (that I used to derive the wp34s code).

Regards.
04-04-2021, 01:50 AM
Post: #20
RE: (PC-12xx~14xx) qthsh Tanh-Sinh quadrature
(04-03-2021 01:40 AM)Albert Chan Wrote:  
(04-02-2021 02:46 PM)robve Wrote:  Functions that are typically integrated over non-[0,1] finite domains are normalized to the [0,1] domain in experiments.
It is simpler that way to conduct the tests and for comparisons.

Here is a link to convert integral limits to [0, 1]: http://fmnt.info/blog/20180818_infinite-integrals.html

Very nice to know. Just for clarification: we're only considering definite integrals at this time with Tanh-Sinh. A simple linear transformation suffices to rescale the bounds. The same area is computed with the same function points, but via a transformation. No tricks, just easier to analyze the point distributions in a normalized [0,1] domain. For example, NR2ed p.134 is transformed:
$$ \int_0^2 x^4 \log(x+\sqrt{x^2+1})\,dx = \int_0^1 32u^4 \log(2u+\sqrt{4u^2+1})\,du $$
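Written out in C for this integrand (my addition; the function names are mine), the rescaling is just a wrapper with the Jacobian factor b-a:

Code:
#include <math.h>
#include <stdio.h>

static double f(double x) { return pow(x, 4) * log(x + sqrt(x*x + 1)); }  /* on [0,2] */
static double g(double u) { return 2 * f(2*u); }   /* on [0,1]: (b-a)*f(a+(b-a)*u) */

int main(void) {
  /* spot check: g(0.5) should equal 2*f(1) */
  printf("%.15g %.15g\n", g(0.5), 2*f(1.0));
  return 0;
}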

I've tested other integrals, including the one above, that are not included in the article, but several of these are not very interesting for Tanh-Sinh, e.g. trig and other transcendental functions. We already know that Tanh-Sinh converges at a double exponential rate for holomorphic (complex-differentiable) functions bounded on the interior of the unit disk, i.e. within a Hardy space. The point distribution makes this method more resilient to singularities at the endpoints and especially good when most of the area is located close to the endpoint(s), e.g. for L- and U-shaped functions.

As some have said, this method might never reach the level of a true general-purpose integrator, but who knows...

It's interesting to analyze the implementation differences and apply code optimizations and fixes to public code when applicable. Perhaps we can find a way to make the method stronger when exceptions occur due to singularities that kill the method (thanks Albert for pointing this out!). The approach that might work best is to interpolate/extrapolate; see the last part of the (draft) article: "The integral of function 15 is now the same as the integral of function 14 as it should be, up to the last digit 1.77245382409889. Both are the same for the same specified eps." Though anecdotal at this early stage, this corroborates the interpolation approach. Inverting the integration bounds should give the same integration result, which for Tanh-Sinh is NOT necessarily the case for domains such as [0,1].

I did not have time today to look at emece67's Python code and results yet. As always, suggestions and comments are much appreciated.

- Rob

"I count on old friends to remain rational"