
Chapter 9

Functions

Barycentric polynomial interpolation
polyinterp.jl
"""
    polyinterp(t, y)

Construct a callable polynomial interpolant through the points in
vectors `t`, `y` using the barycentric interpolation formula.
"""
function polyinterp(t, y)
    n = length(t) - 1
    C = (t[n+1] - t[1]) / 4           # scaling factor to ensure stability
    tc = t / C

    # Adding one node at a time, compute inverses of the weights.
    ω = ones(n+1)
    for m in 0:n-1
        d = tc[1:m+1] .- tc[m+2]    # vector of node differences
        @. ω[1:m+1] *= d            # update previous
        ω[m+2] = prod(-d)         # compute the new one
    end
    w = 1 ./ ω                      # go from inverses to weights

    # This function evaluates the interpolant at given x.
    p = function (x)
        Δ = x .- t
        idx = findfirst(iszero, Δ)
        if idx !== nothing              # x is exactly at a node
            return y[idx]               # return the node's data value
        else
            terms = w ./ Δ
            return sum(y .* terms) / sum(terms)
        end
    end
    return p
end
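
The computed weights implement the second (true) barycentric formula

$$p(x) = \frac{\displaystyle\sum_{k=0}^n \frac{w_k}{x-t_k}\, y_k}{\displaystyle\sum_{k=0}^n \frac{w_k}{x-t_k}}.$$

A minimal usage sketch (the nodes and test function below are illustrative assumptions, not from the text):

t = [0.0, 0.5, 1.0, 1.5, 2.0]
p = polyinterp(t, exp.(t))
p(0.75)    # evaluate between nodes
p(0.5)     # returns the data value exactly at a node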
Trigonometric interpolation
triginterp.jl
"""
    triginterp(t, y)

Construct the trigonometric interpolant for the points defined by
vectors `t` and `y`.
"""
function triginterp(t, y)
    N = length(t)

    τ(x) =
        if x == 0
            return 1.0
        else
            denom = isodd(N) ? N * sin(π * x / 2) : N * tan(π * x / 2)
            return sin(N * π * x / 2) / denom
        end

    return function (x)
        return sum(y[k] * τ(x - t[k]) for k in eachindex(y))
    end
end
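
A minimal usage sketch, assuming $N$ equally spaced nodes with spacing $2/N$ over one period of a 2-periodic function (the node set and test function are illustrative assumptions, not from the text):

N = 9
t = [2k / N - 1 for k in 0:N-1]
f(x) = exp(sinpi(x))
p = triginterp(t, f.(t))
abs(p(0.3) - f(0.3))    # small, since f is smooth and periodic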
Clenshaw–Curtis integration
ccint.jl
using LinearAlgebra    # provides `dot` for the inner product below

"""
    ccint(f, n)

Perform Clenshaw-Curtis integration for the function `f` on `n`+1
nodes in [-1,1]. Returns the integral estimate and a vector of the
nodes used. Note: `n` must be even.
"""
function ccint(f, n)
    @assert iseven(n) "Value of `n` must be an even integer."
    # Find Chebyshev extreme nodes.
    θ = [i * π / n for i in 0:n]
    x = -cos.(θ)

    # Compute the C-C weights.
    c = similar(θ)
    c[[1, n+1]] .= 1 / (n^2 - 1)
    s = sum(cos.(2k * θ[2:n]) / (4k^2 - 1) for k in 1:n÷2-1)
    v = @. 1 - 2s - cos(n * θ[2:n]) / (n^2 - 1)
    c[2:n] = 2v / n

    # Evaluate integrand and integral.
    I = dot(c, f.(x))   # vector inner product
    return I, x
end
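
A quick sanity check, assuming the function above is in scope; the test integral $\int_{-1}^1 e^x\,dx = e - e^{-1}$ is an illustrative choice:

I, x = ccint(exp, 16)
abs(I - (exp(1) - exp(-1)))    # spectrally small for a smooth integrand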
Gauss–Legendre integration
glint.jl
using LinearAlgebra    # provides `diagm`, `eigen`, and `dot`

"""
    glint(f, n)

Perform Gauss-Legendre integration for the function `f` on `n` nodes
in (-1,1). Returns the integral estimate and a vector of the nodes used.
"""
function glint(f, n)
    # Nodes and weights are found via a tridiagonal eigenvalue problem.
    β = @. 0.5 / sqrt(1 - (2 * (1:n-1))^(-2))
    T = diagm(-1 => β, 1 => β)
    λ, V = eigen(T)
    p = sortperm(λ)
    x = λ[p]               # nodes
    c = @. 2V[1, p]^2       # weights

    # Evaluate the integrand and compute the integral.
    I = dot(c, f.(x))      # vector inner product
    return I, x
end
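
Gauss–Legendre with $n$ nodes is exact for polynomials of degree up to $2n-1$, so two nodes integrate a cubic exactly. A quick illustrative check (not from the text):

I, x = glint(x -> x^3 + x^2, 2)
abs(I - 2/3)    # exact value of the integral over (-1,1) is 2/3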
Integration over $(-\infty,\infty)$
intinf.jl
"""
    intinf(f, tol)

Perform adaptive doubly-exponential integration of function `f`
over (-Inf,Inf), with error tolerance `tol`. Returns the integral
estimate and a vector of the nodes used.
"""
function intinf(f, tol)
    # Relies on `intadapt` (adaptive integration) defined earlier in the book.
    x = t -> sinh(sinh(t))
    dx_dt = t -> cosh(t) * cosh(sinh(t))
    g = t -> f(x(t)) * dx_dt(t)

    # Find where to truncate the integration interval.
    M = 3
    while (abs(g(-M)) > tol / 100) || (abs(g(M)) > tol / 100)
        M += 0.5
        if isinf(x(M))
            @warn "Function may not decay fast enough."
            M -= 0.5
            break
        end
    end

    I, t = intadapt(g, -M, M, tol)
    return I, x.(t)
end
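
A minimal usage sketch, assuming `intadapt` is available; the Gaussian test integrand is an illustrative choice, with $\int_{-\infty}^{\infty} e^{-x^2}\,dx = \sqrt{\pi}$:

I, x = intinf(x -> exp(-x^2), 1e-10)
abs(I - sqrt(π))    # should be roughly within the requested tolerance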
Integration with endpoint singularities
intsing.jl
"""
    intsing(f, tol)

Adaptively integrate function `f` over (0,1), where `f` may be
singular at zero, with error tolerance `tol`. Returns the
integral estimate and a vector of the nodes used.
"""
function intsing(f, tol)
    # Relies on `intadapt` (adaptive integration) defined earlier in the book.
    x = t -> 2 / (1 + exp(2sinh(t)))
    dx_dt = t -> cosh(t) / cosh(sinh(t))^2
    g = t -> f(x(t)) * dx_dt(t)

    # Find where to truncate the integration interval.
    M = 3
    while abs(g(M)) > tol / 100
        M += 0.5
        if iszero(x(M))
            @warn "Function may grow too rapidly."
            M -= 0.5
            break
        end
    end

    I, t = intadapt(g, 0, M, tol)
    return I, x.(t)
end
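
A minimal usage sketch, assuming `intadapt` is available; the integrand is an illustrative choice with a singularity at zero, and $\int_0^1 x^{-1/2}\,dx = 2$:

I, x = intsing(x -> 1/sqrt(x), 1e-10)
abs(I - 2)    # should be roughly within the requested tolerance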

Examples

9.1 Polynomial interpolation

Example 9.1.1

Here is a vector of nodes.

t = [ 1, 1.5, 2, 2.25, 2.75, 3 ]
n = length(t) - 1;

Let’s apply the definition of the cardinal Lagrange polynomial for $k=2$. First we define a polynomial $q$ that is zero at all the nodes except $i=k$. Then $\ell_2$ is found by normalizing $q$ by $q(t_k)$.

k = 2
q(x) = prod(x - t[i] for i in [0:k-1; k+1:n] .+ 1)
ℓₖ(x) = q(x) / q(t[k+1]);

A plot confirms the cardinal property of the result.

using Plots, LaTeXStrings
plot(ℓₖ, 1, 3)
y = zeros(n+1);  y[k+1] = 1
scatter!(t, y, color=:black,
    xaxis=(L"x"),  yaxis=(L"\ell_2(x)"),
    title="Lagrange cardinal function")

Observe that $\ell_k$ is not between zero and one everywhere, unlike a hat function.

Example 9.1.3

Consider the problem of interpolating $\log(x)$ at these nodes:

t =  [ 1, 1.6, 1.9, 2.7, 3 ]
n = length(t) - 1;

Here $n=4$ and $f^{(5)}(\xi) = 4!/\xi^5$. For $\xi\in[1,3]$ we can say that $|f^{(5)}(\xi)| \le 4!$. Since the interpolation error is bounded by $|f^{(5)}(\xi)|\,|\Phi(x)|/5!$, we get

$$|f(x)-p(x)| \le \frac{4!}{5!} \left| \Phi(x) \right| = \frac{1}{5} \left| \Phi(x) \right|.$$
using Polynomials
Φ(x) = prod(x - tᵢ for tᵢ in t)
plot(x -> 0.2 * abs(Φ(x)), 1, 3, label=L"\frac{1}{5}|\Phi(x)|")
p = Polynomials.fit(t, log.(t))
plot!(t -> abs(log(t) - p(t)), 1, 3, label=L"|f(x)-p(x)|")
scatter!(t, zeros(size(t)), color=:black,
    xaxis=(L"x"), title="Interpolation error and upper bound")

The error is zero at the nodes, by the definition of interpolation. The error bound, as well as the error itself, has one local maximum between each consecutive pair of nodes.

9.2 The barycentric formula

Example 9.2.2
using Plots
f(x) = sin(exp(2x))
plot(f, 0, 1, label="function", legend=:bottomleft)
t = (0:3) / 3
y = f.(t)
scatter!(t, y, color=:black, label="nodes")
p = FNC.polyinterp(t, y)
plot!(p, 0, 1, label="interpolant", title="Interpolation on 4 nodes")

The curves must intersect at the interpolation nodes. For $n=6$ the interpolant is noticeably better.

plot(f, 0, 1, label="function", legend=:bottomleft)
t = (0:6) / 6
y = f.(t)
p = FNC.polyinterp(t, y)
scatter!(t, y, color=:black, label="nodes")
plot!(p, 0, 1, label="interpolant", title="Interpolation on 7 nodes")

9.3 Stability of polynomial interpolation

Example 9.3.1

We choose a function over the interval $[0,1]$.

f(x) = sin(exp(2x));

Here is a graph of $f$ and its polynomial interpolant using seven equally spaced nodes.

using Plots, LaTeXStrings
plot(f, 0, 1, label="function", legend=:bottomleft)
t = range(0, 1, 7)    # 7 equally spaced nodes
y = f.(t)
scatter!(t, y, label="nodes")

p = FNC.polyinterp(t, y)
plot!(p, 0, 1, label="interpolant", title="Equispaced interpolant, n=6")

This looks pretty good. We want to track the behavior of the error as $n$ increases. We will estimate the error in the continuous interpolant by sampling it at a large number of points and taking the max-norm.

using LinearAlgebra    # provides `norm`
n = 5:5:60
err = zeros(size(n))
x = range(0, 1, 2001)             # for measuring error
for (k, n) in enumerate(n)
    t = range(0, 1, n+1)          # equally spaced nodes
    y = f.(t)                     # interpolation data
    p = FNC.polyinterp(t, y)
    err[k] = norm((@. f(x) - p(x)), Inf)
end
plot(n, err, m=:o, 
    xaxis=(L"n"), yaxis=(:log10, "max error"),
    title="Interpolation error for equispaced nodes")

The error initially decreases as one would expect but then begins to grow. Both phases occur at rates that are exponential in $n$, i.e., $O(K^n)$ for a constant $K$, appearing linear on a semi-log plot.

Example 9.3.2

We plot $|\Phi(x)|$ over the interval $[-1,1]$ with equispaced nodes for different values of $n$.

plot(xaxis=(L"x"), yaxis=(:log10, L"|\Phi(x)|", [1e-25, 1]), legend=:bottomleft)
x = range(-1, 1, 2001)
for n in 10:10:50
    t = range(-1, 1, n+1)
    Φ(x) = prod(x - t for t in t)
    scatter!(x, abs.(Φ.(x)), m=(1, stroke(0)), label="n=$n")
end
title!("Error indicator for equispaced nodes")

Each time $\Phi$ passes through zero at an interpolation node, the value on the log scale should go to $-\infty$, which explains the numerous cusps on the curves.

Example 9.3.3

This function has infinitely many continuous derivatives on the entire real line and looks easy to approximate over $[-1,1]$.

f(x) = 1 / (x^2 + 16)
plot(f, -1, 1, title="Test function", legend=:none)

We start by doing equispaced polynomial interpolation for some small values of $n$.

plot(xaxis=(L"x"), yaxis=(:log10, L"|f(x)-p(x)|", [1e-20, 1]))
x = range(-1, 1, 2501)
n = 4:4:12
for (k, n) in enumerate(n)
    t = range(-1, 1, n+1)           # equally spaced nodes
    y = f.(t)                       # interpolation data
    p = FNC.polyinterp(t, y)
    err = @. abs(f(x) - p(x))
    plot!(x, err, m=(1, :o, stroke(0)), label="degree $n")
end
title!("Error for low degrees")

The convergence so far appears rather good, though not uniformly so. However, notice what happens as we continue to increase the degree.

n = @. 12 + 15 * (1:3)
plot(xaxis=(L"x"), yaxis=(:log10, L"|f(x)-p(x)|", [1e-20, 1]))
for (k, n) in enumerate(n)
    t = range(-1, 1, n+1)           # equally spaced nodes
    y = f.(t)                       # interpolation data
    p = FNC.polyinterp(t, y)
    err = @. abs(f(x) - p(x))
    plot!(x, err, m=(1, :o, stroke(0)), label="degree $n")
end
title!("Error for higher degrees")

The convergence in the middle can’t get any better than machine precision relative to the function values. So maintaining the growing gap between the center and the ends pushes the error curves upward exponentially fast at the ends, wrecking the convergence.

Example 9.3.4

Now we look at the error indicator function $\Phi$ for Chebyshev node sets.

plot(xaxis=(L"x"), yaxis=(:log10, L"|\Phi(x)|", [1e-18, 1e-2]))
x = range(-1, 1, 2001)
for n in 10:10:50
    t = [-cospi(k / n) for k in 0:n]
    Φ(x) = prod(x - t for t in t)
    plot!(x, abs.(Φ.(x)), m=(1, :o, stroke(0)), label="n=$n")
end
title!("Error indicator for Chebyshev nodes")

In contrast to the equispaced case, $|\Phi|$ decreases exponentially with $n$ almost uniformly across the interval.
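
This observation can be made precise. With $x=\cos\theta$, the interior Chebyshev points are the roots of the Chebyshev polynomial of the second kind $U_{n-1}$, and a short derivation (not part of the original demo) using the identity $U_{n-1}(\cos\theta) = \sin(n\theta)/\sin\theta$ gives

$$|\Phi(x)| = 2^{1-n} \left| \sin\theta \, \sin(n\theta) \right| \le 2^{1-n}, \qquad x = \cos\theta,$$

so the envelope of $|\Phi|$ shrinks geometrically with $n$ and is nearly uniform across $[-1,1]$, matching the plot.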

Example 9.3.5

Here again is the function from Demo 9.3.3 that provoked the Runge phenomenon when using equispaced nodes.

f(x) = 1 / (x^2 + 16);
plot(label="", xaxis=(L"x"), yaxis=(:log10, L"|f(x)-p(x)|", [1e-20, 1]))
x = range(-1, 1, 2001)
for (k, n) in enumerate([4, 10, 16, 40])
    t = [-cospi(k / n) for k in 0:n]
    y = f.(t)                           # interpolation data
    p = FNC.polyinterp(t, y)
    err = @. abs(f(x) - p(x))
    plot!(x, err, m=(1, :o, stroke(0)), label="degree $n")
end
title!("Error for Chebyshev interpolants")

By degree 16 the error is uniformly within machine epsilon, and, importantly, it stays there as $n$ increases. Note that as predicted by the error indicator function, the error is uniform over the interval at each value of $n$.

Example 9.3.6
using Logging
disable_logging(Logging.Warn);

On the left, we use a log-log scale, which makes fourth-order algebraic convergence $O(n^{-4})$ a straight line. On the right, we use a log-linear scale, which makes spectral convergence $O(K^{-n})$ linear.

n = 20:20:400
algebraic = @. 100 / n^4
spectral = @. 10 * 0.85^n
plot(n, [algebraic spectral], layout=(1, 2), subplot=1,
    xaxis=(L"n", :log10),  yaxis=(:log10, (1e-15, 1)),
    label=["algebraic" "spectral"],  title="Log-log")
plot!(n, [algebraic spectral], subplot=2,
    xaxis=L"n",  yaxis=(:log10, (1e-15, 1)),
    label=["algebraic" "spectral"],  title="log-linear")

9.4 Orthogonal polynomials

Example 9.4.1

Let’s approximate $e^x$ over the interval $[-1,1]$. We can sample it at, say, 15 points, and find the best-fitting straight line to that data.

using Plots
plot(exp, -1, 1, label="function")
t = range(-1, 1, 15)
y = exp.(t)
V = [ti^j for ti in t, j in 0:1]  # Vandermonde-ish
c = V \ y
plot!(t -> c[1] + c[2] * t, -1, 1;
    label="linear fit for 15 points", legend=:bottomright,
    xaxis=("x"),  yaxis=("value"),
    title="Least-squares fit of exp(x)")

There’s nothing special about 15 points. Choosing more doesn’t change the result much.

t = range(-1, 1, 150)
y = exp.(t)
V = [ ti^j for ti in t, j=0:1 ]
c = V \ y
plot!(t -> c[1] + c[2]*t, -1, 1,
    label="linear fit for 150 points",  legend=:bottomright,
    xaxis=("x"),  yaxis=("value"),
    title="Least-squares fit of exp(x)")

This situation is unlike interpolation, where the degree of the interpolant increases with the number of nodes. Here, the linear fit is apparently approaching a limit that we may think of as a continuous least-squares fit.

n = 40:60:400
slope = zeros(size(n))
intercept = zeros(size(n))

for (k, n) in enumerate(n)
    t = range(-1, 1, n)
    y = exp.(t)
    V = [ ti^j for ti in t, j in 0:1 ]
    c = V \ y
    intercept[k], slope[k] = c
end

using PrettyTables    # provides the @pt table macro
labels = ["n", "intercept", "slope"]
@pt :header=labels [n intercept slope]
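
For reference, the limiting values can be computed in closed form; a short calculation, not part of the original demo. With the Legendre basis $\{1, x\}$, the continuous least-squares line for $e^x$ on $[-1,1]$ has

$$\text{intercept} = \frac{1}{2}\int_{-1}^1 e^x\,dx = \sinh(1) \approx 1.1752, \qquad \text{slope} = \frac{3}{2}\int_{-1}^1 x\,e^x\,dx = \frac{3}{e} \approx 1.1036,$$

and the tabulated values approach these as $n$ grows.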

9.5 Trigonometric interpolation

Example 9.5.1

We will get a cardinal function without using an explicit formula, just by passing data that is 1 at one node and 0 at the others.

using Plots, LaTeXStrings, LinearAlgebra
N = 7
n = (N - 1) ÷ 2
t = 2 * (-n:n) / N
y = zeros(N)
y[n+1] = 1

p = FNC.triginterp(t, y);
plot(p, -1, 1)

scatter!(t, y, color=:black, 
    xaxis=(L"x"),  yaxis=(L"\tau(x)"),
    title="Trig cardinal function, N=$N")

Here is a 2-periodic function and one of its interpolants.

f(x) = exp( sinpi(x) - 2*cospi(x) )
y = f.(t)
p = FNC.triginterp(t, y)

plot(f, -1, 1, label="function",
    xaxis=(L"x"),  yaxis=(L"p(x)"),
    title="Trig interpolation, N=$N", legend=:top)
scatter!(t, y, m=:o, color=:black, label="nodes")
plot!(p, -1, 1, label="interpolant")

The convergence of the interpolant is spectral. We let $N$ go needlessly large here in order to demonstrate that, unlike polynomials, trigonometric interpolation is stable on equally spaced nodes. Note that when $N$ is even, the value of $n$ is not an integer but works fine for defining the nodes.

N = 2:2:60
err = zeros(size(N))
x = range(-1, 1, 2501)  # for measuring error
for (k,N) in enumerate(N)
    n = (N-1) / 2;   t = 2*(-n:n) / N;
    p = FNC.triginterp(t, f.(t))
    err[k] = norm(f.(x) - p.(x), Inf)
end

plot(N, err, m=:o,
    xaxis=(L"N"),  yaxis=(:log10, "max error"),
    title="Convergence of trig interpolation")
Example 9.5.2

This function has frequency content at $2\pi$, $-2\pi$, and $\pi$.

f(x) = 3 * cospi(2x) - cispi(x)    # cispi(x) := exp(1im * π * x)

To use fft, we set up nodes in the interval $[0,2)$.

n = 4;  N = 2n+1;
t = [ 2j / N for j in 0:N-1 ]      # nodes in [0,2)
y = f.(t);

We perform Fourier analysis using fft and then examine the resulting coefficients.

using FFTW, PrettyTables
c = fft(y) / N
freq = [0:n; -n:-1]
@pt :header=["k", "coefficient"] [freq round.(c, sigdigits=5)]

Note that $1.5 e^{2i\pi x}+1.5 e^{-2i\pi x} = 3 \cos(2\pi x)$, so this result is sensible.
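
We can pull the relevant entries out of c directly; an illustrative check, not part of the original demo. The $k=\pm 2$ coefficients come from the cosine term and the $k=1$ coefficient from the $-\mathrm{cispi}(x)$ term:

c[freq .== 2][1], c[freq .== -2][1], c[freq .== 1][1]    # ≈ 1.5, 1.5, -1.0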

Fourier’s greatest contribution to mathematics was to point out that every periodic function is just a combination of frequencies—infinitely many of them in general, but truncated for computational use. Here we look at the magnitudes of the coefficients for $f(x) = \exp( \sin(\pi x) )$.

f(x) = exp( sin(pi*x) )     # content at all frequencies
n = 9;  N = 2n+1;
t = [ 2j / N for j in 0:N-1 ]      # nodes in [0,2)
c = fft(f.(t)) / N

freq = [0:n; -n:-1]
scatter(freq, abs.(c);
    xaxis=(L"k", [-n, n]),  yaxis=(L"|c_k|", :log10), 
    title="Fourier coefficients",  legend=:none)

The Fourier coefficients of smooth functions decay exponentially in magnitude as a function of the frequency. This decay rate determines the convergence rate of the interpolation error.

9.6 Spectrally accurate integration

Example 9.6.1
f(t) = π * sqrt( cospi(t)^2 + sinpi(t)^2 / 4 );
n = 4:4:48
perim = zeros(size(n))
for (k, n) in enumerate(n)
    h = 2 / n
    t = @. h * (0:n-1) - 1
    perim[k] = h * sum(f.(t))
end
err = @. abs(perim - perim[end])    # use last value as "exact"
using PrettyTables    # provides @ptconf, @pt, ft_printf
@ptconf formatters=ft_printf(["%d", "%.15f", "%.2e"], 1:3)
@pt :header=["n", "perimeter", "error estimate"] [n perim err][1:end-1, :]

The approximations gain about one digit of accuracy for each constant increment of $n$, which is consistent with spectral convergence.
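
Equivalently, for geometric (spectral) convergence the ratio of successive errors should be roughly constant. A quick check, not part of the original demo (the final entry is excluded because it is zero by construction):

err[2:end-1] ./ err[1:end-2]    # roughly constant ratios indicate geometric decay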

Example 9.6.3

First consider the integral

$$\int_{-1}^1 \frac{1}{1+4x^2} \, dx = \arctan(2).$$
f(x) = 1 / (1 + 4x^2);
exact = atan(2);

We compare the two spectral integration methods for a range of $n$ values.

using Plots
n = 8:4:96
err = zeros(length(n), 2)
for (k, n) in enumerate(n)
  err[k, 1] = abs(exact - FNC.ccint(f, n)[1])
  err[k, 2] = abs(exact - FNC.glint(f, n)[1])
end

err[iszero.(err)] .= NaN    # remove from log-scale plot
plot(n, err, m=:o, label=["CC" "GL"],
    xaxis=("number of nodes"),  yaxis=(:log10, "error", [1e-16, 1]), 
    title="Spectral integration")

(The missing dots are where the error is exactly zero.) Gauss–Legendre does converge faster here, but at something less than twice the rate.

Now we try a more sharply peaked integrand:

$$\int_{-1}^1 \frac{1}{1+16x^2} \, dx = \frac{1}{2}\arctan(4).$$
f(x) = 1 / (1 + 16x^2);
exact = atan(4) / 2;
n = 8:4:96
err = zeros(length(n), 2)
for (k,n) in enumerate(n)
  err[k, 1] = abs(exact - FNC.ccint(f, n)[1])
  err[k, 2] = abs(exact - FNC.glint(f, n)[1])
end

err[iszero.(err)] .= NaN    # remove from log-scale plot
plot(n, err, m=:o, label=["CC" "GL"],
    xaxis=("number of nodes"),  yaxis=(:log10, "error", [1e-16, 1]), 
    title="Spectral integration")

The two are very close until about $n=40$, when the Clenshaw–Curtis method slows down.

Now let’s compare the spectral performance to that of our earlier adaptive method in intadapt. We will specify varying error tolerances and record the error as well as the total number of evaluations of $f$.

tol = 10 .^(-2.0:-2:-14)
n = zeros(size(tol))  
errAdapt = zeros(size(tol))
for (k, tol) in enumerate(tol)
  Q, t = FNC.intadapt(f, -1, 1, tol)
  errAdapt[k] = abs(exact - Q)
  n[k] = length(t)
end

errAdapt[iszero.(errAdapt)] .= NaN
plot!(n, errAdapt, m=:o, label="intadapt")
plot!(n, n.^(-4), l=:dash, label="4th order",
        xaxis=(:log10),  title="Spectral vs 4th order" )

At the core of intadapt is a fourth-order formula, and the results track that rate closely. For all but the most relaxed error tolerances, both spectral methods are far more efficient than the low-order counterpart. For other integrands, particularly those that vary nonuniformly across the interval, the adaptive method might be more competitive.

9.7 Improper integrals

Example 9.7.2
using Plots, LaTeXStrings
f(x) = 1 / (1 + x^2)
plot(f, -4, 4, layout=(2, 1),
    xlabel=L"x", 
    yaxis=(:log10, L"f(x)", (1e-16, 2)),
    title="Original integrand")

ξ(t) = sinh( π * sinh(t) / 2 )
dξ_dt(t) = π/2 * cosh(t) * cosh(π * sinh(t) / 2)
g(t) = f(ξ(t)) * dξ_dt(t)

plot!(g,-4, 4, subplot=2,
    xlabel=L"t",
    yaxis=(:log10, L"f(x(t))\cdot x'(t)", (1e-16, 2)),
    title="Transformed integrand")

This graph suggests that we capture all of the integrand values that are larger than machine epsilon by integrating in $t$ from -4 to 4.
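
A direct check (not part of the original demo) confirms the truncation choice:

g(-4), g(4)    # both values should be below machine epsilon (≈ 2.2e-16)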

Example 9.7.3
f(x) = 1 / (1 + x^2)
tol = [1 / 10^d for d in 5:0.5:14]
err = zeros(length(tol), 2)
len = zeros(Int, length(tol), 2)
for (i, tol) in enumerate(tol)
    I1, x1 = FNC.intadapt(f, -2/tol, 2/tol, tol)
    I2, x2 = FNC.intinf(f, tol)
    @. err[i,:] = abs(π - [I1, I2])
    @. len[i,:] = length([x1, x2])
end
plot(len, err, m=:o, label=["direct" "double exponential"])
n = [100, 10000]
plot!(n, 1000n.^(-4), 
    color=:black,  l=:dash,
    label="fourth-order",  legend=:bottomleft,
    xaxis=(:log10, "number of nodes"), 
    yaxis=(:log10, "error"),
    title="Comparison of integration methods")

Both methods are roughly fourth-order due to Simpson’s formula in the underlying adaptive integration method. At equal numbers of evaluation nodes, however, the double exponential method is consistently two to three orders of magnitude more accurate.

Example 9.7.4
f(x) = 1 / (10 * sqrt(x))
tol = [1 / 10^d for d in 5:0.5:14]
err = zeros(length(tol), 2)
len = zeros(Int, length(tol), 2)
for (i, tol) in enumerate(tol)
    I1, x1 = FNC.intadapt(f, (tol/20)^2, 1, tol)
    I2, x2 = FNC.intsing(f, tol)
    @. err[i, :] = abs(0.2 - [I1, I2])
    @. len[i, :] = length([x1, x2])
end
plot(len, err, m=:o, label=["direct" "double exponential"])
n = [30, 3000]
plot!(n, 30n.^(-4);
    color=:black,  l=:dash,
    label="fourth-order",  legend=:bottomleft,
    xaxis=(:log10, "number of nodes"),
    yaxis=(:log10, "error"),
    title="Comparison of integration methods")

As in Demo 9.7.3, the double exponential method is more accurate than direct integration by a few orders of magnitude. Equivalently, the same accuracy can be reached with many fewer nodes.