\section{Numeric Package}
\begin{Introduction}{Numeric Package}
The numeric package supplies algorithms based on approximation
techniques of numerical mathematics. The algorithms use
the \nameref{rounded} mode arithmetic of REDUCE, including
the variable precision feature which is exploited in some
algorithms in an adaptive manner in order to reach the
desired accuracy.
\end{Introduction}
\begin{Type}{Interval}
Intervals are generally coded as a lower bound and an upper bound
connected by the operator \name{..}, usually
associated with a variable in an
equation.
\begin{Syntax}
\meta{var} = \(\meta{low} .. \meta{high}\)
\end{Syntax}
where \meta{var} is a \nameref{kernel} and \meta{low}, \meta{high} are
numbers or expressions which evaluate to numbers with \meta{low} <= \meta{high}.
\begin{Examples}
x= (2.5 .. 3.5)
\end{Examples}
means that the variable x is taken in the range from 2.5 up to
3.5.
\end{Type}
\begin{Concept}{numeric accuracy}
The keyword parameters \name{accuracy=a} and \name{iterations=i},
where \name{a} and \name{i} must be positive integers, control the
iterative algorithms: the iteration is continued until
the local error is below $10^{-a}$; if that is impossible
within \name{i} steps, the iteration is terminated with an
error message. The values reached so far are then returned
as the result.
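For illustration, these parameters can be appended to a call of any of the
iterative operators described below, e.g.\ \name{num\_min}; this is only a
sketch, and the digits actually delivered depend on the current precision:
\begin{verbatim}
% ask for a local error below 10^(-8), allow at most 50 steps
num_min(sin(x) + x/5, x=0, accuracy=8, iterations=50);
\end{verbatim}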
\end{Concept}
\begin{Switch}{TRNUMERIC}
Normally the algorithms produce only a minimum of printed
output during their operation. In case of an unsuccessful
or unexpectedly long computation a \name{trace of the iteration} can be
printed by setting \name{trnumeric} \name{on}.
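A minimal usage sketch; the form and amount of the trace output depend on
the algorithm being traced:
\begin{verbatim}
on trnumeric;   % print a trace of the following iteration
num_solve({sin x = cos y, x + y = 1}, {x=1, y=2});
off trnumeric;  % back to quiet operation
\end{verbatim}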
\end{Switch}
\begin{Operator}{num_min}
\index{minimum}\index{steepest descent}\index{Fletcher Reeves}
The Fletcher Reeves version of the \name{steepest descent}
algorithm is used to find the \name{minimum} of a
function of one or more variables. The
function must have continuous partial derivatives with respect to all
variables. The starting point of the search can be
specified; if not, random values are taken instead.
The steepest descent algorithms in general find only local
minima.
\begin{Syntax}
\name{num\_min}\(\meta{exp},
\meta{var}[=\meta{val}] [,\meta{var}[=\meta{val}] ...]
[,accuracy=\meta{a}] [,iterations=\meta{i}]\)
or
\name{num\_min}\(\meta{exp}, \{
\meta{var}[=\meta{val}] [,\meta{var}[=\meta{val}] ...] \}
[,accuracy=\meta{a}] [,iterations=\meta{i}]\)
\end{Syntax}
where \meta{exp} is a function expression,
\meta{var} are the variables in \meta{exp} and
\meta{val} are the (optional) start values.
For \meta{a} and \meta{i} see \nameref{numeric accuracy}.
\name{Num\_min} tries to find the nearest local minimum along the descending
path starting at the given point. The result is a \nameref{list}
with the minimum function value as first element followed by a list
of \nameref{equation}\name{s}, where the variables are equated to the coordinates
of the result point.
\begin{Examples}
num_min(sin(x)+x/5, x)&\{4.9489585606,\{X=29.643767785\}\}\\
num_min(sin(x)+x/5, x=0)&\{ - 1.3342267466,\{X= - 1.7721582671\}\}
\end{Examples}
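A sketch of a call with two variables and explicit start values; the exact
minimum of this function is 1 at x = y = 0, and the returned numbers are
only approximations of these values:
\begin{verbatim}
% descend from the start point (1,1) towards the origin
num_min(x**2 + y**2 + 1, x=1, y=1);
\end{verbatim}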
\end{Operator}
\begin{Operator}{num_solve}
\index{equation solving}\index{equation system}\index{Newton iteration}
\index{root}
An adaptively damped Newton iteration is used to find
an approximate root of a function (function vector) or the
solution of an \nameref{equation} (equation system). The expressions
must have continuous derivatives for all variables.
A starting point for the iteration can be given. If it is not given,
random values are taken instead. When the number of
forms is not equal to the number of variables, the
Newton method cannot be applied; the minimum
of the sum of the absolute squares is located instead.
With \nameref{complex} on, solutions with imaginary parts can be
found, if either the expression(s) or the starting point
contain a nonzero imaginary part.
\begin{Syntax}
\name{num\_solve}\(\meta{exp}, \meta{var}[=\meta{val}][,accuracy=\meta{a}][,iterations=\meta{i}]\)
or
\name{num\_solve}\(\{\meta{exp},...,\meta{exp}\}, \meta{var}[=\meta{val}],...,\meta{var}[=\meta{val}]
[,accuracy=\meta{a}][,iterations=\meta{i}]\)
or
\name{num\_solve}\(\{\meta{exp},...,\meta{exp}\}, \{\meta{var}[=\meta{val}],...,\meta{var}[=\meta{val}]\}
[,accuracy=\meta{a}][,iterations=\meta{i}]\)
\end{Syntax}
where \meta{exp} are function expressions,
\meta{var} are the variables,
\meta{val} are optional start values.
For \meta{a} and \meta{i} see \nameref{numeric accuracy}.
\name{Num\_solve} tries to find a zero/solution of the expression(s).
The result is a list of equations, where the variables are
equated to the coordinates of the result point.
The \nameindex{Jacobian matrix} is stored as a side effect in the shared
variable \name{jacobian}.
\begin{Examples}
num_solve({sin x=cos y, x + y = 1},{x=1,y=2});&
\{X= - 1.8561957251,Y=2.856195584\}\\
jacobian;&
\begin{multilineoutput}{5cm}
[COS(X) SIN(Y)]
[ ]
[ 1 1 ]
\end{multilineoutput}
\end{Examples}
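As noted above, roots with nonzero imaginary part are reachable when
\nameref{complex} is on and the expression or the starting point has a
nonzero imaginary part; a sketch, where the expected root x = i is only
approximated by the actual output:
\begin{verbatim}
on complex;
num_solve(x**2 + 1, x=i);   % start value already close to the root i
off complex;
\end{verbatim}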
\end{Operator}
\begin{Operator}{num_int}
\index{integration}
For the numerical evaluation of univariate integrals
over a finite interval the following strategy is used:
If \nameref{int} finds a formal antiderivative
which is bounded in the integration interval, this
is evaluated at the end points and the difference
is returned.
Otherwise a \nameref{Chebyshev fit} is computed,
starting with order 20 and increasing, if necessary, up to order 80.
If that is recognized as sufficiently convergent
it is used for computing the integral by directly
integrating the coefficient sequence.
If none of these methods is successful, an
adaptive multilevel quadrature algorithm is used.
For multivariate integrals only the adaptive quadrature is used.
This algorithm tolerates isolated singularities.
The value \name{iterations} here limits the number of
local interval subdivision levels.
\meta{a} is a measure of the relative total discretization
error (comparison of order 1 and order 2 approximations).
\begin{Syntax}
\name{num\_int}\(\meta{exp},\meta{var}=\(\meta{l} .. \meta{u}\)
[,\meta{var}=\(\meta{l} .. \meta{u}\),...]
[,accuracy=\meta{a}][,iterations=\meta{i}]\)
\end{Syntax}
where \meta{exp} is the function to be integrated,
\meta{var} are the integration variables,
\meta{l} are the lower bounds,
\meta{u} are the upper bounds.
The result is the value of the integral.
\begin{Examples}
num_int(sin x,x=(0 .. 3.1415926));& 2.0000010334
\end{Examples}
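A multivariate integral is handled by the adaptive quadrature; as a sketch,
the following integral over the unit square has the exact value 0.25, which
the returned number approximates:
\begin{verbatim}
num_int(x*y, x=(0 .. 1), y=(0 .. 1));
\end{verbatim}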
\end{Operator}
\begin{Operator}{num_odesolve}
\index{Runge-Kutta}\index{initial value problem}\index{ODE}
The \name{Runge-Kutta} method of order 3 finds an approximate graph for
the solution of a real \name{ODE initial value problem}.
\begin{Syntax}
\name{num\_odesolve}\(\meta{exp},\meta{depvar}=\meta{start},
\meta{indep}=\(\meta{from} .. \meta{to}\)
[,accuracy=\meta{a}][,iterations=\meta{i}]\)
or
\name{num\_odesolve}\(\{\meta{exp},\meta{exp},...\},
\{ \meta{depvar}=\meta{start},\meta{depvar}=\meta{start},...\},
\meta{indep}=\(\meta{from} .. \meta{to}\)
[,accuracy=\meta{a}][,iterations=\meta{i}]\)
\end{Syntax}
where
\meta{depvar} and \meta{start} specify the dependent variable(s)
and the starting point value (vector),
\meta{indep}, \meta{from} and \meta{to} specify the independent variable
and the integration interval (starting point and end point),
\meta{exp} are equations or expressions which
contain the first derivatives of the dependent variable(s)
with respect to the independent variable.
The ODEs are converted to an explicit form, which is then
used for a Runge-Kutta iteration over the given range. The
number of steps is controlled by the value of \meta{i}
(default: 20). If the steps are too coarse to reach the desired
accuracy in the neighborhood of the starting point, the number is
increased automatically.
The result is a list of pairs, each representing a point of the
approximate solution of the ODE problem.
\begin{Examples}
depend(y,x);\\
num_odesolve(df(y,x)=y,y=1,x=(0 .. 1), iterations=5);&
\begin{multilineoutput}
\{\{0.0,1.0\},\{0.2,1.2214\},\{0.4,1.49181796\},\{0.6,1.8221064563\},
\{0.8,2.2255208258\},\{1.0,2.7182511366\}\}\\
\end{multilineoutput}
\end{Examples}
In most cases you must declare the dependency relation
between the variables explicitly using \nameref{depend};
otherwise the formal derivative might be converted to zero.
The operator \nameref{solve} is used to convert the form into
an explicit ODE. If that process fails or if it has no unique result,
the evaluation is stopped with an error message.
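For a system the dependent variables and their start values are passed as
lists (second syntax form); a minimal sketch, where the output list of
solution points is omitted:
\begin{verbatim}
depend(y,x); depend(z,x);
% the harmonic oscillator written as a first order system
num_odesolve({df(y,x)=z, df(z,x)=-y}, {y=0, z=1}, x=(0 .. 1));
\end{verbatim}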
\end{Operator}
\begin{Operator}{bounds}
Upper and lower bounds of a real valued function over an
\nameref{interval} or a rectangular multivariate domain are computed
by the operator \name{bounds}. The algorithmic basis is the computation
with inequalities: starting from the interval(s) of the
variables, the bounds are propagated in the expression
using the rules for inequality computation. Some knowledge
about the behavior of special functions like ABS, SIN, COS, EXP, LOG,
fractional exponentials etc. is integrated and is used
when the operator \name{bounds} is called with rounded mode on
(otherwise only algebraic evaluation rules are available).
If \name{bounds} finds a singularity within an interval, the evaluation
is stopped with an error message indicating the problem part
of the expression.
\begin{Syntax}
\name{bounds}\(\meta{exp},\meta{var}=\(\meta{l} .. \meta{u}\)
[,\meta{var}=\(\meta{l} .. \meta{u}\) ...]\)
or
\name{bounds}\(\meta{exp},\{\meta{var}=\(\meta{l} .. \meta{u}\)
[,\meta{var}=\(\meta{l} .. \meta{u}\) ...]\}\)
\end{Syntax}
where \meta{exp} is the function to be investigated,
\meta{var} are the variables of \meta{exp},
\meta{l} and \meta{u} specify the domain as a set of \nameref{interval}\name{s}.
\name{bounds} computes upper and lower bounds for the expression over the
given domain. An \nameref{interval} is returned.
\begin{Examples}
bounds(sin x,x=(1 .. 2));& -1 .. 1\\
on rounded;\\
bounds(sin x,x=(1 .. 2));& 0.84147098481 .. 1\\
bounds(x**2+x,x=(-0.5 .. 0.5));& - 0.25 .. 0.75\\
\end{Examples}
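A rectangular multivariate domain is described by one interval per variable;
in the following sketch the expected result is the interval 4 .. 6 (in
general the computed bounds need not be tight):
\begin{verbatim}
bounds(x + y, x=(1 .. 2), y=(3 .. 4));
\end{verbatim}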
\end{Operator}
\begin{Concept}{Chebyshev fit}
\index{approximation}
The operator family \name{Chebyshev\_...} implements approximation
and evaluation of functions by the Chebyshev method.
Let \name{T(n,a,b,x)} be the Chebyshev polynomial of order \name{n}
transformed to the interval \name{(a,b)}.
Then a function \name{f(x)} can be
approximated in \name{(a,b)} by a series
\begin{verbatim}
for i := 0:n sum c(i)*T(i,a,b,x)
\end{verbatim}
The operator \name{chebyshev\_fit} computes this approximation and
returns a list, which has as first element the sum expressed
as a polynomial and as second element the sequence
of Chebyshev coefficients.
\name{Chebyshev\_df} and \name{Chebyshev\_int} transform a Chebyshev
coefficient list into the coefficients of the corresponding
derivative or integral respectively. For evaluating a Chebyshev
approximation at a given point in the basic interval the
operator \name{Chebyshev\_eval} can be used.
\name{Chebyshev\_eval} is based on a recurrence relation which is
in general more stable than a direct evaluation of the
complete polynomial.
\begin{Syntax}
\name{chebyshev\_fit}\(\meta{fcn},\meta{var}=\(\meta{lo} .. \meta{hi}\),\meta{n}\)
\name{chebyshev\_eval}\(\meta{coeffs},\meta{var}=\(\meta{lo} .. \meta{hi}\),
\meta{var}=\meta{pt}\)
\name{chebyshev\_df}\(\meta{coeffs},\meta{var}=\(\meta{lo} .. \meta{hi}\)\)
\name{chebyshev\_int}\(\meta{coeffs},\meta{var}=\(\meta{lo} .. \meta{hi}\)\)
\end{Syntax}
where \meta{fcn} is an algebraic expression (the target function),
\meta{var} is the variable of \meta{fcn},
\meta{lo} and \meta{hi} are
numerical real values which describe an \nameref{interval} with \meta{lo} < \meta{hi},
the integer \meta{n} is the approximation order (set to 20 if missing),
\meta{pt} is a number in the interval and \meta{coeffs} is
a series of Chebyshev coefficients.
\begin{Examples}
on rounded;\\
w:=chebyshev_fit(sin x/x,x=(1 .. 3),5);&
\begin{multilineoutput}{6cm}
w := \{0.03824*x^3 - 0.2398*x^2 + 0.06514*x + 0.9778,
\{0.8991,-0.4066,-0.005198,0.009464,-0.00009511\}\}
\end{multilineoutput}\\
chebyshev_eval(second w, x=(1 .. 3), x=2.1);& 0.4111\\
\end{Examples}
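\name{Chebyshev\_df} and \name{chebyshev\_int} operate on the coefficient
list only. Continuing the example above, a sketch of their use, assuming
(as the description suggests) that the coefficient list returned by
\name{chebyshev\_int} can be fed back into \name{chebyshev\_eval};
\name{ci} is just an illustrative variable name and the resulting
coefficient lists are not reproduced here:
\begin{verbatim}
% coefficients of an antiderivative of the fitted function on (1 .. 3)
ci := chebyshev_int(second w, x=(1 .. 3));
% evaluate that antiderivative near the middle of the interval
chebyshev_eval(ci, x=(1 .. 3), x=2.1);
\end{verbatim}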
\end{Concept}
\begin{Operator}{num_fit}
\index{approximation}\index{least squares}
The operator \name{num\_fit} finds, for a set of
points, the linear combination of a given set of
functions (function basis) which approximates the
points best in the sense of the \name{least squares}
criterion (minimum of the sum of the squared deviations).
The solution is found as the zero of the
gradient vector of the sum of squared errors.
\begin{Syntax}
\name{num\_fit}\(\meta{vals},\meta{basis},\meta{var}=\meta{pts}\)
\end{Syntax}
where \meta{vals} is a list of numeric values,
\meta{var} is a variable used for the approximation,
\meta{pts} is a list of coordinate values which correspond to
\meta{var},
\meta{basis} is a set of functions varying in \meta{var} which is used
for the approximation.
The result is a list containing as first element the
function which approximates the given values, and as
second element a list of coefficients which were used
to build this function from the basis.
\begin{Examples}
pts:=for i:=1 step 1 until 5 collect i$\\
vals:=for i:=1 step 1 until 5 collect\\
for j:=1:i product j$\\
num_fit(vals,{1,x,x**2},x=pts);&
\begin{multilineoutput}{6cm}
\{14.571428571*X^2 - 61.428571429*X + 54.6,\{54.6,
- 61.428571429,14.571428571\}\}
\end{multilineoutput}
\end{Examples}
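The basis may contain arbitrary functions of the variable; reusing the
points and values from the example above, a fit against a different,
purely illustrative basis looks as follows (result omitted):
\begin{verbatim}
% same data, now fitted with a constant, a linear and a cubic term
num_fit(vals, {1, x, x**3}, x=pts);
\end{verbatim}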
\end{Operator}