% The REDUCE User's Manual --- LaTeX version. % Codemist Version with additional material in the same volume % To create this manual, the following steps are recommended: % latex r38 % bibtex r38 % latex r38 % latex r38 % makeindex r38 % latex r38 % dvipdfm r38 %% Does not contain %% bibl.tex \documentclass[11pt,letterpaper]{book} \usepackage{makeidx} % \usepackage{times} \usepackage[dvipdfm]{graphicx} \usepackage[dvipdfm]{hyperref} \hyphenation{unique} \hyphenation{effect} \hyphenation{Stand-ard} \hyphenation{libr-ary} \hyphenation{direct-ory} \hyphenation{state-ment} \hyphenation{argu-ment} \hyphenation{oper-ators} \hyphenation{symb-olic} \hyphenation{needs} \hyphenation{GVARSLAST} \hyphenation{ODE-SOLVE} \hyphenation{hyper-geometric} \hyphenation{equat-ion} \hyphenation{equat-ions} \hyphenation{OFF} \hyphenation{Opt-ions} \hyphenation{execu-tion} \hyphenation{poly-nom-ials} \hyphenation{func-t-ions} \hyphenation{Inte-grals} \hyphenation{Stutt-gart} % More space in TOC requires this in book.sty %\def\l@section{\@dottedtocline{1}{1.5em}{2.8em}} %\def\l@subsection{\@dottedtocline{2}{4.3em}{3.2em}} %\def\l@subsubsection{\@dottedtocline{3}{7.5em}{4.2em}} %\def\l@paragraph{\@dottedtocline{4}{10.5em}{5em}} %\def\l@subparagraph{\@dottedtocline{5}{12.5em}{6em}} \setlength{\parindent}{0pt} \setlength{\parskip}{6pt} \setlength{\hfuzz}{5pt} % don't complain about tiny overfull boxes \setlength{\vfuzz}{1pt} \renewcommand{\sloppy}{\tolerance=9999\relax%} \setlength{\emergencystretch}{0.2\hsize}} \tolerance=1000 \raggedbottom \newlength{\reduceboxwidth} \setlength{\reduceboxwidth}{4in} \newlength{\redboxwidth} \setlength{\redboxwidth}{3.5in} \newlength{\rboxwidth} \setlength{\rboxwidth}{2.6in} \newcommand{\REDUCE}{REDUCE} \newcommand{\RLISP}{RLISP} \newcommand{\underscore}{\_} \newcommand{\ttindex}[1]{{\renewcommand{\_}{\protect\underscore}% \index{#1@{\tt #1}}}} \newcommand{\COMPATNOTE}{{\em Compatibility Note:\ }} % \meta{...} is an alternative sentential form in descriptions using \it. \newcommand{\meta}[1]{\mbox{$\langle$\it#1\/$\rangle$}} % Will print out a heading in bold, and then indent the following text. \def\indented{\list{}{ \itemindent\listparindent \rightmargin\leftmargin}\item[]} \let\endindented=\endlist \newenvironment{describe}[1]{\par{\bf #1}\begin{indented}}{\end{indented}} % Close up default vertical spacings: \setlength{\topsep}{0.5\baselineskip} % above and below environments \setlength{\itemsep}{\topsep} \setlength{\abovedisplayskip}{\topsep} % for "long" equations \setlength{\belowdisplayskip}{\topsep} \newcommand{\key}[1]{\fbox{\sf #1}} \newcommand{\extendedmanual}[1]{#1} \pagestyle{empty} \makeindex \begin{document} \pagestyle{empty} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% BeginCodemist \vspace*{2.0in} \begin{center} {\Huge\bf {\REDUCE}} \\ [0.2cm] {\LARGE\bf User's and \\ Contributed Packages Manual\vspace{0.4cm} \\ Version 3.8} \vspace{0.5in}\large\bf Anthony C.\ Hearn \\ Santa Monica, CA \\ and Codemist Ltd. \vspace{0.1in} \bf Email: reduce@rand.org \vspace{0.5in} \large\bf July 2003 \end{center} \newpage \vspace*{3.0in} \noindent Copyright \copyright 2004 Anthony C. Hearn. All rights reserved. \\ \mbox{}\\ % \noindent Registered system holders may reproduce all or any part of this publication for internal purposes, provided that the source of the material is clearly acknowledged, and the copyright notice is retained. \newpage \pagestyle{headings} \centerline{\bf \large Preface} This volume has been prepared by Codemist Ltd. 
from the {\LaTeX} documentation sources distributed with {\REDUCE} 3.8. It incorporates the User's Manual, and documentation for all the User Contributed Packages as a second Part. A common index and table of contents has been prepared. We hope that this single volume will be more convenient for {\REDUCE} users than having two unrelated documents. Particularly in Part 2 the text of the authors has been extensively edited and modified and so the responsibility for any errors rests with us. Parts I and III were written by Anthony C. Hearn. Part II is based on texts by:\\ Werner Antweiler, Victor Adamchik, Joachim Apel, Alan Barnes, Andreas Bernig, Yu.~A.~Blinkov, Russell Bradford, Chris Cannam, Hubert Caprasse, C.~{Dicrescenzo}, Alain Dresse, Ladislav Drska, James W.~Eastwood, John Fitch, Kerry Gaskell, Barbara L.~Gates, Karin Gatermann, Hans-Gert Gr\"abe, David Harper, David {H}artley, Anthony C.~Hearn, J.~A.~van Hulzen, V.~Ilyin, Stanley L.~Kameny, Fujio Kako, C.~Kazasov, Wolfram Koepf, A.~Kryukov, Richard Liska, Kevin McIsaac, Malcolm A.~H.~MacCallum, Herbert Melenk, H.~M.~M\"oller, Winfried Neun, Julian Padget, Matt Rebbeck, F.~Richard-Jung, A.~Rodionov, Carsten and Franziska Sch\"obel, {Rainer} Sch\"opf, Stephen Scowcroft, Eberhard Schr\"{u}fer, Fritz Schwarz, M.~Spiridonova, A.~Taranov, Lisa Temme, Walter Tietze, V.~Tomov, E.~Tournier, Philip A.~Tuckey, G.~\"{U}\c{c}oluk, Mathias Warns, Thomas Wolf, Francis J.~Wright and A.~Yu.~Zharkov. \noindent \rightline{February 2004} \\ Codemist Ltd \\ ``Alta'', Horsecombe Vale \\ Combe Down \\ Bath, England \newpage \tableofcontents \part{{\REDUCE} User's Manual} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% EndCodemist %%\begin{titlepage} \vspace*{2.0in} \begin{center} {\Huge\bf {\REDUCE}} \\ [0.2cm] {\LARGE\bf User's Manual\vspace{0.4cm} \\ Version 3.8} \vspace{0.5in}\large\bf Anthony C.\ Hearn \\ Santa Monica, CA, USA \vspace{0.1in} \bf Email: reduce@rand.org \vspace{0.5in} \large\bf July 2003 \end{center} %%\end{titlepage} \newpage \vspace*{3.0in} \noindent Copyright \copyright 2003 Anthony C. Hearn. All rights reserved. \\ \mbox{}\\ % \noindent Registered system holders may reproduce all or any part of this publication for internal purposes, provided that the source of the material is clearly acknowledged, and the copyright notice is retained. \pagestyle{headings} \chapter*{Abstract} \addcontentsline{toc}{chapter}{Abstract} This document provides the user with a description of the algebraic programming system {\REDUCE}. The capabilities of this system include: \begin{enumerate} \item expansion and ordering of polynomials and rational functions, \item substitutions and pattern matching in a wide variety of forms, \item automatic and user controlled simplification of expressions, \item calculations with symbolic matrices, \item arbitrary precision integer and real arithmetic, \item facilities for defining new functions and extending program syntax, \item analytic differentiation and integration, \item factorization of polynomials, \item facilities for the solution of a variety of algebraic equations, \item facilities for the output of expressions in a variety of formats, \item facilities for generating numerical programs from symbolic input, \item Dirac matrix calculations of interest to high energy physicists. 
\end{enumerate} \chapter*{Acknowledgment} The production of this version of the manual has been the result of the contributions of a large number of individuals who have taken the time and effort to suggest improvements to previous versions, and to draft new sections. Particular thanks are due to Gerry Rayna, who provided a draft rewrite of most of the first half of the manual. Other people who have made significant contributions have included John Fitch, Martin Griss, Stan Kameny, Jed Marti, Herbert Melenk, Don Morrison, Arthur Norman, Eberhard Schr\"ufer, Larry Seward and Walter Tietze. Finally, Richard Hitt produced a {\TeX} version of the {\REDUCE} 3.3 manual, which has been a useful guide for the production of the {\LaTeX} version of this manual. \chapter{Introductory Information} \index{Introduction}{\REDUCE} is a system for carrying out algebraic operations accurately, no matter how complicated the expressions become. It can manipulate polynomials in a variety of forms, both expanding and factoring them, and extract various parts of them as required. {\REDUCE} can also do differentiation and integration, but we shall only show trivial examples of this in this introduction. Other topics not considered include the use of arrays, the definition of procedures and operators, the specific routines for high energy physics calculations, the use of files to eliminate repetitious typing and for saving results, and the editing of the input text. Also not considered in any detail in this introduction are the many options that are available for varying computational procedures, output forms, number systems used, and so on. {\REDUCE} is designed to be an interactive system, so that the user can input an algebraic expression and see its value before moving on to the next calculation. For those systems that do not support interactive use, or for those calculations, especially long ones, for which a standard script can be defined, {\REDUCE} can also be used in batch mode. In this case, a sequence of commands can be given to {\REDUCE} and results obtained without any user interaction during the computation. In this introduction, we shall limit ourselves to the interactive use of {\REDUCE}, since this illustrates most completely the capabilities of the system. When {\REDUCE} is called, it begins by printing a banner message like: {\small\begin{verbatim} REDUCE 3.8, 15-Jul-2003 ... \end{verbatim}} where the version number and the system release date will change from time to time. It then prompts the user for input by: {\small\begin{verbatim} 1: \end{verbatim}} You can now type a {\REDUCE} statement, terminated by a semicolon to indicate the end of the expression, for example: {\small\begin{verbatim} (x+y+z)^2; \end{verbatim}} This expression would normally be followed by another character (a \key{Return} on an ASCII keyboard) to ``wake up'' the system, which would then input the expression, evaluate it, and return the result: {\small\begin{verbatim} 2 2 2 X + 2*X*Y + 2*X*Z + Y + 2*Y*Z + Z \end{verbatim}} Let us review this simple example to learn a little more about the way that {\REDUCE} works. First, we note that {\REDUCE} deals with variables, and constants like other computer languages, but that in evaluating the former, a variable can stand for itself. Expression evaluation normally follows the rules of high school algebra, so the only surprise in the above example might be that the expression was expanded. 
{\REDUCE} normally expands expressions where possible, collecting like terms and ordering the variables in a specific manner. However, expansion, ordering of variables, format of output and so on is under control of the user, and various declarations are available to manipulate these. Another characteristic of the above example is the use of lower case on input and upper case on output. In fact, input may be in either mode, but output is usually in lower case. To make the difference between input and output more distinct in this manual, all expressions intended for input will be shown in lower case and output in upper case. However, for stylistic reasons, we represent all single identifiers in the text in upper case. Finally, the numerical prompt can be used to reference the result in a later computation. As a further illustration of the system features, the user should try: {\small\begin{verbatim} for i:= 1:40 product i; \end{verbatim}} The result in this case is the value of 40!, {\small\begin{verbatim} 815915283247897734345611269596115894272000000000 \end{verbatim}} You can also get the same result by saying {\small\begin{verbatim} factorial 40; \end{verbatim}} Since we want exact results in algebraic calculations, it is essential that integer arithmetic be performed to arbitrary precision, as in the above example. Furthermore, the {\tt FOR} statement in the above is illustrative of a whole range of combining forms that {\REDUCE} supports for the convenience of the user. Among the many options in {\REDUCE} is the use of other number systems, such as multiple precision floating point with any specified number of digits --- of use if roundoff in, say, the $100^{th}$ digit is all that can be tolerated. In many cases, it is necessary to use the results of one calculation in succeeding calculations. One way to do this is via an assignment for a variable, such as {\small\begin{verbatim} u := (x+y+z)^2; \end{verbatim}} If we now use {\tt U} in later calculations, the value of the right-hand side of the above will be used. The results of a given calculation are also saved in the variable {\tt WS}\ttindex{WS} (for WorkSpace), so this can be used in the next calculation for further processing. For example, the expression {\small\begin{verbatim} df(ws,x); \end{verbatim}} following the previous evaluation will calculate the derivative of {\tt (x+y+z)\verb|^|2} with respect to {\tt X}. Alternatively, {\small\begin{verbatim} int(ws,y); \end{verbatim}} would calculate the integral of the same expression with respect to y. {\REDUCE} is also capable of handling symbolic matrices. For example, {\small\begin{verbatim} matrix m(2,2); \end{verbatim}} declares m to be a two by two matrix, and {\small\begin{verbatim} m := mat((a,b),(c,d)); \end{verbatim}} gives its elements values. Expressions that include {\tt M} and make algebraic sense may now be evaluated, such as {\tt 1/m} to give the inverse, {\tt 2*m - u*m\verb|^|2} to give us another matrix and {\tt det(m)} to give us the determinant of {\tt M}. {\REDUCE} has a wide range of substitution capabilities. The system knows about elementary functions, but does not automatically invoke many of their well-known properties. 
For example, products of trigonometrical functions are not converted automatically into multiple angle expressions, but if the user wants this, he can say, for example: {\small\begin{verbatim} (sin(a+b)+cos(a+b))*(sin(a-b)-cos(a-b)) where cos(~x)*cos(~y) = (cos(x+y)+cos(x-y))/2, cos(~x)*sin(~y) = (sin(x+y)-sin(x-y))/2, sin(~x)*sin(~y) = (cos(x-y)-cos(x+y))/2; \end{verbatim}} where the tilde in front of the variables {\tt X} and {\tt Y} indicates that the rules apply for all values of those variables. The result of this calculation is {\small\begin{verbatim} -(COS(2*A) + SIN(2*B)) \end{verbatim}} \extendedmanual{See also the user-contributed packages ASSIST (chapter~\ref{ASSIST}), CAMAL (chapter~\ref{CAMAL}) and TRIGSIMP (chapter~\ref{TRIGSIMP}).} Another very commonly used capability of the system, and an illustration of one of the many output modes of {\REDUCE}, is the ability to output results in a FORTRAN compatible form. Such results can then be used in a FORTRAN based numerical calculation. This is particularly useful as a way of generating algebraic formulas to be used as the basis of extensive numerical calculations. For example, the statements {\small\begin{verbatim} on fort; df(log(x)*(sin(x)+cos(x))/sqrt(x),x,2); \end{verbatim}} will result in the output {\small\begin{verbatim} ANS=(-4.*LOG(X)*COS(X)*X**2-4.*LOG(X)*COS(X)*X+3.* . LOG(X)*COS(X)-4.*LOG(X)*SIN(X)*X**2+4.*LOG(X)* . SIN(X)*X+3.*LOG(X)*SIN(X)+8.*COS(X)*X-8.*COS(X)-8. . *SIN(X)*X-8.*SIN(X))/(4.*SQRT(X)*X**2) \end{verbatim}} These algebraic manipulations illustrate the algebraic mode of {\REDUCE}. {\REDUCE} is based on Standard Lisp. A symbolic mode is also available for executing Lisp statements. These statements follow the syntax of Lisp, e.g. {\small\begin{verbatim} symbolic car '(a); \end{verbatim}} Communication between the two modes is possible. With this simple introduction, you are now in a position to study the material in the full {\REDUCE} manual in order to learn just how extensive the range of facilities really is. If further tutorial material is desired, the seven {\REDUCE} Interactive Lessons by David R. Stoutemyer are recommended. These are normally distributed with the system. \chapter{Structure of Programs} A {\REDUCE} program\index{Program structure} consists of a set of functional commands which are evaluated sequentially by the computer. These commands are built up from declarations, statements and expressions. Such entities are composed of sequences of numbers, variables, operators, strings, reserved words and delimiters (such as commas and parentheses), which in turn are sequences of basic characters. \section{The {\REDUCE} Standard Character Set} \index{Character set}The basic characters which are used to build {\REDUCE} symbols are the following: \begin{enumerate} \item The 26 letters {\tt a} through {\tt z} \item The 10 decimal digits {\tt 0} through {\tt 9} \item The special characters \_\_ ! " \$ \% ' ( ) * + , - . / : ; $<$ $>$ = \{ \} $<$blank$>$ \end{enumerate} With the exception of strings and characters preceded by an exclamation mark\index{Exclamation mark}, the case of characters is ignored: depending of the underlying LISP they will all be converted internally into lower case or upper case: {\tt ALPHA}, {\tt Alpha} and {\tt alpha} represent the same symbol. Most implementations allow you to switch this conversion off. The operating instructions for a particular implementation should be consulted on this point. 
For portability, we shall limit ourselves to the standard character set in this exposition. \section{Numbers} \index{Number}There are several different types of numbers available in \REDUCE. Integers consist of a signed or unsigned sequence of decimal digits written without a decimal point, for example: {\small\begin{verbatim} -2, 5396, +32 \end{verbatim}} In principle, there is no practical limit on the number of digits permitted as exact arithmetic is used in most implementations. (You should however check the specific instructions for your particular system implementation to make sure that this is true.) For example, if you ask for the value of $2^{2000}$ you get it displayed as a number of 603 decimal digits, taking up nine lines of output on an interactive display. It should be borne in mind of course that computations with such long numbers can be quite slow. Numbers that aren't integers are usually represented as the quotient of two integers, in lowest terms: that is, as rational numbers. In essentially all versions of {\REDUCE} it is also possible (but not always desirable!) to ask {\REDUCE} to work with floating point approximations to numbers again, to any precision. Such numbers are called {\em real}. \index{Real} They can be input in two ways: \begin{enumerate} \item as a signed or unsigned sequence of any number of decimal digits with an embedded or trailing decimal point. \item as in 1. followed by a decimal exponent which is written as the letter {\tt E} followed by a signed or unsigned integer. \end{enumerate} e.g. {\tt 32. +32.0 0.32E2} and {\tt 320.E-1} are all representations of 32. The declaration {\tt SCIENTIFIC\_NOTATION}\ttindex{SCIENTIFIC\_NOTATION} controls the output format of floating point numbers. At the default settings, any number with five or less digits before the decimal point is printed in a fixed-point notation, e.g., {\tt 12345.6}. Numbers with more than five digits are printed in scientific notation, e.g., {\tt 1.234567E+5}. Similarly, by default, any number with eleven or more zeros after the decimal point is printed in scientific notation. To change these defaults, {\tt SCIENTIFIC\_NOTATION} can be used in one of two ways. {\tt SCIENTIFIC\_NOTATION} {\em m};, where {\em m\/} is a positive integer, sets the printing format so that a number with more than {\em m\/} digits before the decimal point, or {\em m\/} or more zeros after the decimal point, is printed in scientific notation. {\tt SCIENTIFIC\_NOTATION} \{{\em m,n}\}, with {\em m\/} and {\em n\/} both positive integers, sets the format so that a number with more than {\em m\/} digits before the decimal point, or {\em n\/} or more zeros after the decimal point is printed in scientific notation. {\it CAUTION:} The unsigned part of any number\index{Number} may {\em not\/} begin with a decimal point, as this causes confusion with the {\tt CONS} (.) operator, i.e., NOT ALLOWED: {\tt .5 -.23 +.12}; use {\tt 0.5 -0.23 +0.12} instead. \section{Identifiers} Identifiers\index{Identifier} in {\REDUCE} consist of one or more alphanumeric characters (i.e. alphabetic letters or decimal digits) the first of which must be alphabetic. The maximum number of characters allowed is implementation dependent, although twenty-four is permitted in most implementations. In addition, the underscore character (\_) is considered a letter if it is {\it within} an identifier. 
For example, {\small\begin{verbatim} a az p1 q23p a_very_long_variable \end{verbatim}} are all identifiers, whereas {\small\begin{verbatim} _a \end{verbatim}} is not. A sequence of alphanumeric characters in which the first is a digit is interpreted as a product. For example, {\tt 2ab3c} is interpreted as {\tt 2*ab3c}. There is one exception to this: If the first letter after a digit is {\tt E}, the system will try to interpret that part of the sequence as a real number\index{Real}, which may fail in some cases. For example, {\tt 2E12} is the real number $2.0*10^{12}$, {\tt 2e3c} is 2000.0*C, and {\tt 2ebc} gives an error. Special characters, such as $-$, *, and blank, may be used in identifiers too, even as the first character, but each must be preceded by an exclamation mark in input. For example: {\small\begin{verbatim} light!-years d!*!*n good! morning !$sign !5goldrings \end{verbatim}} {\it CAUTION:} Many system identifiers have such special characters in their names (especially * and =). If the user accidentally picks the name of one of them for his own purposes it may have catastrophic consequences for his {\REDUCE} run. Users are therefore advised to avoid such names. Identifiers are used as variables, labels and to name arrays, operators and procedures. \subsection*{Restrictions} The reserved words listed in another section may not be used as identifiers. No spaces may appear within an identifier, and an identifier may not extend over a line of text. (Hyphenation of an identifier, by using a reserved character as a hyphen before an end-of-line character is possible in some versions of {\REDUCE}). \section{Variables} Every variable\index{Variable} is named by an identifier, and is given a specific type. The type is of no concern to the ordinary user. Most variables are allowed to have the default type, called {\em scalar}. These can receive, as values, the representation of any ordinary algebraic expression. In the absence of such a value, they stand for themselves. \subsection*{Reserved Variables} Several variables\index{Reserved variable} in {\REDUCE} have particular properties which should not be changed by the user. These variables include: \begin{list}{}{\renewcommand{\makelabel}[1]{{\tt#1}\hspace{\fill}}% \settowidth{\labelwidth}{\tt INFINITY}% \setlength{\labelsep}{1em}% \settowidth{\leftmargin}{\tt INFINITY\hspace*{\labelsep}}} \item[E] Intended to represent the base of \ttindex{E} the natural logarithms. {\tt log(e)}, if it occurs in an expression, is automatically replaced by 1. If {\tt ROUNDED}\ttindex{ROUNDED} is on, {\tt E} is replaced by the value of E to the current degree of floating point precision\index{Numerical precision}. \item[I] Intended to represent the square \ttindex{I} root of $-1$. {\tt i\verb|^|2} is replaced by $-1$, and appropriately for higher powers of {\tt I}. This applies only to the symbol {\tt I} used on the top level, not as a formal parameter in a procedure, a local variable, nor in the context {\tt for i:= ...} \item[INFINITY] Intended to represent $\infty$ \ttindex{INFINITY} in limit and power series calculations for example. Note however that the current system does {\em not\/} do proper arithmetic on $\infty$. For example, {\tt infinity + infinity} is {\tt 2*infinity}. \item[NIL] In {\REDUCE} (algebraic mode only) taken as a synonym for zero. Therefore {\tt NIL} cannot be used as a variable. \item[PI] Intended to represent the circular \ttindex{PI} constant. 
With {\tt ROUNDED} on, it is replaced by the value of $\pi$ to the current degree of floating point precision. \item[T] Should not be used as a formal \ttindex{T} parameter or local variable in procedures, since conflict arises with the symbolic mode meaning of T as {\em true}. \end{list} Other reserved variables, such as {\tt LOW\_POW}, described in other sections, are listed in Appendix A. Using these reserved variables\index{Reserved variable} inappropriately will lead to errors. There are also internal variables used by {\REDUCE} that have similar restrictions. These usually have an asterisk in their names, so it is unlikely a casual user would use one. An example of such a variable is {\tt K!*} used in the asymptotic command package. Certain words are reserved in {\REDUCE}. They may only be used in the manner intended. A list of these is given in the section ``Reserved Identifiers''. There are, of course, an impossibly large number of such names to keep in mind. The reader may therefore want to make himself a copy of the list, deleting the names he doesn't think he is likely to use by mistake. \section{Strings} Strings\index{String} are used in {\tt WRITE} statements, in other output statements (such as error messages), and to name files. A string consists of any number of characters enclosed in double quotes. For example: {\small\begin{verbatim} "A String". \end{verbatim}} Lower case characters within a string are not converted to upper case. The string {\tt ""} represents the empty string. A double quote may be included in a string by preceding it by another double quote. Thus {\tt "a""b"} is the string {\tt a"b}, and {\tt """"} is the string {\tt "}. \section{Comments} Text can be included in program\index{Program} listings for the convenience of human readers, in such a way that {\REDUCE} pays no attention to it. There are two ways to do this: \begin{enumerate} \item Everything from the word {\tt COMMENT}\ttindex{COMMENT} to the next statement terminator, normally ; or \$, is ignored. Such comments can be placed anywhere a blank could properly appear. (Note that {\tt END} and $>>$ are {\em not\/} treated as {\tt COMMENT} delimiters!) \item Everything from the symbol {\tt \%}\index{Percent sign} to the end of the line on which it appears is ignored. Such comments can be placed as the last part of any line. Statement terminators have no special meaning in such comments. Remember to put a semicolon before the {\tt \%} if the earlier part of the line is intended to be so terminated. Remember also to begin each line of a multi-line {\tt \%} comment with a {\tt \%} sign. \end{enumerate} \section{Operators} \label{sec-operators} Operators\index{Operator} in {\REDUCE} are specified by name and type. There are two types, infix\index{Infix operator} and prefix. \index{Prefix operator} Operators can be purely abstract, just symbols with no properties; they can have values assigned (using {\tt :=} or simple {\tt LET} declarations) for specific arguments; they can have properties declared for some collection of arguments (using more general {\tt LET} declarations); or they can be fully defined (usually by a procedure declaration). Infix operators\index{Infix operator} have a definite precedence with respect to one another, and normally occur between their arguments. 
For example: \begin{quote} \begin{tabbing} {\tt a + b - c} \hspace{1.5in} \= (spaces optional) \\ {\tt x<y and y=z} \> (spaces required where shown) \end{tabbing} \end{quote} Spaces can be freely inserted between operators and variables or operators and operators. They are required only where operator names are spelled out with letters (such as the {\tt AND} in the example) and must be unambiguously separated from another such or from a variable (like {\tt Y}). Wherever one space can be used, so can any larger number. Prefix operators occur to the left of their arguments, which are written as a list enclosed in parentheses and separated by commas, as with normal mathematical functions, e.g., {\small\begin{verbatim} cos(u) df(x^2,x) q(v+w) \end{verbatim}} Unmatched parentheses, incorrect groupings of infix operators \index{Infix operator} and the like, naturally lead to syntax errors. The parentheses can be omitted (replaced by a space following the operator\index{Operator} name) if the operator is unary and the argument is a single symbol or begins with a prefix operator name: \begin{quote} \begin{tabbing} {\tt cos y} \hspace{1.75in} \= means cos(y) \\ {\tt cos (-y)} \> -- parentheses necessary \\ {\tt log cos y} \> means log(cos(y)) \\ {\tt log cos (a+b)} \> means log(cos(a+b)) \end{tabbing} \end{quote} but \begin{quote} \begin{tabbing} {\tt cos a*b} \hspace{1.6in} \= means (cos a)*b \\ {\tt cos -y} \> is erroneous (treated as a variable \\ \> ``cos'' minus the variable y) \end{tabbing} \end{quote} A unary prefix operator\index{Prefix operator} has a precedence \index{Operator precedence} higher than any infix operator, including unary infix operators. \index{Infix operator} In other words, {\REDUCE} will always interpret {\tt cos~y + 3} as {\tt (cos~y) + 3} rather than as {\tt cos(y + 3)}. Infix operators may also be used in a prefix format on input, e.g., {\tt +(a,b,c)}. On output, however, such expressions will always be printed in infix form (i.e., {\tt a + b + c} for this example). A number of prefix operators are built into the system with predefined properties. Users may also add new operators and define their rules for simplification. The built in operators are described in another section. \subsection*{Built-In Infix Operators} The following infix operators\index{Infix operator} are built into the system. They are all defined internally as procedures. {\small\begin{verbatim} <infix operator>::= where|:=|or|and|member|memq|=|neq|eq| >=|>|<=|<|+|-|*|/|^|**|. \end{verbatim}} These operators may be further divided into the following subclasses: {\small\begin{verbatim} <assignment operator> ::= := <logical operator> ::= or|and|member|memq <relational operator> ::= =|neq|eq|>=|>|<=|< <substitution operator> ::= where <arithmetic operator> ::= +|-|*|/|^|** <construction operator> ::= . \end{verbatim}} {\tt MEMQ} and {\tt EQ} are not used in the algebraic mode of {\REDUCE}. They are explained in the section on symbolic mode. {\tt WHERE} is described in the section on substitutions. In previous versions of {\REDUCE}, {\em not} was also defined as an infix operator. In the present version it is a regular prefix operator, and interchangeable with {\em null}. For compatibility with the intermediate language used by {\REDUCE}, each special character infix operator\index{Infix operator} has an alternative alphanumeric identifier associated with it. These identifiers may be used interchangeably with the corresponding special character names on input. 
This correspondence is as follows: \begin{quote} \begin{tabbing} {\tt := setq} \hspace{0.5in} \= (the assignment operator) \\ {\tt = equal} \\ {\tt >= geq} \\ {\tt > greaterp} \\ {\tt <= leq} \\ {\tt < lessp} \\ {\tt + plus} \\ {\tt - difference} \> (if unary, {\tt minus}) \\ {\tt * times} \\ {\tt / quotient} \> (if unary, {\tt recip}) \\ {\tt \verb|^| or ** expt} \> (raising to a power) \\ {\tt . cons} \end{tabbing} \end{quote} Note: {\tt NEQ} is used to mean {\em not equal}. There is no special symbol provided for it. The above operators\index{Operator} are binary, except {\tt NOT} which is unary and {\tt +} and {\tt *} which are nary (i.e., taking an arbitrary number of arguments). In addition, {\tt -} and {\tt /} may be used as unary operators, e.g., /2 means the same as 1/2. Any other operator is parsed as a binary operator using a left association rule. Thus {\tt a/b/c} is interpreted as {\tt (a/b)/c}. There are two exceptions to this rule: {\tt :=} and {\tt .} are right associative. Example: {\tt a:=b:=c} is interpreted as {\tt a:=(b:=c)}. Unlike ALGOL and PASCAL, {\tt \verb|^|} is left associative. In other words, {\tt a\verb|^|b\verb|^|c} is interpreted as {\tt (a\verb|^|b)\verb|^|c}. The operators\index{Operator} {\tt $<$}, {\tt $<$=}, {\tt $>$}, {\tt $>$=} can only be used for making comparisons between numbers. No meaning is currently assigned to this kind of comparison between general expressions. Parentheses may be used to specify the order of combination. If parentheses are omitted then this order is by the ordering of the precedence list\index{Operator precedence} defined by the right-hand side of the {\tt <infix operator>}\index{Infix operator} table at the beginning of this section, from lowest to highest. In other words, {\tt WHERE} has the lowest precedence, and {\tt .} (the dot operator) the highest. \chapter{Expressions} {\REDUCE} expressions\index{Expression} may be of several types and consist of sequences of numbers, variables, operators, left and right parentheses and commas. The most common types are as follows: \section{Scalar Expressions} \index{Scalar}Using the arithmetic operations {\tt + - * / \verb|^|} (power) and parentheses, scalar expressions are composed from numbers, ordinary ``scalar'' variables (identifiers), array names with subscripts, operator or procedure names with arguments and statement expressions. {\it Examples:} {\small\begin{verbatim} x x^3 - 2*y/(2*z^2 - df(x,z)) (p^2 + m^2)^(1/2)*log (y/m) a(5) + b(i,q) \end{verbatim}} The symbol ** may be used as an alternative to the caret symbol (\verb+^+) for forming powers, particularly in those systems that do not support a caret symbol. Statement expressions, usually in parentheses, can also form part of a scalar\index{Scalar} expression, as in the example {\small\begin{verbatim} w + (c:=x+y) + z . \end{verbatim}} When the algebraic value of an expression is needed, {\REDUCE} determines it, starting with the algebraic values of the parts, roughly as follows: Variables and operator symbols with an argument list have the algebraic values they were last assigned, or if never assigned stand for themselves. However, array elements have the algebraic values they were last assigned, or, if never assigned, are taken to be 0. Procedures are evaluated with the values of their actual parameters. In evaluating expressions, the standard rules of algebra are applied. Unfortunately, this algebraic evaluation of an expression is not as unambiguous as is numerical evaluation. 
This process is generally referred to as ``simplification''\index{Simplification} in the sense that the evaluation usually but not always produces a simplified form for the expression. There are many options available to the user for carrying out such simplification\index{Simplification}. If the user doesn't specify any method, the default method is used. The default evaluation of an expression involves expansion of the expression and collection of like terms, ordering of the terms, evaluation of derivatives and other functions and substitution for any expressions which have values assigned or declared (see assignments and {\tt LET} statements). In many cases, this is all that the user needs. The declarations by which the user can exercise some control over the way in which the evaluation is performed are explained in other sections. For example, if a real (floating point) number is encountered during evaluation, the system will normally convert it into a ratio of two integers. If the user wants to use real arithmetic, he can effect this by the command {\tt on rounded;}.\ttindex{ROUNDED} Other modes for coefficient arithmetic are described elsewhere. If an illegal action occurs during evaluation (such as division by zero) or functions are called with the wrong number of arguments, and so on, an appropriate error message is generated. % A list of such error messages is given in an appendix. \section{Integer Expressions} \index{Integer}These are expressions which, because of the values of the constants and variables in them, evaluate to whole numbers. {\it Examples:} {\small\begin{verbatim} 2, 37 * 999, (x + 3)^2 - x^2 - 6*x \end{verbatim}} are obviously integer expressions. {\small\begin{verbatim} j + k - 2 * j^2 \end{verbatim}} is an integer expression when {\tt J} and {\tt K} have values that are integers, or if not integers are such that ``the variables and fractions cancel out'', as in {\small\begin{verbatim} k - 7/3 - j + 2/3 + 2*j^2. \end{verbatim}} \section{Boolean Expressions} \label{sec-boolean} A boolean expression\index{Boolean} returns a truth value. In the algebraic mode of {\REDUCE}, boolean expressions have the syntactical form: {\small\begin{verbatim} <expression> <relational operator> <expression> \end{verbatim}} or {\small\begin{verbatim} <boolean operator> (<arguments>) \end{verbatim}} or {\small\begin{verbatim} <boolean expression> <logical operator> <boolean expression>. \end{verbatim}} Parentheses can also be used to control the precedence of expressions. In addition to the logical and relational operators defined earlier as infix operators, the following boolean operators are also defined:\\ \mbox{}\\ \ttindex{EVENP}\ttindex{FIXP}\ttindex{FREEOF}\ttindex{NUMBERP} \ttindex{ORDP}\ttindex{PRIMEP} {\renewcommand{\arraystretch}{2} \begin{tabular}{lp{\redboxwidth}} {\tt EVENP(U)} & determines if the number {\tt U} is even or not; \\ {\tt FIXP(U)} & determines if the expression {\tt U} is integer or not; \\ {\tt FREEOF(U,V)} & determines if the expression {\tt U} does not contain the kernel {\tt V} anywhere in its structure; \\ {\tt NUMBERP(U)} & determines if {\tt U} is a number or not; \\ {\tt ORDP(U,V)} & determines if {\tt U} is ordered ahead of {\tt V} by some canonical ordering (based on the expression structure and an internal ordering of identifiers); \\ {\tt PRIMEP(U)} & true if {\tt U} is a prime object, i.e., any object other than 0 and plus or minus 1 which is only exactly divisible by itself or a unit. 
\\ \end{tabular}} {\it Examples:} {\small\begin{verbatim} j<1 x>0 or x=-2 numberp x fixp x and evenp x numberp x and x neq 0 \end{verbatim}} Boolean expressions can only appear directly within {\tt IF}, {\tt FOR}, {\tt WHILE}, and {\tt UNTIL} statements, as described in other sections. Such expressions cannot be used in place of ordinary algebraic expressions, or assigned to a variable. NB: For those familiar with symbolic mode, the meaning of some of these operators is different in that mode. For example, {\tt NUMBERP} is true only for integers and reals in symbolic mode. When two or more boolean expressions are combined with {\tt AND}, they are evaluated one by one until a {\em false\/} expression is found. The rest are not evaluated. Thus {\small\begin{verbatim} numberp x and numberp y and x>y \end{verbatim}} does not attempt to make the {\tt x>y} comparison unless {\tt X} and {\tt Y} are both verified to be numbers. Similarly, evaluation of a sequence of boolean expressions connected by {\tt OR} stops as soon as a {\em true\/} expression is found. NB: In a boolean expression, and in a place where a boolean expression is expected, the algebraic value 0 is interpreted as {\em false}, while all other algebraic values are converted to {\em true}. So in algebraic mode a procedure can be written for direct usage in boolean expressions, returning say 1 or 0 as its value as in {\small\begin{verbatim} procedure polynomialp(u,x); if den(u)=1 and deg(u,x)>=1 then 1 else 0; \end{verbatim}} One can then use this in a boolean construct, such as {\small\begin{verbatim} if polynomialp(q,z) and not polynomialp(q,y) then ... \end{verbatim}} In addition, any procedure that does not have a defined return value (for example, a block without a {\tt RETURN} statement in it) has the boolean value {\em false}. \section{Equations} Equations\index{Equation} are a particular type of expression with the syntax {\small\begin{verbatim} <expression> = <expression>. \end{verbatim}} In addition to their role as boolean expressions, they can also be used as arguments to several operators (e.g., {\tt SOLVE}), and can be returned as values. Under normal circumstances, the right-hand-side of the equation is evaluated but not the left-hand-side. This also applies to any substitutions made by the {\tt SUB}\ttindex{SUB} operator. If both sides are to be evaluated, the switch {\tt EVALLHSEQP}\ttindex{EVALLHSEQP} should be turned on. To facilitate the handling of equations, two selectors, {\tt LHS} \ttindex{LHS} and {\tt RHS},\ttindex{RHS} which return the left- and right-hand sides of a equation\index{Equation} respectively, are provided. For example, {\small\begin{verbatim} lhs(a+b=c) -> a+b and rhs(a+b=c) -> c. \end{verbatim}} \section{Proper Statements as Expressions} Several kinds of proper statements\index{Proper statement} deliver an algebraic or numerical result of some kind, which can in turn be used as an expression or part of an expression. For example, an assignment statement itself has a value, namely the value assigned. So {\small\begin{verbatim} 2 * (x := a+b) \end{verbatim}} is equal to {\tt 2*(a+b)}, as well as having the ``side-effect''\index{Side effect} of assigning the value {\tt a+b} to {\tt X}. In context, {\small\begin{verbatim} y := 2 * (x := a+b); \end{verbatim}} sets {\tt X} to {\tt a+b} and {\tt Y} to {\tt 2*(a+b)}. The sections on the various proper statement\index{Proper statement} types indicate which of these statements are also useful as expressions. 
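As a closing sketch for this chapter, the equation selectors described above might be used as follows (the comments indicate the intended results only; the exact output format depends on the switch settings in force):
{\small\begin{verbatim}
eqn := x + y = z^2;
lhs eqn;           % selects the left-hand side, x + y
rhs eqn;           % selects the right-hand side, z^2
on evallhseqp;     % from now on both sides of an equation are evaluated
\end{verbatim}}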
\chapter{Lists} A list\index{List} is an object consisting of a sequence of other objects (including lists themselves), separated by commas and surrounded by braces. Examples of lists are:
{\small\begin{verbatim}
{a,b,c}
{1,a-b,c=d}
{{a},{{b,c},d},e}.
\end{verbatim}}
The empty list is represented as
{\small\begin{verbatim}
{}.
\end{verbatim}}
\section{Operations on Lists}\index{List operation} Several operators in the system return their results as lists, and a user can create new lists using braces and commas. Alternatively, one can use the operator LIST to construct a list. An important class of operations on lists is that of MAP and SELECT operations. For details, please refer to the chapters on MAP, SELECT and the FOR command. See also the documentation on the ASSIST package. To facilitate the use of lists, a number of operators are also available for manipulating them. {\tt PART(<list>,n)}\ttindex{PART} for example will return the $n^{th}$ element of a list. {\tt LENGTH}\ttindex{LENGTH} will return the length of a list. Several operators are also defined uniquely for lists. For those familiar with them, these operators in fact mirror the operations defined for Lisp lists. These operators are as follows:
\subsection{LIST} The operator LIST is an alternative to the usage of curly brackets. LIST accepts an arbitrary number of arguments and returns a list of its arguments. This operator is useful in cases where operators have to be passed as arguments. E.g.,
{\small\begin{verbatim}
list(a,list(list(b,c),d),e);  ->  {a,{{b,c},d},e}
\end{verbatim}}
\subsection{FIRST} This operator\ttindex{FIRST} returns the first member of a list. An error occurs if the argument is not a list, or the list is empty.
\subsection{SECOND} {\tt SECOND}\ttindex{SECOND} returns the second member of a list. An error occurs if the argument is not a list or has no second element.
\subsection{THIRD} This operator\ttindex{THIRD} returns the third member of a list. An error occurs if the argument is not a list or has no third element.
\subsection{REST} {\tt REST}\ttindex{REST} returns its argument with the first element removed. An error occurs if the argument is not a list, or is empty.
\subsection{$.$ (Cons) Operator} This operator\ttindex{. (CONS)} adds (``conses'') an expression to the front of a list. For example:
{\small\begin{verbatim}
a . {b,c} -> {a,b,c}.
\end{verbatim}}
\subsection{APPEND} This operator\ttindex{APPEND} appends its first argument to its second to form a new list. {\it Examples:}
{\small\begin{verbatim}
append({a,b},{c,d}) -> {a,b,c,d}
append({{a,b}},{c,d}) -> {{a,b},c,d}.
\end{verbatim}}
\subsection{REVERSE} The operator {\tt REVERSE}\ttindex{REVERSE} returns its argument with the elements in the reverse order. It only applies to the top level list, not any lower level lists that may occur. Examples are:\index{List operation}
{\small\begin{verbatim}
reverse({a,b,c}) -> {c,b,a}
reverse({{a,b,c},d}) -> {d,{a,b,c}}.
\end{verbatim}}
\subsection{List Arguments of Other Operators} If an operator other than those specifically defined for lists is given a single argument that is a list, then the result of this operation will be a list in which that operator is applied to each element of the list. For example, the result of evaluating {\tt log\{a,b,c\}} is the expression {\tt \{LOG(A),LOG(B),LOG(C)\}}. There are two ways to inhibit this operator distribution. Firstly, the switch {\tt LISTARGS},\ttindex{LISTARGS} if on, will globally inhibit such distribution.
Secondly, one can inhibit this distribution for a specific operator by the declaration {\tt LISTARGP}.\ttindex{LISTARGP} For example, with the declaration {\tt listargp log}, {\tt log\{a,b,c\}} would evaluate to {\tt LOG(\{A,B,C\})}. If an operator has more than one argument, no such distribution occurs. \subsection{Caveats and Examples} Some of the natural list operations such as {\it member} or {\it delete} are available only after loading the package {\it ASSIST}. Please note that a non-list as second argument to CONS (a "dotted pair" in LISP terms) is not allowed and causes an "invalid as list" error. {\small\begin{verbatim} a := 17 . 4; ***** 17 4 invalid as list \end{verbatim}} Also, the initialization of a scalar variable is not the empty list -- one has to set list type variables explicitly, as in the following example: {\small\begin{verbatim} load_package assist; procedure lotto (n,m); begin scalar list_1_n, luckies, hit; list_1_n := {}; luckies := {}; for k:=1:n do list_1_n := k . list_1_n; for k:=1:m do << hit := part(list_1_n,random(n-k+1) + 1); list_1_n := delete(hit,list_1_n); luckies := hit . luckies >>; return luckies; end; % In Germany, try lotto (49,6); \end{verbatim}} {\it Another example:} Find all coefficients of a multivariate polynomial with respect to a list of variables: {\small\begin{verbatim} procedure allcoeffs(q,lis); % q : polynomial, lis: list of vars allcoeffs1 (list q,lis); procedure allcoeffs1(q,lis); if lis={} then q else allcoeffs1(foreach qq in q join coeff(qq,first lis),rest lis); \end{verbatim}} \chapter{Statements} A statement\index{Statement} is any combination of reserved words and expressions, and has the syntax \index{Proper statement} {\small\begin{verbatim} <statement> ::= <expression>|<proper statement> \end{verbatim}} A {\REDUCE} program consists of a series of commands which are statements followed by a terminator:\index{Terminator}\index{Semicolon} \index{Dollar sign} {\small\begin{verbatim} <terminator> ::= ;|$ \end{verbatim}} The division of the program into lines is arbitrary. Several statements can be on one line, or one statement can be freely broken onto several lines. If the program is run interactively, statements ending with ; or \$ are not processed until an end-of-line character is encountered. This character can vary from system to system, but is normally the \key{Return} key on an ASCII terminal. Specific systems may also use additional keys as statement terminators. If a statement is a proper statement\index{Proper statement}, the appropriate action takes place. Depending on the nature of the proper statement some result or response may or may not be printed out, and the response may or may not depend on the terminator used. If a statement is an expression, it is evaluated. If the terminator is a semicolon, the result is printed. If the terminator is a dollar sign, the result is not printed. Because it is not usually possible to know in advance how large an expression will be, no explicit format statements are offered to the user. However, a variety of output declarations are available so that the output can be produced in different forms. These output declarations are explained in Section~\ref{sec-output}. The following sub-sections describe the types of proper statements \index{Proper statement} in {\REDUCE}. 
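Before turning to these, the effect of the two terminators on a simple expression statement can be illustrated by a minimal sketch (the printed form of the first result depends on the output declarations in force):
{\small\begin{verbatim}
(x+y)^2;     % terminated by a semicolon: the result is printed
(x+y)^2$     % terminated by a dollar sign: the result is not printed
\end{verbatim}}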
\section{Assignment Statements} These statements\index{Assignment} have the syntax {\small\begin{verbatim} <assignment statement> ::= <expression> := <expression> \end{verbatim}} The {\tt <expression>} on the left side is normally the name of a variable, an operator symbol with its list of arguments filled in, or an array name with the proper number of integer subscript values within the array bounds. For example: \begin{quote} \begin{tabbing} {\tt a1 := b + c} \\ {\tt h(l,m) := x-2*y} \hspace{1in} \= (where {\tt h} is an operator) \\ {\tt k(3,5) := x-2*y} \> (where {\tt k} is a 2-dim. array) \end{tabbing} \end{quote} More general assignments\index{Assignment} such as {\tt a+b := c} are also allowed. The effect of these is explained in Section~\ref{sec-gensubs}. An assignment statement causes the expression on the right-hand-side to be evaluated. If the left-hand-side is a variable, the value of the right-hand-side is assigned to that unevaluated variable. If the left-hand-side is an operator or array expression, the arguments of that operator or array are evaluated, but no other simplification done. The evaluated right-hand-side is then assigned to the resulting expression. For example, if {\tt A} is a single-dimensional array, {\tt a(1+1) := b} assigns the value {\tt B} to the array element {\tt a(2)}. If a semicolon is used as the terminator when an assignment \index{Assignment} is issued as a command (i.e. not as a part of a group statement or procedure or other similar construct), the left-hand side symbol of the assignment statement is printed out, followed by a ``{\tt :=}'', followed by the value of the expression on the right. It is also possible to write a multiple assignment statement: \index{Multiple assignment statement} {\small\begin{verbatim} <expression> := ... := <expression> := <expression> \end{verbatim}} In this form, each {\tt <expression>} but the last is set to the value of the last {\tt <expression>}. If a semicolon is used as a terminator, each expression except the last is printed followed by a ``{\tt :=}'' ending with the value of the last expression. \subsection{Set Statement} In some cases, it is desirable to perform an assignment in which {\em both\/} the left- and right-hand sides of an assignment\index{Assignment} are evaluated. In this case, the {\tt SET}\ttindex{SET} statement can be used with the syntax: {\small\begin{verbatim} SET(<expression>,<expression>); \end{verbatim}} For example, the statements {\small\begin{verbatim} j := 23; set(mkid(a,j),x); \end{verbatim}} assigns the value {\tt X} to {\tt A23}. \section{Group Statements} The group statement\index{Group statement} is a construct used where {\REDUCE} expects a single statement, but a series of actions needs to be performed. It is formed by enclosing one or more statements (of any kind) between the symbols {\tt $<<$} and {\tt $>>$}, separated by semicolons or dollar signs -- it doesn't matter which. The statements are executed one after another. Examples will be given in the sections on {\tt IF}\ttindex{IF} and other types of statements in which the {\tt $<<$} \ldots {\tt $>>$} construct is useful. If the last statement in the enclosed group has a value, then that is also the value of the group statement. Care must be taken not to have a semicolon or dollar sign after the last grouped statement, if the value of the group is relevant: such an extra terminator causes the group to have the value NIL or zero. 
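As a small illustrative sketch of the above (the variable names are arbitrary), the value of a group statement is that of its last enclosed statement, and an extra terminator discards it:
{\small\begin{verbatim}
a := <<x := 2; x + 3>>;    % a is set to 5, the value of the last statement
b := <<x := 2; x + 3;>>;   % extra terminator: the group's value is NIL or zero
\end{verbatim}}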
\section{Conditional Statements} The conditional statement\index{Conditional statement} has the following syntax: {\small\begin{verbatim} <conditional statement> ::= IF <boolean expression> THEN <statement> [ELSE <statement>] \end{verbatim}} The boolean expression is evaluated. If this is {\em true}, the first {\tt <statement>} is executed. If it is {\em false}, the second is. {\it Examples:} {\small\begin{verbatim} if x=5 then a:=b+c else d:=e+f if x=5 and numberp y then <<ff:=q1; a:=b+c>> else <<ff:=q2; d:=e+f>> \end{verbatim}} Note the use of the group statement\index{Group statement}. \\ Conditional statements associate to the right; i.e.,\ttindex{IF} {\small\begin{verbatim} IF <a> THEN <b> ELSE IF <c> THEN <d> ELSE <e> \end{verbatim}} is equivalent to: {\small\begin{verbatim} IF <a> THEN <b> ELSE (IF <c> THEN <d> ELSE <e>) \end{verbatim}} In addition, the construction {\small\begin{verbatim} IF <a> THEN IF <b> THEN <c> ELSE <d> \end{verbatim}} parses as {\small\begin{verbatim} IF <a> THEN (IF <b> THEN <c> ELSE <d>). \end{verbatim}} If the value of the conditional statement\index{Conditional statement} is of primary interest, it is often called a conditional expression instead. Its value is the value of whichever statement was executed. (If the executed statement has no value, the conditional expression has no value or the value 0, depending on how it is used.) {\it Examples:} {\small\begin{verbatim} a:=if x<5 then 123 else 456; b:=u + v^(if numberp z then 10*z else 1) + w; \end{verbatim}} If the value is of no concern, the {\tt ELSE} clause may be omitted if no action is required in the {\em false\/} case. {\small\begin{verbatim} if x=5 then a:=b+c; \end{verbatim}} Note: As explained in Section~\ref{sec-boolean},a if a scalar or numerical expression is used in place of the boolean expression -- for example, a variable is written there -- the {\em true\/} alternative is followed unless the expression has the value 0. \section{FOR Statements} The {\tt FOR} statement is used to define a variety of program loops\index{Loop}. Its general syntax is as follows:\ttindex{UNTIL} \ttindex{DO}\ttindex{PRODUCT}\ttindex{SUM}\ttindex{COLLECT}\ttindex{JOIN} \begin{small} \[ \mbox{\tt FOR} \left\{ \begin{array}{@{}ccc@{}} \mbox{\tt \meta{var} := \meta{number} } \left\{ \begin{array}{@{}c@{}} \mbox{\tt STEP \meta{number} UNTIL} \\ \mbox{\tt :} \end{array} \right\} \mbox{\tt \meta{number}} \\[3mm] \multicolumn{1}{c}{\mbox{\tt EACH \meta{var} \(\left\{ \begin{tabular}{@{}c@{}} IN \\ ON \end{tabular} \right\}\) \meta{list}}} \end{array} \right\} \mbox{\tt \meta{action} \meta{exprn}} \] \end{small}% % where \begin{center} \tt \meta{action} ::= do|product|sum|collect|join. \end{center} The assignment\index{Assignment} form of the {\tt FOR} statement defines an iteration over the indicated numerical range. If expressions that do not evaluate to numbers are used in the designated places, an error will result. The {\tt FOR EACH}\ttindex{FOR EACH} form of the {\tt FOR} statement is designed to iterate down a list. Again, an error will occur if a list is not used. The action {\tt DO}\ttindex{DO} means that {\tt <exprn>} is simply evaluated and no value kept; the statement returning 0 in this case (or no value at the top level). {\tt COLLECT} means that the results of evaluating {\tt <exprn>} each time are linked together to make a list, and {\tt JOIN} means that the values of {\tt <exprn>} are themselves lists that are joined to make one list (similar to {\tt CONC} in Lisp). 
Finally, {\tt PRODUCT}\ttindex{PRODUCT} and {\tt SUM}\ttindex{SUM} form the respective combined value out of the values of {\tt <exprn>}. In all cases, {\tt <exprn>} is evaluated algebraically within the scope of the current value of {\tt <var>}. If {\tt <action>} is {\tt DO}\ttindex{DO}, then nothing else happens. In other cases, {\tt <action>} is a binary operator that causes a result to be built up and returned by {\tt FOR}. In those cases, the loop\index{Loop} is initialized to a default value ({\tt 0} for {\tt SUM},\ttindex{SUM} {\tt 1} for {\tt PRODUCT},\ttindex{PRODUCT} and an empty list for the other actions). The test for the end condition is made before any action is taken. As in Pascal, if the variable is out of range in the assignment case, or the {\tt <list>} is empty in the {\tt FOR EACH}\ttindex{FOR EACH} case, {\tt <exprn>} is not evaluated at all. {\it Examples:} \begin{enumerate} \item If {\tt A}, {\tt B} have been declared to be arrays, the following stores $5^{2}$ through $10^{2}$ in {\tt A(5)} through {\tt A(10)}, and at the same time stores the cubes in the {\tt B} array: {\small\begin{verbatim} for i := 5 step 1 until 10 do <<a(i):=i^2; b(i):=i^3>> \end{verbatim}} \item As a convenience, the common construction {\small\begin{verbatim} STEP 1 UNTIL \end{verbatim}} may be abbreviated to a colon. Thus, instead of the above we could write: {\small\begin{verbatim} for i := 5:10 do <<a(i):=i^2; b(i):=i^3>> \end{verbatim}} \item The following sets {\tt C} to the sum of the squares of 1,3,5,7,9; and {\tt D} to the expression {\tt x*(x+1)*(x+2)*(x+3)*(x+4):} {\small\begin{verbatim} c := for j:=1 step 2 until 9 sum j^2; d := for k:=0 step 1 until 4 product (x+k); \end{verbatim}} \item The following forms a list of the squares of the elements of the list {\tt \{a,b,c\}:}\ttindex{FOR EACH} {\small\begin{verbatim} for each x in {a,b,c} collect x^2; \end{verbatim}} \item The following forms a list of the listed squares of the elements of the list {\tt \{a,b,c\}} (i.e., {\tt \{\{A\verb|^|2\},\{B\verb|^|2\},\{C\verb|^|2\}\}):} {\small\begin{verbatim} for each x in {a,b,c} collect {x^2}; \end{verbatim}} \item The following also forms a list of the squares of the elements of the list {\tt \{a,b,c\},} since the {\tt JOIN} operation joins the individual lists into one list:\ttindex{FOR EACH} {\small\begin{verbatim} for each x in {a,b,c} join {x^2}; \end{verbatim}} \end{enumerate} The control variable used in the {\tt FOR} statement is actually a new variable, not related to the variable of the same name outside the {\tt FOR} statement. In other words, executing a statement {\tt for i:=} \ldots doesn't change the system's assumption that $i^{2} = -1$. Furthermore, in algebraic mode, the value of the control variable is substituted in {\tt <exprn>} only if it occurs explicitly in that expression. It will not replace a variable of the same name in the value of that expression. For example: {\small\begin{verbatim} b := a; for a := 1:2 do write b; \end{verbatim}} prints {\tt A} twice, not 1 followed by 2. \section{WHILE \ldots DO} The\ttindex{WHILE} {\tt FOR \ldots DO}\ttindex{DO} feature allows easy coding of a repeated operation in which the number of repetitions is known in advance. If the criterion for repetition is more complicated, {\tt WHILE \ldots DO} can often be used. Its syntax is: {\small\begin{verbatim} WHILE <boolean expression> DO <statement> \end{verbatim}} The {\tt WHILE \ldots DO} controls the single statement following {\tt DO}. 
If several statements are to be repeated, as is almost always the case, they must be grouped using the $<<$ \ldots $>>$ or {\tt BEGIN \ldots END} as in the example below. The {\tt WHILE} condition is tested each time {\em before\/} the action following the {\tt DO} is attempted. If the condition is false to begin with, the action is not performed at all. Make sure that what is to be tested has an appropriate value initially. {\it Example:} Suppose we want to add up a series of terms, generated one by one, until we reach a term which is less than 1/1000 in value. For our simple example, let us suppose the first term equals 1 and each term is obtained from the one before by taking one third of it and adding one third its square. We would write: {\small\begin{verbatim} ex:=0; term:=1; while num(term - 1/1000) >= 0 do <<ex := ex+term; term:=(term + term^2)/3>>; ex; \end{verbatim}} As long as {\tt TERM} is greater than or equal to ({\tt >=}) 1/1000 it will be added to {\tt EX} and the next {\tt TERM} calculated. As soon as {\tt TERM} becomes less than 1/1000 the {\tt WHILE} test fails and the {\tt TERM} will not be added. \section{REPEAT \ldots UNTIL} \ttindex{REPEAT} {\tt REPEAT \ldots UNTIL} is very similar in purpose to {\tt WHILE \ldots DO}. Its syntax is: {\small\begin{verbatim} REPEAT <statement> UNTIL <boolean expression> \end{verbatim}} (PASCAL users note: Only a single statement -- usually a group statement -- is allowed between the {\tt REPEAT} and the {\tt UNTIL.)} There are two essential differences: \begin{enumerate} \item The test is performed {\em after\/} the controlled statement (or group of statements) is executed, so the controlled statement is always executed at least once. \item The test is a test for when to stop rather than when to continue, so its ``polarity'' is the opposite of that in {\tt WHILE \ldots DO.} \end{enumerate} As an example, we rewrite the example from the {\tt WHILE \ldots DO} section: \begin{samepage} {\small\begin{verbatim} ex:=0; term:=1; repeat <<ex := ex+term; term := (term + term^2)/3>> until num(term - 1/1000) < 0; ex; \end{verbatim}} \end{samepage} In this case, the answer will be the same as before, because in neither case is a term added to {\tt EX} which is less than 1/1000. \section{Compound Statements} \index{Compound statement}Often the desired process can best (or only) be described as a series of steps to be carried out one after the other. In many cases, this can be achieved by use of the group statement\index{Group statement}. However, each step often provides some intermediate result, until at the end we have the final result wanted. Alternatively, iterations on the steps are needed that are not possible with constructs such as {\tt WHILE}\ttindex{WHILE} or {\tt REPEAT}\ttindex{REPEAT} statements. In such cases the steps of the process must be enclosed between the words {\tt BEGIN} and {\tt END}\ttindex{BEGIN \ldots END} forming what is technically called a {\em block\/}\index{Block} or {\em compound\/} statement. Such a compound statement can in fact be used wherever a group statement appears. The converse is not true: {\tt BEGIN \ldots END} can be used in ways that {\tt $<<$} \ldots {\tt $>>$} cannot. If intermediate results must be formed, local variables must be provided in which to store them. {\em Local\/} means that their values are deleted as soon as the block's operations are complete, and there is no conflict with variables outside the block that happen to have the same name. 
Local variables are created by a {\tt SCALAR}\ttindex{SCALAR} declaration immediately after the {\tt BEGIN}: {\small\begin{verbatim} scalar a,b,c,z; \end{verbatim}} If more convenient, several {\tt SCALAR} declarations can be given one after another: {\small\begin{verbatim} scalar a,b,c; scalar z; \end{verbatim}} In place of {\tt SCALAR} one can also use the declarations {\tt INTEGER}\ttindex{INTEGER} or {\tt REAL}\ttindex{REAL}. In the present version of {\REDUCE} variables declared {\tt INTEGER} are expected to have only integer values, and are initialized to 0. {\tt REAL} variables on the other hand are currently treated as algebraic mode {\tt SCALAR}s. {\it CAUTION:} {\tt INTEGER}, {\tt REAL} and {\tt SCALAR} declarations can only be given immediately after a {\tt BEGIN}. An error will result if they are used after other statements in a block (including {\tt ARRAY} and {\tt OPERATOR} declarations, which are global in scope), or outside the top-most block (e.g., at the top level). All variables declared {\tt SCALAR} are automatically initialized to zero in algebraic mode ({\tt NIL} in symbolic mode). Any symbols not declared as local variables in a block refer to the variables of the same name in the current calling environment. In particular, if they are not so declared at a higher level (e.g., in a surrounding block or as parameters in a calling procedure), their values can be permanently changed. Following the {\tt SCALAR}\ttindex{SCALAR} declaration(s), if any, write the statements to be executed, one after the other, separated by delimiters (e.g., {\tt ;} or {\tt \$}) (it doesn't matter which). However, from a stylistic point of view, {\tt ;} is preferred. The last statement in the body, just before {\tt END}, need not have a terminator (since the {\tt BEGIN \ldots END} are in a sense brackets confining the block statements). The last statement must also be the command {\tt RETURN}\ttindex{RETURN} followed by the variable or expression whose value is to be the value returned by the procedure. If the {\tt RETURN} is omitted (or nothing is written after the word {\tt RETURN}) the procedure will have no value or the value zero, depending on how it is used (and {\tt NIL} in symbolic mode). Remember to put a terminator after the {\tt END}. {\it Example:} Given a previously assigned integer value for {\tt N}, the following block will compute the Legendre polynomial of degree {\tt N} in the variable {\tt X}: {\small\begin{verbatim} begin scalar seed,deriv,top,fact; seed:=1/(y^2 - 2*x*y +1)^(1/2); deriv:=df(seed,y,n); top:=sub(y=0,deriv); fact:=for i:=1:n product i; return top/fact end; \end{verbatim}} \subsection{Compound Statements with GO TO} It is possible to have more complicated structures inside the {\tt BEGIN \ldots END}\ttindex{BEGIN \ldots END} brackets than indicated in the previous example. That the individual lines of the program need not be assignment\index{Assignment} statements, but could be almost any other kind of statement or command, needs no explanation. For example, conditional statements, and {\tt WHILE}\ttindex{WHILE} and {\tt REPEAT} \ttindex{REPEAT} constructions, have an obvious role in defining more intricate blocks. If these structured constructs don't suffice, it is possible to use labels \index{Label} and {\tt GO} {\tt TO}s\ttindex{GO TO} within a compound statement,\index{Compound statement} and also to use {\tt RETURN} \ttindex{RETURN} in places within the block other than just before the {\tt END}. The following subsections discuss these matters in detail. 
For many readers the following example, presenting one possible definition of a process to calculate the factorial of {\tt N} for preassigned {\tt N} will suffice: {\it Example:} {\small\begin{verbatim} begin scalar m; m:=1; l: if n=0 then return m; m:=m*n; n:=n-1; go to l end; \end{verbatim}} \subsection{Labels and GO TO Statements} \index{Label}\ttindex{GO TO}Within a {\tt BEGIN \ldots END} compound statement it is possible to label statements, and transfer to them out of sequence using {\tt GO} {\tt TO} statements. Only statements on the top level inside compound statements can be labeled, not ones inside subsidiary constructions like {\tt $<<$} \ldots {\tt $>>$}, {\tt IF} \ldots {\tt THEN} \ldots , {\tt WHILE} \ldots {\tt DO} \ldots , etc. Labels and {\tt GO TO} statements have the syntax: {\small\begin{verbatim} <go to statement> ::= GO TO <label> | GOTO <label> <label> ::= <identifier> <labeled statement> ::= <label>:<statement> \end{verbatim}} Note that statement names cannot be used as labels. While {\tt GO TO} is an unconditional transfer, it is frequently used in conditional statements such as {\small\begin{verbatim} if x>5 then go to abcd; \end{verbatim}} giving the effect of a conditional transfer. Transfers using {\tt GO TO}s can only occur within the block in which the {\tt GO TO} is used. In other words, you cannot transfer from an inner block to an outer block using a {\tt GO TO}. However, if a group statement occurs within a compound statement, it is possible to jump out of that group statement to a point within the compound statement using a {\tt GO TO}. \subsection{RETURN Statements} The value corresponding to a {\tt BEGIN \ldots END} compound statement, \ttindex{BEGIN \ldots END} such as a procedure body, is normally 0 ({\tt NIL} in symbolic mode). By executing a {\tt RETURN}\ttindex{RETURN} statement in the compound statement a different value can be returned. After a {\tt RETURN} statement is executed, no further statements within the compound statement are executed. {\tt Examples:} {\small\begin{verbatim} return x+y; return m; return; \end{verbatim}} Note that parentheses are not required around the {\tt x+y}, although they are permitted. The last example is equivalent to {\tt return 0} or {\tt return nil}, depending on whether the block is used as part of an expression or not. Since {\tt RETURN}\ttindex{RETURN} actually moves up only one block\index{Block} level, in a sense the casual user is not expected to understand, we tabulate some cautions concerning its use. \begin{enumerate} \item {\tt RETURN} can be used on the top level inside the compound statement, i.e. as one of the statements bracketed together by the {\tt BEGIN \ldots END}\ttindex{BEGIN \ldots END} \item {\tt RETURN} can be used within a top level {\tt $<<$} \ldots {\tt $>>$} construction within the compound statement. In this case, the {\tt RETURN} transfers control out of both the group statement and the compound statement. \item {\tt RETURN} can be used within an {\tt IF} \ldots {\tt THEN} \ldots {\tt ELSE} \ldots on the top level within the compound statement. \end{enumerate} NOTE: At present, there is no construct provided to permit early termination of a {\tt FOR}\ttindex{FOR}, {\tt WHILE}\ttindex{WHILE}, or {\tt REPEAT}\ttindex{REPEAT} statement. In particular, the use of {\tt RETURN} in such cases results in a syntax error. For example, {\small\begin{verbatim} begin scalar y; y := for i:=0:99 do if a(i)=x then return b(i); ... \end{verbatim}} will lead to an error. 
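Since {\tt RETURN} cannot be used to terminate a {\tt FOR} loop, a search of the kind attempted above can instead be coded with a label and {\tt GO TO} at the top level of the block. The following is only a sketch; it assumes that {\tt A} and {\tt B} have been declared as arrays with indices running from 0 to 99 and that {\tt X} has been given a value:
{\small\begin{verbatim}
begin scalar i;
   i:=0;
lab: if i>99 then return;        % nothing found; the block returns 0
   if a(i)=x then return b(i);   % found: b(i) becomes the value of the block
   i:=i+1;
   go to lab
end;
\end{verbatim}}
Here both {\tt RETURN} statements appear on the top level of the compound statement, so their use is permitted.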
\chapter{Commands and Declarations} A command\index{Command} is an order to the system to do something. Some commands cause visible results (such as calling for input or output); others, usually called declarations\index{Declaration}, set options, define properties of variables, or define procedures. Commands are formally defined as a statement followed by a terminator {\small\begin{verbatim} <command> ::= <statement> <terminator> <terminator> ::= ;|$ \end{verbatim}} Some {\REDUCE} commands and declarations are described in the following sub-sections. \section{Array Declarations} Array\ttindex{ARRAY} declarations in {\REDUCE} are similar to FORTRAN dimension statements. For example: {\small\begin{verbatim} array a(10),b(2,3,4); \end{verbatim}} Array indices each range from 0 to the value declared. An element of an array is referred to in standard FORTRAN notation, e.g. {\tt A(2)}. We can also use an expression for defining an array bound, provided the value of the expression is a positive integer. For example, if {\tt X} has the value 10 and {\tt Y} the value 7 then {\tt array c(5*x+y)} is the same as {\tt array c(57)}. If an array is referenced by an index outside its range, an error occurs. If the array is to be one-dimensional, and the bound a number or a variable (not a more general expression) the parentheses may be omitted: {\small\begin{verbatim} array a 10, c 57; \end{verbatim}} The operator {\tt LENGTH}\ttindex{LENGTH} applied to an array name returns a list of its dimensions. All array elements are initialized to 0 at declaration time. In other words, an array element has an {\em instant evaluation\/}\index{Instant evaluation} property and cannot stand for itself. If this is required, then an operator should be used instead. Array declarations can appear anywhere in a program. Once a symbol is declared to name an array, it can not also be used as a variable, or to name an operator or a procedure. It can however be re-declared to be an array, and its size may be changed at that time. An array name can also continue to be used as a parameter in a procedure, or a local variable in a compound statement, although this use is not recommended, since it can lead to user confusion over the type of the variable. Arrays once declared are global in scope, and so can then be referenced anywhere in the program. In other words, unlike arrays in most other languages, a declaration within a block (or a procedure) does not limit the scope of the array to that block, nor does the array go away on exiting the block (use {\tt CLEAR} instead for this purpose). \section{Mode Handling Declarations}\index{Mode} The {\tt ON}\ttindex{ON} and {\tt OFF}\ttindex{OFF} declarations are available to the user for controlling various system options. Each option is represented by a {\em switch\/}\index{Switch} name. {\tt ON} and {\tt OFF} take a list of switch names as argument and turn them on and off respectively, e.g., {\small\begin{verbatim} on time; \end{verbatim}} causes the system to print a message after each command giving the elapsed CPU time since the last command, or since {\tt TIME}\ttindex{TIME} was last turned off, or the session began. Another useful switch with interactive use is {\tt DEMO},\ttindex{DEMO} which causes the system to pause after each command in a file (with the exception of comments) until a \key{Return} is typed on the terminal. This enables a user to set up a demonstration file and step through it command by command. 
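For instance, a short interactive sequence using this switch might look as follows (a sketch only; the timing message printed after each command is system dependent and is omitted here):
{\small\begin{verbatim}
on time;                  % report the CPU time used by each command
for i:=1:1000 sum i^2;    % some computation to be timed
off time;                 % no further timing messages
\end{verbatim}}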
As with most declarations, arguments to {\tt ON} and {\tt OFF} may be strung together separated by commas. For example, {\small\begin{verbatim} off time,demo; \end{verbatim}} will turn off both the time messages and the demonstration switch. We note here that while most {\tt ON} and {\tt OFF} commands are obeyed almost instantaneously, some trigger time-consuming actions such as reading in necessary modules from secondary storage. A diagnostic message is printed if {\tt ON}\ttindex{ON} or {\tt OFF} \ttindex{OFF} are used with a switch that is not known to the system. For example, if you misspell {\tt DEMO} and type {\small\begin{verbatim} on demq; \end{verbatim}} you will get the message\index{Switch} {\small\begin{verbatim} ***** DEMQ not defined as switch. \end{verbatim}} \section{END} The identifier {\tt END}\ttindex{END} has two separate uses. 1) Its use in a {\tt BEGIN \ldots END} bracket has been discussed in connection with compound statements. 2) Files to be read using {\tt IN} should end with an extra {\tt END}; command. The reason for this is explained in the section on the {\tt IN} command. This use of {\tt END} does not allow an immediately preceding {\tt END} (such as the {\tt END} of a procedure definition), so we advise using {\tt ;END;} there. %3) A command {\tt END}; entered at the top level transfers control to the %Lisp system\index{Lisp} which is the host of the {\REDUCE} system. All %files opened by {\tt IN} or {\tt OUT} statements are closed in the %process. {\tt END;} does not stop {\REDUCE}. Those familiar with Lisp can %experiment with typing identifiers and ({\tt <function name> <argument %list>}) lists to see the value returned by Lisp. (No terminators, other %than the RETURN key, should be used.) The data structures created during %the {\REDUCE} run are accessible. %You remain in this Lisp mode until you explicitly re-enter {\REDUCE} by %saying {\tt (BEGIN)} at the Lisp top level. In most systems, a Lisp error %also returns you to {\REDUCE} (exceptions are noted in the operating %instructions for your particular {\REDUCE} implementation). In either %case, you will return to {\REDUCE} in the same mode, algebraic or %symbolic, that you were in before the {\tt END};. If you are in %Lisp mode\index{Lisp mode} by mistake -- which is usually the case, %the result of typing more {\tt END}s\ttindex{END} than {\tt BEGIN}s -- %type {\tt (BEGIN)} in parentheses and hit the RETURN key. \section{BYE Command}\ttindex{BYE} The command {\tt BYE}; (or alternatively {\tt QUIT};)\ttindex{QUIT} stops the execution of {\REDUCE}, closes all open output files, and returns you to the calling program (usually the operating system). Your {\REDUCE} session is normally destroyed. \section{SHOWTIME Command}\ttindex{SHOWTIME} {\tt SHOWTIME}; prints the elapsed time since the last call of this command or, on its first call, since the current {\REDUCE} session began. The time is normally given in milliseconds and gives the time as measured by a system clock. The operations covered by this measure are system dependent. \section{DEFINE Command} The command {\tt DEFINE}\ttindex{DEFINE} allows a user to supply a new name for any identifier or replace it by any well-formed expression. 
Its argument is a list of expressions of the form {\small\begin{verbatim} <identifier> = <number>|<identifier>|<operator>| <reserved word>|<expression> \end{verbatim}} {\it Example:} {\small\begin{verbatim} define be==,x=y+z; \end{verbatim}} means that {\tt BE} will be interpreted as an equal sign, and {\tt X} as the expression {\tt y+z} from then on. This renaming is done at parse time, and therefore takes precedence over any other replacement declared for the same identifier. It stays in effect until the end of the {\REDUCE} run. The identifiers {\tt ALGEBRAIC} and {\tt SYMBOLIC} have properties which prevent {\tt DEFINE}\ttindex{DEFINE} from being used on them. To define {\tt ALG} to be a synonym for {\tt ALGEBRAIC}, use the more complicated construction {\small\begin{verbatim} put('alg,'newnam,'algebraic); \end{verbatim}} \chapter{Built-in Prefix Operators} In the following subsections are descriptions of the most useful prefix \index{Prefix} operators built into {\REDUCE} that are not defined in other sections (such as substitution operators). Some are fully defined internally as procedures; others are more nearly abstract operators, with only some of their properties known to the system. In many cases, an operator is described by a prototypical header line as follows. Each formal parameter is given a name and followed by its allowed type. The names of classes referred to in the definition are printed in lower case, and parameter names in upper case. If a parameter type is not commonly used, it may be a specific set enclosed in brackets {\tt \{} \ldots {\tt \}}. Operators that accept formal parameter lists of arbitrary length have the parameter and type class enclosed in square brackets indicating that zero or more occurrences of that argument are permitted. Optional parameters and their type classes are enclosed in angle brackets. \section{Numerical Operators}\index{Numerical operator} {\REDUCE} includes a number of functions that are analogs of those found in most numerical systems. With numerical arguments, such functions return the expected result. However, they may also be called with non-numerical arguments. In such cases, except where noted, the system attempts to simplify the expression as far as it can; usually, a residual expression involving the original operator remains. These operators are as follows: \subsection{ABS} {\tt ABS}\ttindex{ABS} returns the absolute value of its single argument, if that argument has a numerical value. A non-numerical argument is returned as an absolute value, with an overall numerical coefficient taken outside the absolute value operator. For example: {\small\begin{verbatim} abs(-3/4) -> 3/4 abs(2a) -> 2*ABS(A) abs(i) -> 1 abs(-x) -> ABS(X) \end{verbatim}} \subsection{CEILING}\ttindex{CEILING} This operator returns the ceiling (i.e., the least integer greater than or equal to the given argument) if its single argument has a numerical value. A non-numerical argument is returned as an expression in the original operator. For example: {\small\begin{verbatim} ceiling(-5/4) -> -1 ceiling(-a) -> CEILING(-A) \end{verbatim}} \subsection{CONJ}\ttindex{CONJ} This returns the complex conjugate of an expression, if that argument has a numerical value. A non-numerical argument is returned as an expression in the operators {\tt REPART}\ttindex{REPART} and {\tt IMPART}\ttindex{IMPART}.
For example: {\small\begin{verbatim} conj(1+i) -> 1-I conj(a+i*b) -> REPART(A) - REPART(B)*I - IMPART(A)*I - IMPART(B) \end{verbatim}} \subsection{FACTORIAL}\ttindex{FACTORIAL} If the single argument of {\tt FACTORIAL} evaluates to a non-negative integer, its factorial is returned. Otherwise an expression involving {\tt FACTORIAL} is returned. For example: {\small\begin{verbatim} factorial(5) -> 120 factorial(a) -> FACTORIAL(A) \end{verbatim}} \subsection{FIX}\ttindex{FIX} This operator returns the fixed value (i.e., the integer part of the given argument) if its single argument has a numerical value. A non-numerical argument is returned as an expression in the original operator. For example: {\small\begin{verbatim} fix(-5/4) -> -1 fix(a) -> FIX(A) \end{verbatim}} \subsection{FLOOR}\ttindex{FLOOR} This operator returns the floor (i.e., the greatest integer less than or equal to the given argument) if its single argument has a numerical value. A non-numerical argument is returned as an expression in the original operator. For example: {\small\begin{verbatim} floor(-5/4) -> -2 floor(a) -> FLOOR(A) \end{verbatim}} \subsection{IMPART}\ttindex{IMPART} This operator returns the imaginary part of an expression, if that argument has a numerical value. A non-numerical argument is returned as an expression in the operators {\tt REPART}\ttindex{REPART} and {\tt IMPART}. For example: {\small\begin{verbatim} impart(1+i) -> 1 impart(a+i*b) -> REPART(B) + IMPART(A) \end{verbatim}} \subsection{MAX/MIN} {\tt MAX} and {\tt MIN}\ttindex{MAX}\ttindex{MIN} can take an arbitrary number of expressions as their arguments. If all arguments evaluate to numerical values, the maximum or minimum of the argument list is returned. If any argument is non-numeric, an appropriately reduced expression is returned. For example: {\small\begin{verbatim} max(2,-3,4,5) -> 5 min(2,-2) -> -2. max(a,2,3) -> MAX(A,3) min(x) -> X \end{verbatim}} {\tt MAX} or {\tt MIN} of an empty list returns 0. \subsection{NEXTPRIME}\ttindex{NEXTPRIME} {\tt NEXTPRIME} returns the next prime greater than its integer argument, using a probabilistic algorithm. A type error occurs if the value of the argument is not an integer. For example: {\small\begin{verbatim} nextprime(5) -> 7 nextprime(-2) -> 2 nextprime(-7) -> -5 nextprime 1000000 -> 1000003 \end{verbatim}} whereas {\tt nextprime(a)} gives a type error. \subsection{RANDOM}\ttindex{RANDOM} {\tt random(}{\em n\/}{\tt)} returns a random number $r$ in the range $0 \leq r < n$. A type error occurs if the value of the argument is not a positive integer in algebraic mode, or positive number in symbolic mode. For example: {\small\begin{verbatim} random(5) -> 3 random(1000) -> 191 \end{verbatim}} whereas {\tt random(a)} gives a type error. \subsection{RANDOM\_NEW\_SEED}\ttindex{RANDOM\_NEW\_SEED} {\tt random\_new\_seed(}{\em n\/}{\tt)} reseeds the random number generator to a sequence determined by the integer argument $n$. It can be used to ensure that a repeatable pseudo-random sequence will be delivered regardless of any previous use of {\tt RANDOM}, or can be called early in a run with an argument derived from something variable (such as the time of day) to arrange that different runs of a REDUCE program will use different random sequences. When a fresh copy of REDUCE is first created it is as if {\tt random\_new\_seed(1)} has been obeyed. A type error occurs if the value of the argument is not a positive integer.
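As a sketch of how {\tt RANDOM} and {\tt RANDOM\_NEW\_SEED} can be combined to obtain a repeatable sequence (the particular numbers produced depend on the underlying generator):
{\small\begin{verbatim}
random_new_seed(362436);            % fix the starting point of the sequence
for i:=1:5 collect random(100);     % five pseudo-random numbers below 100
random_new_seed(362436);            % reseeding with the same argument ...
for i:=1:5 collect random(100);     % ... reproduces the same five numbers
\end{verbatim}}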
\subsection{REPART}\ttindex{REPART} This returns the real part of an expression, if that argument has an numerical value. A non-numerical argument is returned as an expression in the operators {\tt REPART} and {\tt IMPART}\ttindex{IMPART}. For example: {\small\begin{verbatim} repart(1+i) -> 1 repart(a+i*b) -> REPART(A) - IMPART(B) \end{verbatim}} \subsection{ROUND}\ttindex{ROUND} This operator returns the rounded value (i.e, the nearest integer) of its single argument if that argument has a numerical value. A non-numeric argument is returned as an expression in the original operator. For example: {\small\begin{verbatim} round(-5/4) -> -1 round(a) -> ROUND(A) \end{verbatim}} \subsection{SIGN}\ttindex{SIGN} {\tt SIGN} tries to evaluate the sign of its argument. If this is possible {\tt SIGN} returns one of 1, 0 or -1. Otherwise, the result is the original form or a simplified variant. For example: {\small\begin{verbatim} sign(-5) -> -1 sign(-a^2*b) -> -SIGN(B) \end{verbatim}} Note that even powers of formal expressions are assumed to be positive only as long as the switch {\tt COMPLEX} is off. \section{Mathematical Functions} {\REDUCE} knows that the following represent mathematical functions \index{Mathematical function} that can take arbitrary scalar expressions as their single argument: {\small\begin{verbatim} ACOS ACOSH ACOT ACOTH ACSC ACSCH ASEC ASECH ASIN ASINH ATAN ATANH ATAN2 COS COSH COT COTH CSC CSCH DILOG EI EXP HYPOT LN LOG LOGB LOG10 SEC SECH SIN SINH SQRT TAN TANH \end{verbatim}} \ttindex{ACOS}\ttindex{ACOSH}\ttindex{ACOT} \ttindex{ACOTH}\ttindex{ACSC}\ttindex{ACSCH}\ttindex{ASEC} \ttindex{ASECH}\ttindex{ASIN} \ttindex{ASINH}\ttindex{ATAN}\ttindex{ATANH} \ttindex{ATAN2}\ttindex{COS} \ttindex{COSH}\ttindex{COT}\ttindex{COTH}\ttindex{CSC} \ttindex{CSCH}\ttindex{DILOG}\ttindex{Ei}\ttindex{EXP} \ttindex{HYPOT}\ttindex{LN}\ttindex{LOG}\ttindex{LOGB}\ttindex{LOG10} \ttindex{SEC}\ttindex{SECH}\ttindex{SIN} \ttindex{SINH}\ttindex{SQRT}\ttindex{TAN}\ttindex{TANH} where {\tt LOG} is the natural logarithm (and equivalent to {\tt LN}), and {\tt LOGB} has two arguments of which the second is the logarithmic base. The derivatives of all these functions are also known to the system. {\REDUCE} knows various elementary identities and properties of these functions. For example: {\small\begin{verbatim} cos(-x) = cos(x) sin(-x) = - sin (x) cos(n*pi) = (-1)^n sin(n*pi) = 0 log(e) = 1 e^(i*pi/2) = i log(1) = 0 e^(i*pi) = -1 log(e^x) = x e^(3*i*pi/2) = -i \end{verbatim}} Beside these identities, there are a lot of simplifications for elementary functions defined in the {\REDUCE} system as rulelists. In order to view these, the SHOWRULES operator can be used, e.g. {\small\begin{verbatim} SHOWRULES tan; {tan(~n*arbint(~i)*pi + ~(~ x)) => tan(x) when fixp(n), tan(~x) => trigquot(sin(x),cos(x)) when knowledge_about(sin,x,tan) , ~x + ~(~ k)*pi tan(----------------) ~d x k 1 => - cot(---) when x freeof pi and abs(---)=---, d d 2 ~(~ w) + ~(~ k)*pi w + remainder(k,d)*pi tan(--------------------) => tan(-----------------------) ~(~ d) d k when w freeof pi and ratnump(---) and fixp(k) d k and abs(---)>=1, d tan(atan(~x)) => x, 2 df(tan(~x),~x) => 1 + tan(x) } \end{verbatim}} For further simplification, especially of expressions involving trigonometric functions, see the TRIGSIMP\ttindex{TRIGSIMP} package documentation. Functions not listed above may be defined in the special functions package SPECFN\ttindex{SPECFN}. 
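For instance, the elementary properties listed above take effect as soon as such expressions are entered (a brief sketch; the printed form of the results depends on the usual output switches):
{\small\begin{verbatim}
cos(-x) - cos(x);    % simplifies to 0
log(e^x);            % simplifies to x
e^(i*pi) + 1;        % simplifies to 0
\end{verbatim}}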
The user can add further rules for the reduction of expressions involving these operators by using the {\tt LET}\ttindex{LET} command. % The square root function can be input using the name {\tt SQRT}, or the % power operation {\tt \verb|^|(1/2)}. On output, unsimplified square roots % are normally represented by the operator {\tt SQRT} rather than a % fractional power. In many cases it is desirable to expand product arguments of logarithms, or collect a sum of logarithms into a single logarithm. Since these are inverse operations, it is not possible to provide rules for doing both at the same time and preserve the {\REDUCE} concept of idempotent evaluation. As an alternative, REDUCE provides two switches {\tt EXPANDLOGS} \ttindex{EXPANDLOGS} and {\tt COMBINELOGS}\ttindex{COMBINELOGS} to carry out these operations. Both are off by default. Thus to expand {\tt LOG(X*Y)} into a sum of logs, one can say {\small\begin{verbatim} ON EXPANDLOGS; LOG(X*Y); \end{verbatim}} and to combine this sum into a single log: {\small\begin{verbatim} ON COMBINELOGS; LOG(X) + LOG(Y); \end{verbatim}} At the present time, it is possible to have both switches on at once, which could lead to infinite recursion. However, an expression is switched from one form to the other in this case. Users should not rely on this behavior, since it may change in the next release. The current version of {\REDUCE} does a poor job of simplifying surds. In particular, expressions involving the product of variables raised to non-integer powers do not usually have their powers combined internally, even though they are printed as if those powers were combined. For example, the expression {\small\begin{verbatim} x^(1/3)*x^(1/6); \end{verbatim}} will print as {\small\begin{verbatim} SQRT(X) \end{verbatim}} but will have an internal form containing the two exponentiated terms. If you now subtract {\tt sqrt(x)} from this expression, you will {\em not\/} get zero. Instead, the confusing form {\small\begin{verbatim} SQRT(X) - SQRT(X) \end{verbatim}} will result. To combine such exponentiated terms, the switch {\tt COMBINEEXPT}\ttindex{COMBINEEXPT} should be turned on. The square root function can be input using the name {\tt SQRT}, or the power operation {\tt \verb|^|(1/2)}. On output, unsimplified square roots are normally represented by the operator {\tt SQRT} rather than a fractional power. With the default system switch settings, the argument of a square root is first simplified, and any divisors of the expression that are perfect squares taken outside the square root argument. The remaining expression is left under the square root. % However, if the switch {\tt REDUCED}\ttindex{REDUCED} is on, % multiplicative factors in the argument of the square root are also % separated, becoming individual square roots. Thus with {\tt REDUCED} off, Thus the expression {\small\begin{verbatim} sqrt(-8a^2*b) \end{verbatim}} becomes {\small\begin{verbatim} 2*a*sqrt(-2*b). \end{verbatim}} % whereas with {\tt REDUCED} on, it would become % {\small\begin{verbatim} % 2*a*i*sqrt(2)*sqrt(b) . % \end{verbatim}} % The switch {\tt REDUCED}\ttindex{REDUCED} also applies to other rational % powers in addition to square roots. Note that such simplifications can cause trouble if {\tt A} is eventually given a value that is a negative number. If it is important that the positive property of the square root and higher even roots always be preserved, the switch {\tt PRECISE}\ttindex{PRECISE} should be set on (the default value). 
This causes any non-numerical factors taken out of surds to be represented by their absolute value form. With % both {\tt REDUCED} and {\tt PRECISE} on then, the above example would become {\small\begin{verbatim} 2*abs(a)*sqrt(-2*b). \end{verbatim}} The statement that {\REDUCE} knows very little about these functions applies only in the mathematically exact {\tt off rounded} mode. If {\tt ROUNDED}\ttindex{ROUNDED} is on, any of the functions {\small\begin{verbatim} ACOS ACOSH ACOT ACOTH ACSC ACSCH ASEC ASECH ASIN ASINH ATAN ATANH ATAN2 COS COSH COT COTH CSC CSCH EXP HYPOT LN LOG LOGB LOG10 SEC SECH SIN SINH SQRT TAN TANH \end{verbatim}} \ttindex{ACOS}\ttindex{ACOSH}\ttindex{ACOT}\ttindex{ACOTH} \ttindex{ACSC}\ttindex{ACSCH}\ttindex{ASEC}\ttindex{ASECH} \ttindex{ASIN}\ttindex{ASINH}\ttindex{ATAN}\ttindex{ATANH} \ttindex{ATAN2}\ttindex{COS}\ttindex{COSH}\ttindex{COT} \ttindex{COTH}\ttindex{CSC}\ttindex{CSCH}\ttindex{EXP}\ttindex{HYPOT} \ttindex{LN}\ttindex{LOG}\ttindex{LOGB}\ttindex{LOG10}\ttindex{SEC} \ttindex{SECH}\ttindex{SIN}\ttindex{SINH}\ttindex{SQRT}\ttindex{TAN} \ttindex{TANH} when given a numerical argument has its value calculated to the current degree of floating point precision. In addition, real (non-integer valued) powers of numbers will also be evaluated. If the {\tt COMPLEX} switch is turned on in addition to {\tt ROUNDED}, these functions will also calculate a real or complex result, again to the current degree of floating point precision, if given complex arguments. For example, with {\tt on rounded,complex;} {\small\begin{verbatim} 2.3^(5.6i) -> -0.0480793490914 - 0.998843519372*I cos(2+3i) -> -4.18962569097 - 9.10922789376*I \end{verbatim}} \section{DF Operator} The operator {\tt DF}\ttindex{DF} is used to represent partial differentiation\index{Differentiation} with respect to one or more variables. It is used with the syntax: {\small\begin{verbatim} DF(EXPRN:algebraic[,VAR:kernel<,NUM:integer>]):algebraic. \end{verbatim}} The first argument is the expression to be differentiated. The remaining arguments specify the differentiation variables and the number of times they are applied. The number {\tt NUM} may be omitted if it is 1. For example, \begin{quote} \begin{tabbing} {\tt df(y,x1,2,x2,x3,2)} \= = $\partial^{5}y/\partial x_{1}^{2} \ \partial x_{2}\partial x_{3}^{2}.$\kill {\tt df(y,x)} \> = $\partial y/\partial x$ \\ {\tt df(y,x,2)} \> = $\partial^{2}y/\partial x^{2}$ \\ {\tt df(y,x1,2,x2,x3,2)} \> = $\partial^{5}y/\partial x_{1}^{2} \ \partial x_{2}\partial x_{3}^{2}.$ \end{tabbing} \end{quote} The evaluation of {\tt df(y,x)} proceeds as follows: first, the values of {\tt Y} and {\tt X} are found. Let us assume that {\tt X} has no assigned value, so its value is {\tt X}. Each term or other part of the value of {\tt Y} that contains the variable {\tt X} is differentiated by the standard rules. If {\tt Z} is another variable, not {\tt X} itself, then its derivative with respect to {\tt X} is taken to be 0, unless {\tt Z} has previously been declared to {\tt DEPEND} on {\tt X}, in which case the derivative is reported as the symbol {\tt df(z,x)}. \subsection{Adding Differentiation Rules} The {\tt LET}\ttindex{LET} statement can be used to introduce rules for differentiation of user-defined operators. 
Its general form is {\small\begin{verbatim} FOR ALL <var1>,...,<varn> LET DF(<operator><varlist>,<vari>)=<expression> \end{verbatim}} where {\tt <varlist>} ::= ({\tt <var1>},\dots,{\tt <varn>}), and {\tt <var1>},...,{\tt <varn>} are the dummy variable arguments of {\tt <operator>}. An analogous form applies to infix operators. {\it Examples:} {\small\begin{verbatim} for all x let df(tan x,x)= 1 + tan(x)^2; \end{verbatim}} (This is how the tan differentiation rule appears in the {\REDUCE} source.) {\small\begin{verbatim} for all x,y let df(f(x,y),x)=2*f(x,y), df(f(x,y),y)=x*f(x,y); \end{verbatim}} Notice that all dummy arguments of the relevant operator must be declared arbitrary by the {\tt FOR ALL} command, and that rules may be supplied for operators with any number of arguments. If no differentiation rule appears for an argument in an operator, the differentiation routines will return as result an expression in terms of {\tt DF}\ttindex{DF}. For example, if the rule for the differentiation with respect to the second argument of {\tt F} is not supplied, the evaluation of {\tt df(f(x,z),z)} would leave this expression unchanged. (No {\tt DEPEND} declaration is needed here, since {\tt f(x,z)} obviously ``depends on'' {\tt Z}.) Once such a rule has been defined for a given operator, any future differentiation\index{Differentiation} rules for that operator must be defined with the same number of arguments for that operator, otherwise we get the error message {\small\begin{verbatim} Incompatible DF rule argument length for <operator> \end{verbatim}} \section{INT Operator} {\tt INT}\ttindex{INT} is an operator in {\REDUCE} for indefinite integration\index{Integration}\index{Indefinite integration} using a combination of the Risch-Norman algorithm and pattern matching. It is used with the syntax: {\small\begin{verbatim} INT(EXPRN:algebraic,VAR:kernel):algebraic. \end{verbatim}} This will return correctly the indefinite integral for expressions comprising polynomials, log functions, exponential functions and tan and atan. The arbitrary constant is not represented. If the integral cannot be done in closed terms, it returns a formal integral for the answer in one of two ways: \begin{enumerate} \item It returns the input, {\tt INT(\ldots,\ldots)} unchanged. \item It returns an expression involving {\tt INT}s of some other functions (sometimes more complicated than the original one, unfortunately). \end{enumerate} Rational functions can be integrated when the denominator is factorizable by the program. In addition it will attempt to integrate expressions involving error functions, dilogarithms and other trigonometric expressions. In these cases it might not always succeed in finding the solution, even if one exists. {\it Examples:} {\small\begin{verbatim} int(log(x),x) -> X*(LOG(X) - 1), int(e^x,x) -> E**X. \end{verbatim}} The program checks that the second argument is a variable and gives an error if it is not. {\it Note:} If the {\tt int} operator is called with 4 arguments, {\REDUCE} will implicitly call the definite integration package (DEFINT) and this package will interpret the third and fourth arguments as the lower and upper limit of integration, respectively. For details, consult the documentation on the DEFINT package. \subsection{Options} The switch {\tt TRINT} when on will trace the operation of the algorithm. It produces a great deal of output in a somewhat illegible form, and is not of much interest to the general user. It is normally off. 
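For example, to watch the algorithm at work on a simple integrand one might type the following (a sketch only; the trace output itself is voluminous and is not shown):
{\small\begin{verbatim}
on trint;
int(x*e^x,x);    % traced; the result itself is unaffected
off trint;
\end{verbatim}}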
If the switch {\tt FAILHARD} is on, the algorithm will terminate with an error if the integral cannot be done in closed terms, rather than return a formal integration form. {\tt FAILHARD} is normally off. The switch {\tt NOLNR} suppresses the use of the linear properties of integration in cases when the integral cannot be found in closed terms. It is normally off. \subsection{Advanced Use} If a function appears in the integrand that is not one of the functions {\tt EXP, ERF, TAN, ATAN, LOG, DILOG}\ttindex{EXP}\ttindex{ERF} \ttindex{TAN}\ttindex{ATAN}\ttindex{LOG}\ttindex{DILOG} then the algorithm will make an attempt to integrate the argument if it can, differentiate it and reach a known function. However, the answer cannot be guaranteed in this case. If a function is known to be algebraically independent of this set it can be flagged transcendental by {\small\begin{verbatim} flag('(trilog),'transcendental); \end{verbatim}} in which case this function will be added to the permitted field descriptors for a genuine decision procedure. If this is done, the user is responsible for the mathematical correctness of his actions. The standard version does not deal with algebraic extensions. Thus integration of expressions involving square roots and other like things can lead to trouble. A contributed package that supports integration of functions involving square roots is available, however (ALGINT\extendedmanual{, chapter~\ref{ALGINT}}). In addition there is a definite integration package, DEFINT\extendedmanual{ (chapter~\ref{DEFINT})}. \subsection{References} A. C. Norman \& P. M. A. Moore, ``Implementing the New Risch Algorithm'', Proc. 4th International Symposium on Advanced Comp. Methods in Theor. Phys., CNRS, Marseilles, 1977. S. J. Harrington, ``A New Symbolic Integration System in Reduce'', Comp. Journ. 22 (1979) 2. A. C. Norman \& J. H. Davenport, ``Symbolic Integration --- The Dust Settles?'', Proc. EUROSAM 79, Lecture Notes in Computer Science 72, Springer-Verlag, Berlin Heidelberg New York (1979) 398-407. %\subsection{Definite Integration} \index{Definite integration} % %If {\tt INT} is used with the syntax % %{\small\begin{verbatim} % INT(EXPRN:algebraic,VAR:kernel,LOWER:algebraic,UPPER:algebraic):algebraic. %\end{verbatim}} % %The definite integral of {\tt EXPRN} with respect to {\tt VAR} is %calculated between the limits {\tt LOWER} and {\tt UPPER}. In the present %system, this is calculated either by pattern matching, or by first finding %the indefinite integral, and then substituting the limits into this. \section{LENGTH Operator} {\tt LENGTH}\ttindex{LENGTH} is a generic operator for finding the length of various objects in the system. The meaning depends on the type of the object. In particular, the length of an algebraic expression is the number of additive top-level terms in its expanded representation. {\it Examples:} {\small\begin{verbatim} length(a+b) -> 2 length(2) -> 1. \end{verbatim}} Other objects that support a length operator include arrays, lists and matrices. The explicit meaning in these cases is included in the description of these objects. \section{MAP Operator}\ttindex{MAP} The {\tt MAP} operator applies a uniform evaluation pattern to all members of a composite structure: a matrix, a list, or the arguments of an operator expression. The evaluation pattern can be a unary procedure, an operator, or an algebraic expression with one free variable.
It is used with the syntax: {\small\begin{verbatim} MAP(U:function,V:object) \end{verbatim}} Here {\tt object} is a list, a matrix or an operator expression. {\tt Function} can be one of the following: \begin{enumerate} \item the name of an operator for a single argument: the operator is evaluated once with each element of {\tt object} as its single argument; \item an algebraic expression with exactly one free variable, that is a variable preceded by the tilde symbol. The expression is evaluated for each element of {\tt object}, where the element is substituted for the free variable; \item a replacement rule of the form {\tt var => rep} where {\tt var} is a variable (a kernel without a subscript) and {\tt rep} is an expression that contains {\tt var}. {\tt Rep} is evaluated for each element of {\tt object} where the element is substituted for {\tt var}. {\tt Var} may be optionally preceded by a tilde. \end{enumerate} The rule form for {\tt function} is needed when more than one free variable occurs. Examples: {\small\begin{verbatim} map(abs,{1,-2,a,-a}) -> {1,2,ABS(A),ABS(A)} map(int(~w,x), mat((x^2,x^5),(x^4,x^5))) -> [ 3 6 ] [ x x ] [---- ----] [ 3 6 ] [ ] [ 5 6 ] [ x x ] [---- ----] [ 5 6 ] map(~w*6, x^2/3 = y^3/2 -1) -> 2*X^2=3*(Y^3-2) \end{verbatim}} You can use {\tt MAP} in nested expressions. However, you cannot apply {\tt MAP} to a non-composed object, e.g. an identifier or a number. \section{MKID Operator}\ttindex{MKID} In many applications, it is useful to create a set of identifiers for naming objects in a consistent manner. In most cases, it is sufficient to create such names from two components. The operator {\tt MKID} is provided for this purpose. Its syntax is: {\small\begin{verbatim} MKID(U:id,V:id|non-negative integer):id \end{verbatim}} for example {\small\begin{verbatim} mkid(a,3) -> A3 mkid(apple,s) -> APPLES \end{verbatim}} while {\tt mkid(a+b,2)} gives an error. The {\tt SET}\ttindex{SET} operator can be used to give a value to the identifiers created by {\tt MKID}, for example {\small\begin{verbatim} set(mkid(a,3),3); \end{verbatim}} will give {\tt A3} the value 3. \section{PF Operator}\ttindex{PF} {\tt PF(<exp>,<var>)} transforms the expression {\tt <exp>} into a list of partial fractions with respect to the main variable, {\tt <var>}. {\tt PF} does a complete partial fraction decomposition, and as the algorithms used are fairly unsophisticated (factorization and the extended Euclidean algorithm), the code may be unacceptably slow in complicated cases. {\it Example:} Given {\tt 2/((x+1)\verb|^|2*(x+2))} in the workspace, {\tt pf(ws,x);} gives the result {\small\begin{verbatim} 2 - 2 2 {-------,-------,--------------} . X + 2 X + 1 2 X + 2*X + 1 \end{verbatim}} If you want the denominators in factored form, use {\tt off exp;}. Thus, with {\tt 2/((x+1)\verb|^|2*(x+2))} in the workspace, the commands {\tt off exp; pf(ws,x);} give the result {\small\begin{verbatim} 2 - 2 2 {-------,-------,----------} . X + 2 X + 1 2 (X + 1) \end{verbatim}} To recombine the terms, {\tt FOR EACH \ldots SUM} can be used. So with the above list in the workspace, {\tt for each j in ws sum j;} returns the result {\small\begin{verbatim} 2 ------------------ 2 (X + 2)*(X + 1) \end{verbatim}} Alternatively, one can use the operations on lists to extract any desired term. \section{SELECT Operator}\ttindex{SELECT} \ttindex{map}\ttindex{list} The {\tt SELECT} operator extracts from a list, or from the arguments of an n--ary operator, elements corresponding to a boolean predicate.
It is used with the syntax: {\small\begin{verbatim} SELECT(U:function,V:list) \end{verbatim}} {\tt Function} can be one of the following forms: \begin{enumerate} \item the name of an operator for a single argument: the operator is evaluated once with each element of {\tt object} as its single argument; \item an algebraic expression with exactly one free variable, that is a variable preceded by the tilde symbol. The expression is evaluated for each element of \meta{object}, where the element is substituted for the free variable; \item a replacement rule of the form \meta{var $=>$ rep} where {\tt var} is a variable (a kernel without subscript) and {\tt rep} is an expression that contains {\tt var}. {\tt Rep} is evaluated for each element of {\tt object} where the element is substituted for {\tt var}. {\tt var} may be optionally preceded by a tilde. \end{enumerate} The rule form for {\tt function} is needed when more than one free variable occurs. The result of evaluating {\tt function} is interpreted as a boolean value corresponding to the conventions of {\REDUCE}. These values are composed with the leading operator of the input expression. {\it Examples:} {\small\begin{verbatim} select( ~w>0 , {1,-1,2,-3,3}) -> {1,2,3} select(evenp deg(~w,y),part((x+y)^5,0):=list) -> {X^5 ,10*X^3*Y^2 ,5*X*Y^4} select(evenp deg(~w,x),2x^2+3x^3+4x^4) -> 4X^4 + 2X^2 \end{verbatim}} \section{SOLVE Operator}\ttindex{SOLVE} SOLVE is an operator for solving one or more simultaneous algebraic equations. It is used with the syntax: {\small\begin{verbatim} SOLVE(EXPRN:algebraic[,VAR:kernel|,VARLIST:list of kernels]) :list. \end{verbatim}} {\tt EXPRN} is of the form {\tt <expression>} or \{ {\tt <expression1>},{\tt <expression2>}, \dots \}. Each expression is an algebraic equation, or is the difference of the two sides of the equation. The second argument is either a kernel or a list of kernels representing the unknowns in the system. This argument may be omitted if the number of distinct, non-constant, top-level kernels equals the number of unknowns, in which case these kernels are presumed to be the unknowns. For one equation, {\tt SOLVE}\ttindex{SOLVE} recursively uses factorization and decomposition, together with the known inverses of {\tt LOG}, {\tt SIN}, {\tt COS}, {\tt \verb|^|}, {\tt ACOS}, {\tt ASIN}, and linear, quadratic, cubic, quartic, or binomial factors. Solutions of equations built with exponentials or logarithms are often expressed in terms of Lambert's {\tt W} function.\index{Lambert's W} This function is (partially) implemented in the special functions package. Linear equations are solved by the multi-step elimination method due to Bareiss, unless the switch {\tt CRAMER}\ttindex{CRAMER} is on, in which case Cramer's method is used. The Bareiss method is usually more efficient unless the system is large and dense. Non-linear equations are solved using the Groebner basis package. \index{Groebner} Users should note that this can be quite a time consuming process. {\it Examples:} {\small\begin{verbatim} solve(log(sin(x+3))^5 = 8,x); solve(a*log(sin(x+3))^5 - b, sin(x+3)); solve({a*x+y=3,y=-2},{x,y}); \end{verbatim}} {\tt SOLVE} returns a list of solutions. If there is one unknown, each solution is an equation for the unknown. If a complete solution was found, the unknown will appear by itself on the left-hand side of the equation. 
On the other hand, if the solve package could not find a solution, the ``solution'' will be an equation for the unknown in terms of the operator {\tt ROOT\_OF}\ttindex{ROOT\_OF}. If there are several unknowns, each solution will be a list of equations for the unknowns. For example, {\small\begin{verbatim} solve(x^2=1,x); -> {X=-1,X=1} solve(x^7-x^6+x^2=1,x) 6 -> {X=ROOT_OF(X_ + X_ + 1,X_,TAG_1),X=1} solve({x+3y=7,y-x=1},{x,y}) -> {{X=1,Y=2}}. \end{verbatim}} The TAG argument is used to uniquely identify those particular solutions. Solution multiplicities are stored in the global variable {\tt ROOT\_MULTIPLICITIES} rather than the solution list. The value of this variable is a list of the multiplicities of the solutions for the last call of {\tt SOLVE}. \ttindex{SOLVE} For example, {\small\begin{verbatim} solve(x^2=2x-1,x); root_multiplicities; \end{verbatim}} gives the results {\small\begin{verbatim} {X=1} {2} \end{verbatim}} If you want the multiplicities explicitly displayed, the switch {\tt MULTIPLICITIES}\ttindex{MULTIPLICITIES} can be turned on. For example {\small\begin{verbatim} on multiplicities; solve(x^2=2x-1,x); \end{verbatim}} yields the result {\small\begin{verbatim} {X=1,X=1} \end{verbatim}} \subsection{Handling of Undetermined Solutions} When {\tt SOLVE} cannot find a solution to an equation, it normally returns an equation for the relevant indeterminates in terms of the operator {\tt ROOT\_OF}.\ttindex{ROOT\_OF} For example, the expression {\small\begin{verbatim} solve(cos(x) + log(x),x); \end{verbatim}} returns the result {\small\begin{verbatim} {X=ROOT_OF(COS(X_) + LOG(X_),X_,TAG_1)} . \end{verbatim}} An expression with a top-level {\tt ROOT\_OF} operator is implicitly a list with an unknown number of elements (since we don't always know how many solutions an equation has). If a substitution is made into such an expression, closed form solutions can emerge. If this occurs, the {\tt ROOT\_OF} construct is replaced by an operator {\tt ONE\_OF}.\ttindex{ONE\_OF} At this point it is of course possible to transform the result of the original {\tt SOLVE} operator expression into a standard {\tt SOLVE} solution. To effect this, the operator {\tt EXPAND\_CASES} \ttindex{EXPAND\_CASES} can be used. The following example shows the use of these facilities: \extendedmanual{\newpage} {\small\begin{verbatim} solve(-a*x^3+a*x^2+x^4-x^3-4*x^2+4,x); 2 3 {X=ROOT_OF(A*X_ - X_ + 4*X_ + 4,X_,TAG_2),X=1} sub(a=-1,ws); {X=ONE_OF({2,-1,-2},TAG_2),X=1} expand_cases ws; {X=2,X=-1,X=-2,X=1} \end{verbatim}} \subsection{Solutions of Equations Involving Cubics and Quartics} Since roots of cubics and quartics can often be very messy, a switch {\tt FULLROOTS}\ttindex{FULLROOTS} is available, that, when off (the default), will prevent the production of a result in closed form. The {\tt ROOT\_OF} construct will be used in this case instead. In constructing the solutions of cubics and quartics, trigonometrical forms are used where appropriate. This option is under the control of a switch {\tt TRIGFORM},\ttindex{TRIGFORM} which is normally on. 
The following example illustrates the use of these facilities: {\small\begin{verbatim} let xx = solve(x^3+x+1,x); xx; 3 {X=ROOT_OF(X_ + X_ + 1,X_)} on fullroots; xx; - SQRT(31)*I ATAN(---------------) 3*SQRT(3) {X=(I*(SQRT(3)*SIN(-----------------------) 3 \end{verbatim}} \newpage {\small\begin{verbatim} - SQRT(31)*I ATAN(---------------) 3*SQRT(3) - COS(-----------------------)))/SQRT(3), 3 - SQRT(31)*I ATAN(---------------) 3*SQRT(3) X=( - I*(SQRT(3)*SIN(-----------------------) 3 - SQRT(31)*I ATAN(---------------) 3*SQRT(3) + COS(-----------------------)))/SQRT( 3 3), - SQRT(31)*I ATAN(---------------) 3*SQRT(3) 2*COS(-----------------------)*I 3 X=----------------------------------} SQRT(3) off trigform; xx; 2/3 {X=( - (SQRT(31) - 3*SQRT(3)) *SQRT(3)*I 2/3 2/3 - (SQRT(31) - 3*SQRT(3)) - 2 *SQRT(3)*I 2/3 1/3 1/3 + 2 )/(2*(SQRT(31) - 3*SQRT(3)) *6 1/6 *3 ), 2/3 X=((SQRT(31) - 3*SQRT(3)) *SQRT(3)*I 2/3 2/3 - (SQRT(31) - 3*SQRT(3)) + 2 *SQRT(3)*I 2/3 1/3 1/3 + 2 )/(2*(SQRT(31) - 3*SQRT(3)) *6 1/6 *3 ), 2/3 2/3 (SQRT(31) - 3*SQRT(3)) - 2 X=-------------------------------------} 1/3 1/3 1/6 (SQRT(31) - 3*SQRT(3)) *6 *3 \end{verbatim}} \subsection{Other Options} If {\tt SOLVESINGULAR}\ttindex{SOLVESINGULAR} is on (the default setting), degenerate systems such as {\tt x+y=0}, {\tt 2x+2y=0} will be solved by introducing appropriate arbitrary constants. The consistent singular equation 0=0 or equations involving functions with multiple inverses may introduce unique new indeterminant kernels {\tt ARBCOMPLEX(j)}, or {\tt ARBINT(j)}, ($j$=1,2,...), % {\tt ARBREAL(j)}, representing arbitrary complex or integer numbers respectively. To automatically select the principal branches, do {\tt off allbranch;} . \ttindex{ALLBRANCH} To avoid the introduction of new indeterminant kernels do {\tt OFF ARBVARS}\ttindex{ARBVARS} -- then no equations are generated for the free variables and their original names are used to express the solution forms. To suppress solutions of consistent singular equations do {\tt OFF SOLVESINGULAR}. To incorporate additional inverse functions do, for example: {\small\begin{verbatim} put('sinh,'inverse,'asinh); put('asinh,'inverse,'sinh); \end{verbatim}} together with any desired simplification rules such as {\small\begin{verbatim} for all x let sinh(asinh(x))=x, asinh(sinh(x))=x; \end{verbatim}} For completeness, functions with non-unique inverses should be treated as {\tt \verb|^|}, {\tt SIN}, and {\tt COS} are in the {\tt SOLVE} \ttindex{SOLVE} module source. Arguments of {\tt ASIN} and {\tt ACOS} are not checked to ensure that the absolute value of the real part does not exceed 1; and arguments of {\tt LOG} are not checked to ensure that the absolute value of the imaginary part does not exceed $\pi$; but checks (perhaps involving user response for non-numerical arguments) could be introduced using {\tt LET}\ttindex{LET} statements for these operators. \subsection{Parameters and Variable Dependency} The proper design of a variable sequence supplied as a second argument to {\tt SOLVE} is important for the structure of the solution of an equation system. Any unknown in the system not in this list is considered totally free. E.g.\ the call {\small\begin{verbatim} solve({x=2*z,z=2*y},{z}); \end{verbatim}} produces an empty list as a result because there is no function $z=z(x,y)$ which fulfills both equations for arbitrary $x$ and $y$ values. 
In such a case the share variable {\tt requirements}\ttindex{requirements} displays a set of restrictions for the parameters of the system: {\small\begin{verbatim} requirements; {x - 4*y} \end{verbatim}} The non-existence of a formal solution is caused by a contradiction which disappears only if the parameters of the initial system are set such that all members of the requirements list take the value zero. For a linear system the set is complete: a solution of the requirements list makes the initial system solvable. E.g.\ in the above case a substitution $x=4y$ makes the equation set consistent. For a non-linear system only one inconsistency is detected. If such a system has more than one inconsistency, you must reduce them one after the other. \footnote{ The difference between linear and non--linear inconsistent systems is based on the algorithms which produce this information as a side effect when attempting to find a formal solution; example: $solve(\{x=a,x=b,y=c,y=d\},\{x,y\})$ gives a set $\{a-b,c-d\}$ while $solve(\{x^2=a,x^2=b,y^2=c,y^2=d\},\{x,y\})$ leads to $\{a-b\}$. } The set also shows you the dependency among the parameters: here one of $x$ and $y$ is free and a formal solution of the system can be computed by adding it to the variable list of {\tt solve}. The requirement set is not unique -- there may be other such sets. A system with parameters may have a formal solution, e.g.\ {\small\begin{verbatim} solve({x=a*z+1,0=b*z-y},{z,x}); y a*y + b {{z=---,x=---------}} b b \end{verbatim}} which is not valid for all possible values of the parameters. The variable {\tt assumptions}\ttindex{assumptions} then contains a list of restrictions: the solutions are valid only as long as none of these expressions vanishes. Any zero of one of them represents a special case that is not covered by the formal solution. In the above case the value is \extendedmanual{\newpage} {\small\begin{verbatim} assumptions; {b} \end{verbatim}} which formally excludes the case $b=0$; obviously this special parameter value makes the system singular. The set of assumptions is complete for both linear and non--linear systems. {\tt SOLVE} rearranges the variable sequence to reduce the (expected) computing time. This behavior is controlled by the switch {\tt varopt}\ttindex{varopt}, which is on by default. If it is turned off, the supplied variable sequence is used, or the system kernel ordering is taken if the variable list is omitted. The effect is demonstrated by an example: {\small\begin{verbatim} s:= {y^3+3x=0,x^2+y^2=1}; solve(s,{y,x}); 6 2 {{y=root_of(y_ + 9*y_ - 9,y_), 3 - y x=-------}} 3 off varopt; solve(s,{y,x}); 6 4 2 {{x=root_of(x_ - 3*x_ + 12*x_ - 1,x_), 4 2 x*( - x + 2*x - 10) y=-----------------------}} 3 \end{verbatim}} In the first case, {\tt solve} forms the solution as a set of pairs $(y_i,x(y_i))$ because the degree of $x$ is higher -- such a rearrangement makes the internal computation of the Gr\"obner basis generally faster. For the second case the explicitly given variable sequence is used, so that the solution now has the form $(x_i,y(x_i))$. Controlling the variable sequence is especially important if the system has one or more free variables. As an alternative to turning off {\tt varopt}, a partial dependency among the variables can be declared using the {\tt depend}\index{depend} statement: {\tt solve} then rearranges the variable sequence but keeps any variable ahead of those on which it depends.
\extendedmanual{\newpage} {\small\begin{verbatim} on varopt; s:={a^3+b,b^2+c}$ solve(s,{a,b,c}); 3 6 {{a=arbcomplex(1),b= - a ,c= - a }} depend a,c; depend b,c; solve(s,{a,b,c}); {{c=arbcomplex(2), 6 a=root_of(a_ + c,a_), 3 b= - a }} \end{verbatim}} Here {\tt solve} is forced to put $c$ after $a$ and after $b$, but there is no obstacle to interchanging $a$ and $b$. \section{Even and Odd Operators}\index{Even operator}\index{Odd operator} An operator can be declared to be {\em even\/} or {\em odd\/} in its first argument by the declarations {\tt EVEN}\ttindex{EVEN} and {\tt ODD}\ttindex{ODD} respectively. Expressions involving an operator declared in this manner are transformed if the first argument contains a minus sign. Any other arguments are not affected. In addition, if say {\tt F} is declared odd, then {\tt f(0)} is replaced by zero unless {\tt F} is also declared {\em non zero\/} by the declaration {\tt NONZERO}\ttindex{NONZERO}. For example, the declarations {\small\begin{verbatim} even f1; odd f2; \end{verbatim}} mean that {\small\begin{verbatim} f1(-a) -> F1(A) f2(-a) -> -F2(A) f1(-a,-b) -> F1(A,-B) f2(0) -> 0. \end{verbatim}} To inhibit the last transformation, say {\tt nonzero f2;}. \section{Linear Operators}\index{Linear operator} An operator can be declared to be linear in its first argument over powers of its second argument. If an operator {\tt F} is so declared, {\tt F} of any sum is broken up into sums of {\tt F}s, and any factors that are not powers of the variable are taken outside. This means that {\tt F} must have (at least) two arguments. In addition, the second argument must be an identifier (or more generally a kernel), not an expression. {\it Example:} If {\tt F} were declared linear, then {\small\begin{verbatim} 5 f(a*x^5+b*x+c,x) -> F(X ,X)*A + F(X,X)*B + F(1,X)*C \end{verbatim}} More precisely, not only will the variable and its powers remain within the scope of the {\tt F} operator, but so will any variable and its powers that had been declared to {\tt DEPEND} on the prescribed variable; and so would any expression that contains that variable or a dependent variable on any level, e.g. {\tt cos(sin(x))}. To declare operators {\tt F} and {\tt G} to be linear operators, use:\ttindex{LINEAR} {\small\begin{verbatim} linear f,g; \end{verbatim}} The analysis is done of the first argument with respect to the second; any other arguments are ignored. It uses the following rules of evaluation: \begin{quote} \begin{tabbing} {\tt f(0) -> 0} \\ {\tt f(-y,x) -> -F(Y,X)} \\ {\tt f(y+z,x) -> F(Y,X)+F(Z,X)} \\ {\tt f(y*z,x) -> Z*F(Y,X)} \hspace{0.5in}\= if Z does not depend on X \\ {\tt f(y/z,x) -> F(Y,X)/Z} \> if Z does not depend on X \end{tabbing} \end{quote} To summarize, {\tt Y} ``depends'' on the indeterminate {\tt X} in the above if either of the following hold: \begin{enumerate} \item {\tt Y} is an expression that contains {\tt X} at any level as a variable, e.g.: {\tt cos(sin(x))} \item Any variable in the expression {\tt Y} has been declared dependent on {\tt X} by use of the declaration {\tt DEPEND}. \end{enumerate} The use of such linear operators\index{Linear operator} can be seen in the paper Fox, J.A. and A. C. Hearn, ``Analytic Computation of Some Integrals in Fourth Order Quantum Electrodynamics'' Journ. Comp. Phys. 14 (1974) 301-317, which contains a complete listing of a program for definite integration\index{Integration} of some expressions that arise in fourth order quantum electrodynamics. 
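As a further illustration of the interaction between {\tt LINEAR} and {\tt DEPEND} described above, consider the following sketch (the operator name {\tt intgrl} is chosen arbitrarily here, and the exact layout and term ordering of the printed result may differ):
{\small\begin{verbatim}
linear intgrl;           % intgrl is now a linear operator
depend y,x;              % y is declared to depend on x
intgrl(a*y+b*x^2+c,x);
   % a and b are taken outside, while y and the powers of x
   % remain inside the operator, giving a result roughly of
   % the form  INTGRL(Y,X)*A + INTGRL(X^2,X)*B + INTGRL(1,X)*C
\end{verbatim}}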
\section{Non-Commuting Operators}\index{Non-commuting operator} An operator can be declared to be non-commutative under multiplication by the declaration {\tt NONCOM}.\ttindex{NONCOM} {\it Example:} After the declaration \\ {\tt noncom u,v;}\\ the expressions {\tt u(x)*u(y)-u(y)*u(x)} and {\tt u(x)*v(y)-v(y)*u(x)} will remain unchanged on simplification, and in particular will not simplify to zero. Note that it is the operator ({\tt U} and {\tt V} in the above example) and not the variable that has the non-commutative property. The {\tt LET}\ttindex{LET} statement may be used to introduce rules of evaluation for such operators. In particular, the boolean operator {\tt ORDP}\ttindex{ORDP} is useful for introducing an ordering on such expressions. {\it Example:} The rule {\small\begin{verbatim} for all x,y such that x neq y and ordp(x,y) let u(x)*u(y)= u(y)*u(x)+comm(x,y); \end{verbatim}} would introduce the commutator of {\tt u(x)} and {\tt u(y)} for all {\tt X} and {\tt Y}. Note that since {\tt ordp(x,x)} is {\em true}, the equality check is necessary in the degenerate case to avoid a circular loop in the rule. \section{Symmetric and Antisymmetric Operators} An operator can be declared to be symmetric with respect to its arguments by the declaration {\tt SYMMETRIC}.\ttindex{SYMMETRIC} For example {\small\begin{verbatim} symmetric u,v; \end{verbatim}} means that any expression involving the top level operators {\tt U} or {\tt V} will have its arguments reordered to conform to the internal order used by {\REDUCE}. The user can change this order for kernels by the command {\tt KORDER}. For example, {\tt u(x,v(1,2))} would become {\tt u(v(2,1),x)}, since numbers are ordered in decreasing order, and expressions are ordered in decreasing order of complexity. Similarly the declaration {\tt ANTISYMMETRIC}\ttindex{ANTISYMMETRIC} declares an operator antisymmetric. For example, {\small\begin{verbatim} antisymmetric l,m; \end{verbatim}} means that any expression involving the top level operators {\tt L} or {\tt M} will have its arguments reordered to conform to the internal order of the system, and the sign of the expression changed if there are an odd number of argument interchanges necessary to bring about the new order. For example, {\tt l(x,m(1,2))} would become {\tt -l(-m(2,1),x)} since one interchange occurs with each operator. An expression like {\tt l(x,x)} would also be replaced by 0. \section{Declaring New Prefix Operators} The user may add new prefix\index{Prefix} operators to the system by using the declaration {\tt OPERATOR}. For example: {\small\begin{verbatim} operator h,g1,arctan; \end{verbatim}} adds the prefix operators {\tt H}, {\tt G1} and {\tt ARCTAN} to the system. This allows symbols like {\tt h(w), h(x,y,z), g1(p+q), arctan(u/v)} to be used in expressions, but no meaning or properties of the operator are implied. The same operator symbol can be used equally well as a 0-, 1-, 2-, 3-, etc.-place operator. To give a meaning to an operator symbol, or express some of its properties, {\tt LET}\ttindex{LET} statements can be used, or the operator can be given a definition as a procedure. If the user forgets to declare an identifier as an operator, the system will prompt the user to do so in interactive mode, or do it automatically in non-interactive mode. A diagnostic message will also be printed if an identifier is declared {\tt OPERATOR} more than once. Operators once declared are global in scope, and so can then be referenced anywhere in the program. 
In other words, a declaration within a block (or a procedure) does not limit the scope of the operator to that block, nor does the operator go away on exiting the block (use {\tt CLEAR} instead for this purpose). \section{Declaring New Infix Operators} Users can add new infix operators by using the declarations {\tt INFIX}\ttindex{INFIX} and {\tt PRECEDENCE}.\ttindex{PRECEDENCE} For example, {\small\begin{verbatim} infix mm; precedence mm,-; \end{verbatim}} The declaration {\tt infix mm;} would allow one to use the symbol {\tt MM} as an infix operator: \begin{quote} \hspace{0.2in} {\tt a mm b} \hspace{0.3in} instead of \hspace{0.3in} {\tt mm(a,b)}. \end{quote} The declaration {\tt precedence mm,-;} says that {\tt MM} should be inserted into the infix operator precedence list just {\em after\/} the $-$ operator. This gives it higher precedence than $-$ and lower precedence than * . Thus \begin{quote} \hspace{0.2in}{\tt a - b mm c - d}\hspace{.3in} means \hspace{.3in} {\tt a - (b mm c) - d}, \end{quote} while \begin{quote} \hspace{0.2in}{\tt a * b mm c * d}\hspace{.3in} means \hspace{.3in} {\tt (a * b) mm (c * d)}. \end{quote} Both infix and prefix\index{Prefix} operators have no transformation properties unless {\tt LET}\ttindex{LET} statements or procedure declarations are used to assign a meaning. We should note here that infix operators so defined are always binary: \begin{quote} \hspace{0.2in}{\tt a mm b mm c}\hspace{.3in} means \hspace{.3in} {\tt (a mm b) mm c}. \end{quote} \section{Creating/Removing Variable Dependency} There are several facilities in {\REDUCE}, such as the differentiation \index{Differentiation} operator and the linear operator\index{Linear operator} facility, that can utilize knowledge of the dependency between various variables, or kernels. Such dependency may be expressed by the command {\tt DEPEND}.\ttindex{DEPEND} This takes an arbitrary number of arguments and sets up a dependency of the first argument on the remaining arguments. For example, {\small\begin{verbatim} depend x,y,z; \end{verbatim}} says that {\tt X} is dependent on both {\tt Y} and {\tt Z}. {\small\begin{verbatim} depend z,cos(x),y; \end{verbatim}} says that {\tt Z} is dependent on {\tt COS(X)} and {\tt Y}. Dependencies introduced by {\tt DEPEND} can be removed by {\tt NODEPEND}. \ttindex{NODEPEND} The arguments of this are the same as for {\tt DEPEND}. For example, given the above dependencies, {\small\begin{verbatim} nodepend z,cos(x); \end{verbatim}} says that {\tt Z} is no longer dependent on {\tt COS(X)}, although it remains dependent on {\tt Y}. \chapter{Display and Structuring of Expressions}\index{Display} \index{Structuring} In this section, we consider a variety of commands and operators that permit the user to obtain various parts of algebraic expressions and also display their structure in a variety of forms. Also presented are some additional concepts in the {\REDUCE} design that help the user gain a better understanding of the structure of the system. \section{Kernels}\index{Kernel} {\REDUCE} is designed so that each operator in the system has an evaluation (or simplification)\index{Simplification} function associated with it that transforms the expression into an internal canonical form. \index{Canonical form} This form, which bears little resemblance to the original expression, is described in detail in Hearn, A. C., ``{\REDUCE} 2: A System and Language for Algebraic Manipulation,'' Proc. of the Second Symposium on Symbolic and Algebraic Manipulation, ACM, New York (1971) 128-133. 
The evaluation function may transform its arguments in one of two alternative ways. First, it may convert the expression into other operators in the system, leaving no functions of the original operator for further manipulation. This is in a sense true of the evaluation functions associated with the operators {\tt +}, {\tt *} and {\tt /} , for example, because the canonical form\index{Canonical form} does not include these operators explicitly. It is also true of an operator such as the determinant operator {\tt DET}\ttindex{DET} because the relevant evaluation function calculates the appropriate determinant, and the operator {\tt DET} no longer appears. On the other hand, the evaluation process may leave some residual functions of the relevant operator. For example, with the operator {\tt COS}, a residual expression like {\tt COS(X)} may remain after evaluation unless a rule for the reduction of cosines into exponentials, for example, were introduced. These residual functions of an operator are termed {\em kernels\/}\index{Kernel} and are stored uniquely like variables. Subsequently, the kernel is carried through the calculation as a variable unless transformations are introduced for the operator at a later stage. In those cases where the evaluation process leaves an operator expression with non-trivial arguments, the form of the argument can vary depending on the state of the system at the point of evaluation. Such arguments are normally produced in expanded form with no terms factored or grouped in any way. For example, the expression {\tt cos(2*x+2*y)} will normally be returned in the same form. If the argument {\tt 2*x+2*y} were evaluated at the top level, however, it would be printed as {\tt 2*(X+Y)}. If it is desirable to have the arguments themselves in a similar form, the switch {\tt INTSTR}\ttindex{INTSTR} (for ``internal structure''), if on, will cause this to happen. In cases where the arguments of the kernel operators may be reordered, the system puts them in a canonical order, based on an internal intrinsic ordering of the variables. However, some commands allow arguments in the form of kernels, and the user has no way of telling what internal order the system will assign to these arguments. To resolve this difficulty, we introduce the notion of a {\em kernel form\/}\index{kernel form} as an expression that transforms to a kernel on evaluation. Examples of kernel forms are: {\small\begin{verbatim} a cos(x*y) log(sin(x)) \end{verbatim}} whereas {\small\begin{verbatim} a*b (a+b)^4 \end{verbatim}} are not. We see that kernel forms can usually be used as generalized variables, and most algebraic properties associated with variables may also be associated with kernels. \section{The Expression Workspace}\index{Workspace} Several mechanisms are available for saving and retrieving previously evaluated expressions. The simplest of these refers to the last algebraic expression simplified. When an assignment of an algebraic expression is made, or an expression is evaluated at the top level, (i.e., not inside a compound statement or procedure) the results of the evaluation are automatically saved in a variable {\tt WS} that we shall refer to as the workspace. (More precisely, the expression is assigned to the variable {\tt WS} that is then available for further manipulation.) 
{\it Example:} If we evaluate the expression {\tt (x+y)\verb|^|2} at the top level and next wish to differentiate it with respect to {\tt Y}, we can simply say {\small\begin{verbatim} df(ws,y); \end{verbatim}} to get the desired answer. If the user wishes to assign the workspace to a variable or expression for later use, the {\tt SAVEAS}\ttindex{SAVEAS} statement can be used. It has the syntax {\small\begin{verbatim} SAVEAS <expression> \end{verbatim}} For example, after the differentiation in the last example, the workspace holds the expression {\tt 2*x+2*y}. If we wish to assign this to the variable {\tt Z} we can now say {\small\begin{verbatim} saveas z; \end{verbatim}} If the user wishes to save the expression in a form that allows him to use some of its variables as arbitrary parameters, the {\tt FOR ALL} command can be used. {\it Example:} {\small\begin{verbatim} for all x saveas h(x); \end{verbatim}} with the above expression would mean that {\tt h(z)} evaluates to {\tt 2*Y+2*Z}. A further method for referencing more than the last expression is described in the section on interactive use of {\REDUCE}. \section{Output of Expressions} A considerable degree of flexibility is available in {\REDUCE} in the printing of expressions generated during calculations. No explicit format statements are supplied, as these are in most cases of little use in algebraic calculations, where the size of output or its composition is not generally known in advance. Instead, {\REDUCE} provides a series of mode options to the user that should enable him to produce his output in a comprehensible and possibly pleasing form. The most extreme option offered is to suppress the output entirely from any top level evaluation. This is accomplished by turning off the switch {\tt OUTPUT}\ttindex{OUTPUT} which is normally on. It is useful for limiting output when loading large files or producing ``clean'' output from the prettyprint programs. In most circumstances, however, we wish to view the output, so we need to know how to format it appropriately. As we mentioned earlier, an algebraic expression is normally printed in an expanded form, filling the whole output line with terms. Certain output declarations,\index{Output declaration} however, can be used to affect this format. To begin with, we look at an operator for changing the length of the output line. \subsection{LINELENGTH Operator}\ttindex{LINELENGTH} This operator is used with the syntax {\small\begin{verbatim} LINELENGTH(NUM:integer):integer \end{verbatim}} and sets the output line length to the integer {\tt NUM}. It returns the previous output line length (so that it can be stored for later resetting of the output line if needed). \subsection{Output Declarations} We now describe a number of switches and declarations that are available for controlling output formats. It should be noted, however, that the transformation of large expressions to produce these varied output formats can take a lot of computing time and space. If a user wishes to speed up the printing of the output in such cases, he can turn off the switch {\tt PRI}.\ttindex{PRI} If this is done, then output is produced in one fixed format, which basically reflects the internal form of the expression, and none of the options below apply. {\tt PRI} is normally on. With {\tt PRI} on, the output declarations\index{Output declaration} and switches available are as follows: \subsubsection{ORDER Declaration} The declaration {\tt ORDER}\ttindex{ORDER} may be used to order variables on output. 
The syntax is: {\small\begin{verbatim} order v1,...vn; \end{verbatim}} where the {\tt vi} are kernels. Thus, {\small\begin{verbatim} order x,y,z; \end{verbatim}} orders {\tt X} ahead of {\tt Y}, {\tt Y} ahead of {\tt Z} and all three ahead of other variables not given an order. {\tt order nil;} resets the output order to the system default. The order of variables may be changed by further calls of {\tt ORDER}, but then the reordered variables would have an order lower than those in earlier {\tt ORDER}\ttindex{ORDER} calls. Thus, {\small\begin{verbatim} order x,y,z; order y,x; \end{verbatim}} would order {\tt Z} ahead of {\tt Y} and {\tt X}. The default ordering is usually alphabetic. \subsubsection{FACTOR Declaration} This declaration takes a list of identifiers or kernels\index{Kernel} as argument. {\tt FACTOR}\ttindex{FACTOR} is not a factoring command (use {\tt FACTORIZE} or the {\tt FACTOR} switch for this purpose); rather it is a separation command. All terms involving fixed powers of the declared expressions are printed as a product of the fixed powers and a sum of the rest of the terms. All expressions involving a given prefix operator may also be factored by putting the operator name in the list of factored identifiers. For example: {\small\begin{verbatim} factor x,cos,sin(x); \end{verbatim}} causes all powers of {\tt X} and {\tt SIN(X)} and all functions of {\tt COS} to be factored. Note that {\tt FACTOR} does not affect the order of its arguments. You should also use {\tt ORDER} if this is important. The declaration {\tt remfac v1,...,vn;}\ttindex{REMFAC} removes the factoring flag from the expressions {\tt v1} through {\tt vn}. \subsection{Output Control Switches} \label{sec-output} In addition to these declarations, the form of the output can be modified by switching various output control switches using the declarations {\tt ON} and {\tt OFF}. We shall illustrate the use of these switches by an example, namely the printing of the expression {\small\begin{verbatim} x^2*(y^2+2*y)+x*(y^2+z)/(2*a) . \end{verbatim}} The relevant switches are as follows: \subsubsection{ALLFAC Switch} This switch will cause the system to search the whole expression, or any sub-expression enclosed in parentheses, for simple multiplicative factors and print them outside the parentheses. Thus our expression with {\tt ALLFAC} \ttindex{ALLFAC} off will print as {\small\begin{verbatim} 2 2 2 2 (2*X *Y *A + 4*X *Y*A + X*Y + X*Z)/(2*A) \end{verbatim}} and with {\tt ALLFAC} on as {\small\begin{verbatim} 2 2 X*(2*X*Y *A + 4*X*Y*A + Y + Z)/(2*A) . \end{verbatim}} {\tt ALLFAC} is normally on, and is on in the following examples, except where otherwise stated. \subsubsection{DIV Switch}\ttindex{DIV} This switch makes the system search the denominator of an expression for simple factors that it divides into the numerator, so that rational fractions and negative powers appear in the output. With {\tt DIV} on, our expression would print as {\small\begin{verbatim} 2 2 (-1) (-1) X*(X*Y + 2*X*Y + 1/2*Y *A + 1/2*A *Z) . \end{verbatim}} {\tt DIV} is normally off. \subsubsection{LIST Switch}\ttindex{LIST} This switch causes the system to print each term in any sum on a separate line. With {\tt LIST} on, our expression prints as {\small\begin{verbatim} 2 X*(2*X*Y *A + 4*X*Y*A 2 + Y + Z)/(2*A) . \end{verbatim}} {\tt LIST} is normally off. \subsubsection{NOSPLIT Switch}\ttindex{NOSPLIT} Under normal circumstances, the printing routines try to break an expression across lines at a natural point. 
This is a fairly expensive process. If you are not overly concerned about where the end-of-line breaks come, you can speed up the printing of expressions by turning off the switch {\tt NOSPLIT}. This switch is normally on. \subsubsection{RAT Switch}\ttindex{RAT} This switch is only useful with expressions in which variables are factored with {\tt FACTOR}. With this mode, the overall denominator of the expression is printed with each factored sub-expression. We assume a prior declaration {\tt factor x;} in the following output. We first print the expression with {\tt RAT off}: {\small\begin{verbatim} 2 2 (2*X *Y*A*(Y + 2) + X*(Y + Z))/(2*A) . \end{verbatim}} With {\tt RAT} on the output becomes: \extendedmanual{\newpage} {\small\begin{verbatim} 2 2 X *Y*(Y + 2) + X*(Y + Z)/(2*A) . \end{verbatim}} {\tt RAT} is normally off. Next, if we leave {\tt X} factored, and turn on both {\tt DIV} and {\tt RAT}, the result becomes {\small\begin{verbatim} 2 (-1) 2 X *Y*(Y + 2) + 1/2*X*A *(Y + Z) . \end{verbatim}} Finally, with {\tt X} factored, {\tt RAT} on and {\tt ALLFAC}\ttindex{ALLFAC} off we retrieve the original structure {\small\begin{verbatim} 2 2 2 X *(Y + 2*Y) + X*(Y + Z)/(2*A) . \end{verbatim}} \subsubsection{RATPRI Switch}\ttindex{RATPRI} If the numerator and denominator of an expression can each be printed in one line, the output routines will print them in a two dimensional notation, with numerator and denominator on separate lines and a line of dashes in between. For example, {\tt (a+b)/2} will print as {\small\begin{verbatim} A + B ----- 2 \end{verbatim}} Turning this switch off causes such expressions to be output in a linear form. \subsubsection{REVPRI Switch}\ttindex{REVPRI} The normal ordering of terms in output is from highest to lowest power. In some situations (e.g., when a power series is output), the opposite ordering is more convenient. The switch {\tt REVPRI} if on causes such a reverse ordering of terms. For example, the expression {\tt y*(x+1)\verb|^|2+(y+3)\verb|^|2} will normally print as {\small\begin{verbatim} 2 2 X *Y + 2*X*Y + Y + 7*Y + 9 \end{verbatim}} whereas with {\tt REVPRI} on, it will print as {\small\begin{verbatim} 2 2 9 + 7*Y + Y + 2*X*Y + X *Y. \end{verbatim}} \subsection{WRITE Command}\ttindex{WRITE} In simple cases no explicit output\index{Output} command is necessary in {\REDUCE}, since the value of any expression is automatically printed if a semicolon is used as a delimiter. There are, however, several situations in which such a command is useful. In a {\tt FOR}, {\tt WHILE}, or {\tt REPEAT} statement it may be desired to output something each time the statement within the loop construct is repeated. It may be desired for a procedure to output intermediate results or other information while it is running. It may be desired to have results labeled in special ways, especially if the output is directed to a file or device other than the terminal. The {\tt WRITE} command consists of the word {\tt WRITE} followed by one or more items separated by commas, and followed by a terminator. There are three kinds of items that can be used: \begin{enumerate} \item Expressions (including variables and constants). The expression is evaluated, and the result is printed out. \item Assignments. 
The expression on the right side of the {\tt :=} operator is evaluated, and is assigned to the variable on the left; then the symbol on the left is printed, followed by a ``{\tt :=}'', followed by the value of the expression on the right -- almost exactly the way an assignment followed by a semicolon prints out normally. (The difference is that if the {\tt WRITE} is in a {\tt FOR} statement and the left-hand side of the assignment is an array position or something similar containing the variable of the {\tt FOR} iteration, then the value of that variable is inserted in the printout.) \item Arbitrary strings of characters, preceded and followed by double-quote marks (e.g., {\tt "string"}). \end{enumerate} The items specified by a single {\tt WRITE} statement print side by side on one line. (The line is broken automatically if it is too long.) Strings print exactly as quoted. The {\tt WRITE} command itself however does not return a value. The print line is closed at the end of a {\tt WRITE} command evaluation. Therefore the command {\tt WRITE "";} (specifying nothing to be printed except the empty string) causes a line to be skipped. {\it Examples:} \begin{enumerate} \item If {\tt A} is {\tt X+5}, {\tt B} is itself, {\tt C} is 123, {\tt M} is an array, and {\tt Q}=3, then {\small\begin{verbatim} write m(q):=a," ",b/c," THANK YOU"; \end{verbatim}} will set {\tt M(3)} to {\tt x+5} and print {\small\begin{verbatim} M(Q) := X + 5 B/123 THANK YOU \end{verbatim}} The blanks between the {\tt 5} and {\tt B}, and the {\tt 3} and {\tt T}, come from the blanks in the quoted strings. \item To print a table of the squares of the integers from 1 to 20: {\small\begin{verbatim} for i:=1:20 do write i," ",i^2; \end{verbatim}} \item To print a table of the squares of the integers from 1 to 20, and at the same time store them in positions 1 to 20 of an array {\tt A:} {\small\begin{verbatim} for i:=1:20 do <<a(i):=i^2; write i," ",a(i)>>; \end{verbatim}} This will give us two columns of numbers. If we had used {\small\begin{verbatim} for i:=1:20 do write i," ",a(i):=i^2; \end{verbatim}} we would also get {\tt A(}i{\tt ) := } repeated on each line. \item The following more complete example calculates the famous f and g series, first reported in Sconzo, P., LeSchack, A. R., and Tobey, R., ``Symbolic Computation of f and g Series by Computer'', Astronomical Journal 70 (May 1965). {\small\begin{verbatim} x1:= -sig*(mu+2*eps)$ x2:= eps - 2*sig^2$ x3:= -3*mu*sig$ f:= 1$ g:= 0$ for i:= 1 step 1 until 10 do begin f1:= -mu*g+x1*df(f,eps)+x2*df(f,sig)+x3*df(f,mu); write "f(",i,") := ",f1; g1:= f+x1*df(g,eps)+x2*df(g,sig)+x3*df(g,mu); write "g(",i,") := ",g1; f:=f1$ g:=g1$ end; \end{verbatim}} A portion of the output, to illustrate the printout from the {\tt WRITE} command, is as follows: {\small\begin{verbatim} ... <prior output> ... 2 F(4) := MU*(3*EPS - 15*SIG + MU) G(4) := 6*SIG*MU 2 F(5) := 15*SIG*MU*( - 3*EPS + 7*SIG - MU) 2 G(5) := MU*(9*EPS - 45*SIG + MU) ... <more output> ... \end{verbatim}} \end{enumerate} \subsection{Suppression of Zeros} It is sometimes annoying to have zero assignments (i.e. assignments of the form {\tt <expression> := 0}) printed, especially in printing large arrays with many zero elements. The output from such assignments can be suppressed by turning on the switch {\tt NERO}.\ttindex{NERO} \subsection{{FORTRAN} Style Output Of Expressions} It is naturally possible to evaluate expressions numerically in {\REDUCE} by giving all variables and sub-expressions numerical values. 
However, as we pointed out elsewhere the user must declare real arithmetical operation by turning on the switch {\tt ROUNDED}\ttindex{ROUNDED}. However, it should be remembered that arithmetic in {\REDUCE} is not particularly fast, since results are interpreted rather than evaluated in a compiled form. The user with a large amount of numerical computation after all necessary algebraic manipulations have been performed is therefore well advised to perform these calculations in a FORTRAN\index{FORTRAN} or similar system. For this purpose, {\REDUCE} offers facilities for users to produce FORTRAN compatible files for numerical processing. First, when the switch {\tt FORT}\ttindex{FORT} is on, the system will print expressions in a FORTRAN notation. Expressions begin in column seven. If an expression extends over one line, a continuation mark (.) followed by a blank appears on subsequent cards. After a certain number of lines have been produced (according to the value of the variable {\tt CARD\_NO}),\ttindex{CARD\_NO} a new expression is started. If the expression printed arises from an assignment to a variable, the variable is printed as the name of the expression. Otherwise the expression is given the default name {\tt ANS}. An error occurs if identifiers or numbers are outside the bounds permitted by FORTRAN. A second option is to use the {\tt WRITE} command to produce other programs. {\it Example:} The following {\REDUCE} statements {\small\begin{verbatim} on fort; out "forfil"; write "C this is a fortran program"; write " 1 format(e13.5)"; write " u=1.23"; write " v=2.17"; write " w=5.2"; x:=(u+v+w)^11; write "C it was foolish to expand this expression"; write " print 1,x"; write " end"; shut "forfil"; off fort; \end{verbatim}} will generate a file {\tt forfil} that contains: {\small {\small\begin{verbatim} c this is a fortran program 1 format(e13.5) u=1.23 v=2.17 w=5.2 ans1=1320.*u**3*v*w**7+165.*u**3*w**8+55.*u**2*v**9+495.*u . **2*v**8*w+1980.*u**2*v**7*w**2+4620.*u**2*v**6*w**3+ . 6930.*u**2*v**5*w**4+6930.*u**2*v**4*w**5+4620.*u**2*v**3* . w**6+1980.*u**2*v**2*w**7+495.*u**2*v*w**8+55.*u**2*w**9+ . 11.*u*v**10+110.*u*v**9*w+495.*u*v**8*w**2+1320.*u*v**7*w . **3+2310.*u*v**6*w**4+2772.*u*v**5*w**5+2310.*u*v**4*w**6 . +1320.*u*v**3*w**7+495.*u*v**2*w**8+110.*u*v*w**9+11.*u*w . **10+v**11+11.*v**10*w+55.*v**9*w**2+165.*v**8*w**3+330.* . v**7*w**4+462.*v**6*w**5+462.*v**5*w**6+330.*v**4*w**7+ . 165.*v**3*w**8+55.*v**2*w**9+11.*v*w**10+w**11 x=u**11+11.*u**10*v+11.*u**10*w+55.*u**9*v**2+110.*u**9*v* . w+55.*u**9*w**2+165.*u**8*v**3+495.*u**8*v**2*w+495.*u**8 . *v*w**2+165.*u**8*w**3+330.*u**7*v**4+1320.*u**7*v**3*w+ . 1980.*u**7*v**2*w**2+1320.*u**7*v*w**3+330.*u**7*w**4+462. . *u**6*v**5+2310.*u**6*v**4*w+4620.*u**6*v**3*w**2+4620.*u . **6*v**2*w**3+2310.*u**6*v*w**4+462.*u**6*w**5+462.*u**5* . v**6+2772.*u**5*v**5*w+6930.*u**5*v**4*w**2+9240.*u**5*v . **3*w**3+6930.*u**5*v**2*w**4+2772.*u**5*v*w**5+462.*u**5 . *w**6+330.*u**4*v**7+2310.*u**4*v**6*w+6930.*u**4*v**5*w . **2+11550.*u**4*v**4*w**3+11550.*u**4*v**3*w**4+6930.*u** . 4*v**2*w**5+2310.*u**4*v*w**6+330.*u**4*w**7+165.*u**3*v . **8+1320.*u**3*v**7*w+4620.*u**3*v**6*w**2+9240.*u**3*v** . 5*w**3+11550.*u**3*v**4*w**4+9240.*u**3*v**3*w**5+4620.*u . 
**3*v**2*w**6+ans1 c it was foolish to expand this expression print 1,x end \end{verbatim}} } If the arguments of a {\tt WRITE} statement include an expression that requires continuation records, the output will need editing, since the output routine prints the arguments of {\tt WRITE} sequentially, and the continuation mechanism therefore generates its auxiliary variables after the preceding expression has been printed. Finally, since there is no direct analog of {\em list\/} in FORTRAN, a comment line of the form {\small\begin{verbatim} c ***** invalid fortran construct (list) not printed \end{verbatim}} will be printed if you try to print a list with {\tt FORT} on. \subsubsection{{FORTRAN} Output Options}\index{Output}\index{FORTRAN} There are a number of methods available to change the default format of the FORTRAN output. The breakup of the expression into subparts is such that the number of continuation lines produced is less than a given number. This number can be modified by the assignment {\small\begin{verbatim} card_no := <number>; \end{verbatim}} where {\tt <number>} is the {\em total\/} number of cards allowed in a statement. The default value of {\tt CARD\_NO} is 20. The width of the output expression is also adjustable by the assignment {\small\begin{verbatim} fort_width := <integer>; \end{verbatim}} \ttindex{FORT\_WIDTH} which sets the total width of a given line to {\tt <integer>}. The initial FORTRAN output width is 70. {\REDUCE} automatically inserts a decimal point after each isolated integer coefficient in a FORTRAN expression (so that, for example, 4 becomes {\tt 4.} ). To prevent this, set the {\tt PERIOD}\ttindex{PERIOD} mode switch to {\tt OFF}. FORTRAN output is normally produced in lower case. If upper case is desired, the switch {\tt FORTUPPER}\ttindex{FORTUPPER} should be turned on. Finally, the default name {\tt ANS} assigned to an unnamed expression and its subparts can be changed by the operator {\tt VARNAME}. \ttindex{VARNAME} This takes a single identifier as argument, which then replaces {\tt ANS} as the expression name. The value of {\tt VARNAME} is its argument. Further facilities for the production of FORTRAN and other language output are provided by the SCOPE and GENTRAN packages\extendedmanual{described in chapters~\ref{GENTRAN} and \ref{SCOPE}}. \subsection{Saving Expressions for Later Use as Input} \index{Saving an expression} It is often useful to save an expression on an external file for use later as input in further calculations. The commands for opening and closing output files are explained elsewhere. However, we see in the examples on output of expressions that the standard ``natural'' method of printing expressions is not compatible with the input syntax. So to print the expression in an input compatible form we must inhibit this natural style by turning off the switch {\tt NAT}.\ttindex{NAT} If this is done, a dollar sign will also be printed at the end of the expression. {\it Example:} The following sequence of commands {\small\begin{verbatim} off nat; out "out"; x := (y+z)^2; write "end"; shut "out"; on nat; \end{verbatim}} will generate a file {\tt out} that contains {\small\begin{verbatim} X := Y**2 + 2*Y*Z + Z**2$ END$ \end{verbatim}} \subsection{Displaying Expression Structure}\index{Displaying structure} In those cases where the final result has a complicated form, it is often convenient to display the skeletal structure of the answer. 
The operator {\tt STRUCTR},\ttindex{STRUCTR} that takes a single expression as argument, will do this for you. Its syntax is: {\small\begin{verbatim} STRUCTR(EXPRN:algebraic[,ID1:identifier[,ID2:identifier]]); \end{verbatim}} The structure is printed effectively as a tree, in which the subparts are laid out with auxiliary names. If the optional {\tt ID1} is absent, the auxiliary names are prefixed by the root {\tt ANS}. This root may be changed by the operator {\tt VARNAME}\ttindex{VARNAME}. If the optional {\tt ID1} is present, and is an array name, the subparts are named as elements of that array, otherwise {\tt ID1} is used as the root prefix. (The second optional argument {\tt ID2} is explained later.) The {\tt EXPRN} can be either a scalar or a matrix expression. Use of any other will result in an error. {\it Example:} Let us suppose that the workspace contains {\tt ((A+B)\verb|^|2+C)\verb|^|3+D}. Then the input {\tt STRUCTR WS;} will (with {\tt EXP} off) result in the output:\newpage {\small\begin{verbatim} ANS3 where 3 ANS3 := ANS2 + D 2 ANS2 := ANS1 + C ANS1 := A + B \end{verbatim}} The workspace remains unchanged after this operation, since {\tt STRUCTR} \ttindex{STRUCTR} in the default situation returns no value (if {\tt STRUCTR} is used as a sub-expression, its value is taken to be 0). In addition, the sub-expressions are normally only displayed and not retained. If you wish to access the sub-expressions with their displayed names, the switch {\tt SAVESTRUCTR}\ttindex{SAVESTRUCTR} should be turned on. In this case, {\tt STRUCTR} returns a list whose first element is a representation for the expression, and subsequent elements are the sub-expression relations. Thus, with {\tt SAVESTRUCTR} on, {\tt STRUCTR WS} in the above example would return \vspace{-11pt} {\small\begin{verbatim} 3 2 {ANS3,ANS3=ANS2 + D,ANS2=ANS1 + C,ANS1=A + B} \end{verbatim}} The {\tt PART}\ttindex{PART} operator can be used to retrieve the required parts of the expression. For example, to get the value of {\tt ANS2} in the above, one could say: {\small\begin{verbatim} part(ws,3,2); \end{verbatim}} If {\tt FORT} is on, then the results are printed in the reverse order; the algorithm in fact guaranteeing that no sub-expression will be referenced before it is defined. The second optional argument {\tt ID2} may also be used in this case to name the actual expression (or expressions in the case of a matrix argument). {\it Example:} Let us suppose that {\tt M}, a 2 by 1 matrix, contains the elements {\tt ((a+b)\verb|^|2 + c)\verb|^|3 + d} and {\tt (a + b)*(c + d)} respectively, and that {\tt V} has been declared to be an array. With {\tt EXP} off and {\tt FORT} on, the statement {\tt structr(2*m,v,k);} will result in the output {\small\begin{verbatim} V(1)=A+B V(2)=V(1)**2+C V(3)=V(2)**3+D V(4)=C+D K(1,1)=2.*V(3) K(2,1)=2.*V(1)*V(4) \end{verbatim}} \section{Changing the Internal Order of Variables} The internal ordering of variables (more specifically kernels) can have a significant effect on the space and time associated with a calculation. In its default state, {\REDUCE} uses a specific order for this which may vary between sessions. However, it is possible for the user to change this internal order by means of the declaration {\tt KORDER}\ttindex{KORDER}. The syntax for this is: {\small\begin{verbatim} korder v1,...,vn; \end{verbatim}} where the {\tt Vi} are kernels\index{Kernel}. With this declaration, the {\tt Vi} are ordered internally ahead of any other kernels in the system. 
{\tt V1} has the highest order, {\tt V2} the next highest, and so on. A further call of {\tt KORDER} replaces a previous one. {\tt KORDER NIL;} resets the internal order to the system default. Unlike the {\tt ORDER}\ttindex{ORDER} declaration, that has a purely cosmetic effect on the way results are printed, the use of {\tt KORDER} can have a significant effect on computation time. In critical cases then, the user can experiment with the ordering of the variables used to determine the optimum set for a given problem. \section{Obtaining Parts of Algebraic Expressions} There are many occasions where it is desirable to obtain a specific part of an expression, or even change such a part to another expression. A number of operators are available in {\REDUCE} for this purpose, and will be described in this section. In addition, operators for obtaining specific parts of polynomials and rational functions (such as a denominator) are described in another section. \subsection{COEFF Operator}\ttindex{COEFF} Syntax: {\small\begin{verbatim} COEFF(EXPRN:polynomial,VAR:kernel) \end{verbatim}} {\tt COEFF} is an operator that partitions {\tt EXPRN} into its various coefficients with respect to {\tt VAR} and returns them as a list, with the coefficient independent of {\tt VAR} first. Under normal circumstances, an error results if {\tt EXPRN} is not a polynomial in {\tt VAR}, although the coefficients themselves can be rational as long as they do not depend on {\tt VAR}. However, if the switch {\tt RATARG}\ttindex{RATARG} is on, denominators are not checked for dependence on {\tt VAR}, and are taken to be part of the coefficients. {\it Example:} {\small\begin{verbatim} coeff((y^2+z)^3/z,y); \end{verbatim}} returns the result {\small\begin{verbatim} 2 {Z ,0,3*Z,0,3,0,1/Z}. \end{verbatim}} whereas {\small\begin{verbatim} coeff((y^2+z)^3/y,y); \end{verbatim}} gives an error if {\tt RATARG} is off, and the result {\small\begin{verbatim} 3 2 {Z /Y,0,3*Z /Y,0,3*Z/Y,0,1/Y} \end{verbatim}} if {\tt RATARG} is on. The length of the result of {\tt COEFF} is the highest power of {\tt VAR} encountered plus 1. In the above examples it is 7. In addition, the variable {\tt HIGH\_POW}\ttindex{HIGH\_POW} is set to the highest non-zero power found in {\tt EXPRN} during the evaluation, and {\tt LOW\_POW} \ttindex{LOW\_POW} to the lowest non-zero power, or zero if there is a constant term. If {\tt EXPRN} is a constant, then {\tt HIGH\_POW} and {\tt LOW\_POW} are both set to zero. \subsection{COEFFN Operator}\ttindex{COEFFN} The {\tt COEFFN} operator is designed to give the user a particular coefficient of a variable in a polynomial, as opposed to {\tt COEFF} that returns all coefficients. {\tt COEFFN} is used with the syntax {\small\begin{verbatim} COEFFN(EXPRN:polynomial,VAR:kernel,N:integer) \end{verbatim}} It returns the $n^{th}$ coefficient of {\tt VAR} in the polynomial {\tt EXPRN}. \subsection{PART Operator}\ttindex{PART} Syntax: {\small\begin{verbatim} PART(EXPRN:algebraic[,INTEXP:integer]) \end{verbatim}} This operator works on the form of the expression as printed {\em or as it would have been printed at that point in the calculation\/} bearing in mind all the relevant switch settings at that point. The reader therefore needs some familiarity with the way that expressions are represented in prefix form in {\REDUCE} to use these operators effectively. Furthermore, it is assumed that {\tt PRI} is {\tt ON} at that point in the calculation. 
The reason for this is that with {\tt PRI} off, an expression is printed by walking the tree representing the expression internally. To save space, it is never actually transformed into the equivalent prefix expression as occurs when {\tt PRI} is on. However, the operations on polynomials described elsewhere can be equally well used in this case to obtain the relevant parts. The evaluation proceeds recursively down the integer expression list. In other words, {\small\begin{verbatim} PART(<expression>,<integer1>,<integer2>) -> PART(PART(<expression>,<integer1>),<integer2>) \end{verbatim}} and so on, and {\small\begin{verbatim} PART(<expression>) -> <expression>. \end{verbatim}} {\tt INTEXP} can be any expression that evaluates to an integer. If the integer is positive, then that term of the expression is found. If the integer is 0, the operator is returned. Finally, if the integer is negative, the counting is from the tail of the expression rather than the head. For example, if the expression {\tt a+b} is printed as {\tt A+B} (i.e., the ordering of the variables is alphabetical), then {\small\begin{verbatim} part(a+b,2) -> B part(a+b,-1) -> B and part(a+b,0) -> PLUS \end{verbatim}} An operator {\tt ARGLENGTH}\ttindex{ARGLENGTH} is available to determine the number of arguments of the top level operator in an expression. If the expression does not contain a top level operator, then $-1$ is returned. For example, {\small\begin{verbatim} arglength(a+b+c) -> 3 arglength(f()) -> 0 arglength(a) -> -1 \end{verbatim}} \subsection{Substituting for Parts of Expressions} {\tt PART} may also be used to substitute for a given part of an expression. In this case, the {\tt PART} construct appears on the left-hand side of an assignment statement, and the expression to replace the given part on the right-hand side. For example, with the normal settings of the {\REDUCE} switches: {\small\begin{verbatim} xx := a+b; part(xx,2) := c; -> A+C part(c+d,0) := -; -> C-D \end{verbatim}} Note that {\tt xx} in the above is not changed by this substitution. In addition, unlike expressions such as array and matrix elements that have an {\em instant evaluation\/}\index{Instant evaluation} property, the values of {\tt part(xx,2)} and {\tt part(c+d,0)} are also not changed. \chapter{Polynomials and Rationals} Many operations in computer algebra are concerned with polynomials \index{Polynomial} and rational functions\index{Rational function}. In this section, we review some of the switches and operators available for this purpose. These are in addition to those that work on general expressions (such as {\tt DF} and {\tt INT}) described elsewhere. In the case of operators, the arguments are first simplified before the operations are applied. In addition, they operate only on arguments of prescribed types, and produce a type mismatch error if given arguments which cannot be interpreted in the required mode with the current switch settings. For example, if an argument is required to be a kernel and {\tt a/2} is used (with no other rules for {\tt A}), an error {\small\begin{verbatim} A/2 invalid as kernel \end{verbatim}} will result. With the exception of those that select various parts of a polynomial or rational function, these operations have potentially significant effects on the space and time associated with a given calculation. The user should therefore experiment with their use in a given calculation in order to determine the optimum set for a given problem. 
One such operation provided by the system is an operator {\tt LENGTH} \ttindex{LENGTH} which returns the number of top level terms in the numerator of its argument. For example, {\small\begin{verbatim} length ((a+b+c)^3/(c+d)); \end{verbatim}} has the value 10. To get the number of terms in the denominator, one would first select the denominator by the operator {\tt DEN}\ttindex{DEN} and then call {\tt LENGTH}, as in {\small\begin{verbatim} length den ((a+b+c)^3/(c+d)); \end{verbatim}} Other operations currently supported, the relevant switches and operators, and the required argument and value modes of the latter, follow. \section{Controlling the Expansion of Expressions} The switch {\tt EXP}\ttindex{EXP} controls the expansion of expressions. If it is off, no expansion of powers or products of expressions occurs. Users should note however that in this case results come out in a normal but not necessarily canonical form. This means that zero expressions simplify to zero, but that two equivalent expressions need not necessarily simplify to the same form. {\it Example:} With {\tt EXP} on, the two expressions {\small\begin{verbatim} (a+b)*(a+2*b) \end{verbatim}} and {\small\begin{verbatim} a^2+3*a*b+2*b^2 \end{verbatim}} will both simplify to the latter form. With {\tt EXP} off, they would remain unchanged, unless the complete factoring {\tt (ALLFAC)} option were in force. {\tt EXP} is normally on. Several operators that expect a polynomial as an argument behave differently when {\tt EXP} is off, since there is often only one term at the top level. For example, with {\tt EXP} off {\small\begin{verbatim} length((a+b+c)^3/(c+d)); \end{verbatim}} returns the value 1. \section{Factorization of Polynomials}\index{Factorization} {\REDUCE} is capable of factorizing univariate and multivariate polynomials that have integer coefficients, finding all factors that also have integer coefficients. The package for doing this was written by Dr. Arthur C. Norman and Ms. P. Mary Ann Moore at The University of Cambridge. It is described in P. M. A. Moore and A. C. Norman, ``Implementing a Polynomial Factorization and GCD Package'', Proc. SYMSAC '81, ACM (New York) (1981), 109-116. The easiest way to use this facility is to turn on the switch {\tt FACTOR},\ttindex{FACTOR} which causes all expressions to be output in a factored form. For example, with {\tt FACTOR} on, the expression {\tt A\verb|^|2-B\verb|^|2} is returned as {\tt (A+B)*(A-B)}. It is also possible to factorize a given expression explicitly. The operator {\tt FACTORIZE}\ttindex{FACTORIZE} that invokes this facility is used with the syntax {\small\begin{verbatim} FACTORIZE(EXPRN:polynomial[,INTEXP:prime integer]):list, \end{verbatim}} the optional argument of which will be described later. Thus to find and display all factors of the cyclotomic polynomial $x^{105}-1$, one could write: {\small\begin{verbatim} factorize(x^105-1); \end{verbatim}} The result is a list of factor,exponent pairs. In the above example, there is no overall numerical factor in the result, so the results will consist only of polynomials in x. The number of such polynomials can be found by using the operator {\tt LENGTH}.\ttindex{LENGTH} If there is a numerical factor, as in factorizing $12x^{2}-12$, that factor will appear as the first member of the result. It will however not be factored further. 
Prime factors of such numbers can be found, using a probabilistic algorithm, by turning on the switch {\tt IFACTOR}.\ttindex{IFACTOR} For example, {\small\begin{verbatim} on ifactor; factorize(12x^2-12); \end{verbatim}} would result in the output {\small\begin{verbatim} {{2,2},{3,1},{X + 1,1},{X - 1,1}}. \end{verbatim}} If the first argument of {\tt FACTORIZE} is an integer, it will be decomposed into its prime components, whether or not {\tt IFACTOR} is on. Note that the {\tt IFACTOR} switch only affects the result of {\tt FACTORIZE}. It has no effect if the {\tt FACTOR}\ttindex{FACTOR} switch is also on. The order in which the factors occur in the result (with the exception of a possible overall numerical coefficient which comes first) can be system dependent and should not be relied on. Similarly it should be noted that any pair of individual factors can be negated without altering their product, and that {\REDUCE} may sometimes do that. The factorizer works by first reducing multivariate problems to univariate ones and then solving the univariate ones modulo small primes. It normally selects both evaluation points and primes using a random number generator that should lead to different detailed behavior each time any particular problem is tackled. If, for some reason, it is known that a certain (probably univariate) factorization can be performed effectively with a known prime, {\tt P} say, this value of {\tt P} can be handed to {\tt FACTORIZE}\ttindex{FACTORIZE} as a second argument. An error will occur if a non-prime is provided to {\tt FACTORIZE} in this manner. It is also an error to specify a prime that divides the discriminant of the polynomial being factored, but users should note that this condition is not checked by the program, so this capability should be used with care. Factorization can be performed over a number of polynomial coefficient domains in addition to integers. The particular description of the relevant domain should be consulted to see if factorization is supported. For example, the following statements will factorize $x^{4}+1$ modulo 7: {\small\begin{verbatim} setmod 7; on modular; factorize(x^4+1); \end{verbatim}} The factorization module is provided with a trace facility that may be useful as a way of monitoring progress on large problems, and of satisfying curiosity about the internal workings of the package. The most simple use of this is enabled by issuing the {\REDUCE} command\ttindex{TRFAC} {\tt on trfac;} . Following this, all calls to the factorizer will generate informative messages reporting on such things as the reduction of multivariate to univariate cases, the choice of a prime and the reconstruction of full factors from their images. Further levels of detail in the trace are intended mainly for system tuners and for the investigation of suspected bugs. For example, {\tt TRALLFAC} gives tracing information at all levels of detail. The switch that can be set by {\tt on timings;} makes it possible for one who is familiar with the algorithms used to determine what part of the factorization code is consuming the most resources. {\tt on overview}; reduces the amount of detail presented in other forms of trace. Other forms of trace output are enabled by directives of the form {\small\begin{verbatim} symbolic set!-trace!-factor(<number>,<filename>); \end{verbatim}} where useful numbers are 1, 2, 3 and 100, 101, ... . 
This facility is intended to make it possible to discover in fairly great detail what just some small part of the code has been doing --- the numbers refer mainly to depths of recursion when the factorizer calls itself, and to the split between its work forming and factorizing images and reconstructing full factors from these. If {\tt NIL} is used in place of a filename the trace output requested is directed to the standard output stream. After use of this trace facility the generated trace files should be closed by calling {\small\begin{verbatim} symbolic close!-trace!-files(); \end{verbatim}} {\it NOTE:} Using the factorizer with {\tt MCD}\ttindex{MCD} off will result in an error. \section{Cancellation of Common Factors} Facilities are available in {\REDUCE} for cancelling common factors in the numerators and denominators of expressions, at the option of the user. The system will perform this greatest common divisor computation if the switch {\tt GCD}\ttindex{GCD} is on. ({\tt GCD} is normally off.) A check is automatically made, however, for common variable and numerical products in the numerators and denominators of expressions, and the appropriate cancellations made. When {\tt GCD} is on, and {\tt EXP} is off, a check is made for square free factors in an expression. This includes separating out and independently checking the content of a given polynomial where appropriate. (For an explanation of these terms, see Anthony C. Hearn, ``Non-Modular Computation of Polynomial GCDs Using Trial Division'', Proc. EUROSAM 79, published as Lecture Notes on Comp. Science, Springer-Verlag, Berlin, No 72 (1979) 227-239.) {\it Example:} With {\tt EXP}\ttindex{EXP} off and {\tt GCD}\ttindex{GCD} on, the polynomial {\tt a*c+a*d+b*c+b*d} would be returned as {\tt (A+B)*(C+D)}. Under normal circumstances, GCDs are computed using an algorithm described in the above paper. It is also possible in {\REDUCE} to compute GCDs using an alternative algorithm, called the EZGCD Algorithm, which uses modular arithmetic. The switch {\tt EZGCD}\ttindex{EZGCD}, if on in addition to {\tt GCD}, makes this happen. In non-trivial cases, the EZGCD algorithm is almost always better than the basic algorithm, often by orders of magnitude. We therefore {\em strongly\/} advise users to use the {\tt EZGCD} switch where they have the resources available for supporting the package. For a description of the EZGCD algorithm, see J. Moses and D.Y.Y. Yun, ``The EZ GCD Algorithm'', Proc. ACM 1973, ACM, New York (1973) 159-166. {\it NOTE:} This package shares code with the factorizer, so a certain amount of trace information can be produced using the factorizer trace switches. \subsection{Determining the GCD of Two Polynomials} This operator, used with the syntax {\small\begin{verbatim} GCD(EXPRN1:polynomial,EXPRN2:polynomial):polynomial, \end{verbatim}} returns the greatest common divisor of the two polynomials {\tt EXPRN1} and {\tt EXPRN2}. {\it Examples:} {\small\begin{verbatim} gcd(x^2+2*x+1,x^2+3*x+2) -> X+1 gcd(2*x^2-2*y^2,4*x+4*y) -> 2*X+2*Y gcd(x^2+y^2,x-y) -> 1. \end{verbatim}} \section{Working with Least Common Multiples} Greatest common divisor calculations can often become expensive if extensive work with large rational expressions is required. However, in many cases, the only significant cancellations arise from the fact that there are often common factors in the various denominators which are combined when two rationals are added. 
Since these denominators tend to be smaller and more regular in structure than the numerators, considerable savings in both time and space can occur if a full GCD check is made when the denominators are combined and only a partial check when numerators are constructed. In other words, the true least common multiple of the denominators is computed at each step. The switch {\tt LCM}\ttindex{LCM} is available for this purpose, and is normally on. In addition, the operator {\tt LCM},\ttindex{LCM} used with the syntax {\small\begin{verbatim} LCM(EXPRN1:polynomial,EXPRN2:polynomial):polynomial, \end{verbatim}} returns the least common multiple of the two polynomials {\tt EXPRN1} and {\tt EXPRN2}. {\it Examples:} {\small\begin{verbatim} lcm(x^2+2*x+1,x^2+3*x+2) -> X**3 + 4*X**2 + 5*X + 2 lcm(2*x^2-2*y^2,4*x+4*y) -> 4*(X**2 - Y**2) \end{verbatim}} \section{Controlling Use of Common Denominators} When two rational functions are added, {\REDUCE} normally produces an expression over a common denominator. However, if the user does not want denominators combined, he or she can turn off the switch {\tt MCD} \ttindex{MCD} which controls this process. The latter switch is particularly useful if no greatest common divisor calculations are desired, or excessive differentiation of rational functions is required. {\it CAUTION:} With {\tt MCD} off, results are not guaranteed to come out in either normal or canonical form. In other words, an expression equivalent to zero may in fact not be simplified to zero. This option is therefore most useful for avoiding expression swell during intermediate parts of a calculation. {\tt MCD}\ttindex{MCD} is normally on. \section{REMAINDER Operator}\ttindex{REMAINDER} This operator is used with the syntax {\small\begin{verbatim} REMAINDER(EXPRN1:polynomial,EXPRN2:polynomial):polynomial. \end{verbatim}} It returns the remainder when {\tt EXPRN1} is divided by {\tt EXPRN2}. This is the true remainder based on the internal ordering of the variables, and not the pseudo-remainder. The pseudo-remainder \ttindex{PSEUDO\_REMAINDER} and in general pseudo-division \ttindex{PSEUDO\_DIVIDE} of polynomials can be calculated after loading the {\tt polydiv} package. Please refer to the documentation of this package for details. {\it Examples:} {\small\begin{verbatim} remainder((x+y)*(x+2*y),x+3*y) -> 2*Y**2 remainder(2*x+y,2) -> Y. \end{verbatim}} {\it CAUTION:} In the default case, remainders are calculated over the integers. If you need the remainder with respect to another domain, it must be declared explicitly. {\it Example:} {\small\begin{verbatim} remainder(x^2-2,x+sqrt(2)); -> X^2 - 2 load_package arnum; defpoly sqrt2**2-2; remainder(x^2-2,x+sqrt2); -> 0 \end{verbatim}} \section{RESULTANT Operator}\ttindex{RESULTANT} This is used with the syntax {\small\begin{verbatim} RESULTANT(EXPRN1:polynomial,EXPRN2:polynomial,VAR:kernel): polynomial. \end{verbatim}} It computes the resultant of the two given polynomials with respect to the given variable, the coefficients of the polynomials can be taken from any domain. The result can be identified as the determinant of a Sylvester matrix, but can often also be thought of informally as the result obtained when the given variable is eliminated between the two input polynomials. If the two input polynomials have a non-trivial GCD their resultant vanishes. The switch {\tt Bezout}\ttindex{Bezout} controls the computation of the resultants. It is off by default. In this case a subresultant algorithm is used. 
If the switch Bezout is turned on, the resultant is computed via the Bezout Matrix. However, in the latter case, only polynomial coefficients are permitted. \begin{samepage} The sign conventions used by the resultant function follow those in R. Loos, ``Computing in Algebraic Extensions'' in ``Computer Algebra --- Symbolic and Algebraic Computation'', Second Ed., Edited by B. Buchberger, G.E. Collins and R. Loos, Springer-Verlag, 1983. Namely, with {\tt A} and {\tt B} not dependent on {\tt X}: {\small\begin{verbatim} deg(p)*deg(q) resultant(p(x),q(x),x)= (-1) *resultant(q,p,x) deg(p) resultant(a,p(x),x) = a resultant(a,b,x) = 1 \end{verbatim}} \end{samepage} {\it Examples:} \begin{samepage} {\small\begin{verbatim} 2 resultant(x/r*u+y,u*y,u) -> - y \end{verbatim}} \end{samepage} {\it calculation in an algebraic extension:} \begin{samepage} {\small\begin{verbatim} load arnum; defpoly sqrt2**2 - 2; resultant(x + sqrt2,sqrt2 * x +1,x) -> -1 \end{verbatim}} \end{samepage} {\it or in a modular domain:} \begin{samepage} {\small\begin{verbatim} setmod 17; on modular; resultant(2x+1,3x+4,x) -> 5 \end{verbatim}} \end{samepage} \section{DECOMPOSE Operator}\ttindex{DECOMPOSE} The {\tt DECOMPOSE} operator takes a multivariate polynomial as argument, and returns an expression and a list of equations from which the original polynomial can be found by composition. Its syntax is: {\small\begin{verbatim} DECOMPOSE(EXPRN:polynomial):list. \end{verbatim}} For example: {\small\begin{verbatim} decompose(x^8-88*x^7+2924*x^6-43912*x^5+263431*x^4- 218900*x^3+65690*x^2-7700*x+234) 2 2 2 -> {U + 35*U + 234, U=V + 10*V, V=X - 22*X} 2 decompose(u^2+v^2+2u*v+1) -> {W + 1, W=U + V} \end{verbatim}} Users should note however that, unlike factorization, this decomposition is not unique. \section{INTERPOL operator}\ttindex{INTERPOL} Syntax: {\small\begin{verbatim} INTERPOL(<values>,<variable>,<points>); \end{verbatim}} where {\tt <values>} and {\tt <points>} are lists of equal length and {\tt <variable>} is an algebraic expression (preferably a kernel). {\tt INTERPOL} generates an interpolation polynomial {\em f\/} in the given variable of degree length({\tt <values>})-1. The unique polynomial {\em f\/} is defined by the property that for corresponding elements {\em v\/} of {\tt <values>} and {\em p\/} of {\tt <points>} the relation $f(p)=v$ holds. The Aitken-Neville interpolation algorithm is used which guarantees a stable result even with rounded numbers and an ill-conditioned problem. \section{Obtaining Parts of Polynomials and Rationals} These operators select various parts of a polynomial or rational function structure. Except for the cost of rearrangement of the structure, these operations take very little time to perform. For those operators in this section that take a kernel {\tt VAR} as their second argument, an error results if the first expression is not a polynomial in {\tt VAR}, although the coefficients themselves can be rational as long as they do not depend on {\tt VAR}. However, if the switch {\tt RATARG}\ttindex{RATARG} is on, denominators are not checked for dependence on {\tt VAR}, and are taken to be part of the coefficients. \subsection{DEG Operator}\ttindex{DEG} This operator is used with the syntax {\small\begin{verbatim} DEG(EXPRN:polynomial,VAR:kernel):integer. \end{verbatim}} It returns the leading degree\index{Degree} of the polynomial {\tt EXPRN} in the variable {\tt VAR}. If {\tt VAR} does not occur as a variable in {\tt EXPRN}, 0 is returned. 
{\it Examples:} {\small\begin{verbatim} deg((a+b)*(c+2*d)^2,a) -> 1 deg((a+b)*(c+2*d)^2,d) -> 2 deg((a+b)*(c+2*d)^2,e) -> 0. \end{verbatim}} Note also that if {\tt RATARG} is on, {\small\begin{verbatim} deg((a+b)^3/a,a) -> 3 \end{verbatim}} since in this case, the denominator {\tt A} is considered part of the coefficients of the numerator in {\tt A}. With {\tt RATARG} off, however, an error would result in this case. \subsection{DEN Operator}\ttindex{DEN} This is used with the syntax: {\small\begin{verbatim} DEN(EXPRN:rational):polynomial. \end{verbatim}} It returns the denominator of the rational expression {\tt EXPRN}. If {\tt EXPRN} is a polynomial, 1 is returned. {\it Examples:} {\small\begin{verbatim} den(x/y^2) -> Y**2 den(100/6) -> 3 [since 100/6 is first simplified to 50/3] den(a/4+b/6) -> 12 den(a+b) -> 1 \end{verbatim}} \subsection{LCOF Operator}\ttindex{LCOF} LCOF is used with the syntax {\small\begin{verbatim} LCOF(EXPRN:polynomial,VAR:kernel):polynomial. \end{verbatim}} It returns the leading coefficient\index{Leading coefficient} of the polynomial {\tt EXPRN} in the variable {\tt VAR}. If {\tt VAR} does not occur as a variable in {\tt EXPRN}, {\tt EXPRN} is returned. \extendedmanual{\newpage} {\it Examples:} {\small\begin{verbatim} lcof((a+b)*(c+2*d)^2,a) -> C**2+4*C*D+4*D**2 lcof((a+b)*(c+2*d)^2,d) -> 4*(A+B) lcof((a+b)*(c+2*d),e) -> A*C+2*A*D+B*C+2*B*D \end{verbatim}} \subsection{LPOWER Operator}\ttindex{LPOWER} \begin{samepage} Syntax: {\small\begin{verbatim} LPOWER(EXPRN:polynomial,VAR:kernel):polynomial. \end{verbatim}} LPOWER returns the leading power of {\tt EXPRN} with respect to {\tt VAR}. If {\tt EXPRN} does not depend on {\tt VAR}, 1 is returned. \end{samepage} {\it Examples:} {\small\begin{verbatim} lpower((a+b)*(c+2*d)^2,a) -> A lpower((a+b)*(c+2*d)^2,d) -> D**2 lpower((a+b)*(c+2*d),e) -> 1 \end{verbatim}} \subsection{LTERM Operator}\ttindex{LTERM} \begin{samepage} Syntax: {\small\begin{verbatim} LTERM(EXPRN:polynomial,VAR:kernel):polynomial. \end{verbatim}} LTERM returns the leading term of {\tt EXPRN} with respect to {\tt VAR}. If {\tt EXPRN} does not depend on {\tt VAR}, {\tt EXPRN} is returned. \end{samepage} {\it Examples:} {\small\begin{verbatim} lterm((a+b)*(c+2*d)^2,a) -> A*(C**2+4*C*D+4*D**2) lterm((a+b)*(c+2*d)^2,d) -> 4*D**2*(A+B) lterm((a+b)*(c+2*d),e) -> A*C+2*A*D+B*C+2*B*D \end{verbatim}} {\COMPATNOTE} In some earlier versions of REDUCE, {\tt LTERM} returned {\tt 0} if the {\tt EXPRN} did not depend on {\tt VAR}. In the present version, {\tt EXPRN} is always equal to {\tt LTERM(EXPRN,VAR)} $+$ {\tt REDUCT(EXPRN,VAR)}. \subsection{MAINVAR Operator}\ttindex{MAINVAR} Syntax: {\small\begin{verbatim} MAINVAR(EXPRN:polynomial):expression. \end{verbatim}} Returns the main variable (based on the internal polynomial representation) of {\tt EXPRN}. If {\tt EXPRN} is a domain element, 0 is returned. {\it Examples:} Assuming {\tt A} has higher kernel order than {\tt B}, {\tt C}, or {\tt D}: {\small\begin{verbatim} mainvar((a+b)*(c+2*d)^2) -> A mainvar(2) -> 0 \end{verbatim}} \subsection{NUM Operator}\ttindex{NUM} Syntax: {\small\begin{verbatim} NUM(EXPRN:rational):polynomial. \end{verbatim}} Returns the numerator of the rational expression {\tt EXPRN}. If {\tt EXPRN} is a polynomial, that polynomial is returned. {\it Examples:} {\small\begin{verbatim} num(x/y^2) -> X num(100/6) -> 50 num(a/4+b/6) -> 3*A+2*B num(a+b) -> A+B \end{verbatim}} \subsection{REDUCT Operator}\ttindex{REDUCT} Syntax: {\small\begin{verbatim} REDUCT(EXPRN:polynomial,VAR:kernel):polynomial. 
\end{verbatim}} Returns the reductum of {\tt EXPRN} with respect to {\tt VAR} (i.e., the part of {\tt EXPRN} left after the leading term is removed). If {\tt EXPRN} does not depend on the variable {\tt VAR}, 0 is returned. {\it Examples:} {\small\begin{verbatim} reduct((a+b)*(c+2*d),a) -> B*(C + 2*D) reduct((a+b)*(c+2*d),d) -> C*(A + B) reduct((a+b)*(c+2*d),e) -> 0 \end{verbatim}} {\COMPATNOTE} In some earlier versions of REDUCE, {\tt REDUCT} returned {\tt EXPRN} if it did not depend on {\tt VAR}. In the present version, {\tt EXPRN} is always equal to {\tt LTERM(EXPRN,VAR)} $+$ {\tt REDUCT(EXPRN,VAR)}. \section{Polynomial Coefficient Arithmetic}\index{Coefficient} {\REDUCE} allows for a variety of numerical domains for the numerical coefficients of polynomials used in calculations. The default mode is integer arithmetic, although the possibility of using real coefficients \index{Real coefficient} has been discussed elsewhere. Rational coefficients have also been available by using integer coefficients in both the numerator and denominator of an expression, using the {\tt ON DIV}\ttindex{DIV} option to print the coefficients as rationals. However, {\REDUCE} includes several other coefficient options in its basic version which we shall describe in this section. All such coefficient modes are supported in a table-driven manner so that it is straightforward to extend the range of possibilities. A description of how to do this is given in R.J. Bradford, A.C. Hearn, J.A. Padget and E. Schr\"ufer, ``Enlarging the {\REDUCE} Domain of Computation,'' Proc. of SYMSAC '86, ACM, New York (1986), 100--106. \subsection{Rational Coefficients in Polynomials}\index{Coefficient} \index{Rational coefficient} Instead of treating rational numbers as the numerator and denominator of a rational expression, it is also possible to use them as polynomial coefficients directly. This is accomplished by turning on the switch {\tt RATIONAL}.\ttindex{RATIONAL} {\it Example:} With {\tt RATIONAL} off, the input expression {\tt a/2} would be converted into a rational expression, whose numerator was {\tt A} and denominator 2. With {\tt RATIONAL} on, the same input would become a rational expression with numerator {\tt 1/2*A} and denominator {\tt 1}. Thus the latter can be used in operations that require polynomial input whereas the former could not. \subsection{Real Coefficients in Polynomials}\index{Coefficient} \index{Real coefficient} The switch {\tt ROUNDED}\ttindex{ROUNDED} permits the use of arbitrary sized real coefficients in polynomial expressions. The actual precision of these coefficients can be set by the operator {\tt PRECISION}. \ttindex{PRECISION} For example, {\tt precision 50;} sets the precision to fifty decimal digits. The default precision is system dependent and can be found by {\tt precision 0;}. In this mode, denominators are automatically made monic, and an appropriate adjustment is made to the numerator. {\it Example:} With {\tt ROUNDED} on, the input expression {\tt a/2} would be converted into a rational expression whose numerator is {\tt 0.5*A} and denominator {\tt 1}. Internally, {\REDUCE} uses floating point numbers up to the precision supported by the underlying machine hardware, and so-called {\em bigfloats} for higher precision or whenever necessary to represent numbers whose value cannot be represented in floating point. The internal precision is two decimal digits greater than the external precision to guard against roundoff inaccuracies. 
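For illustration, a short session might look as follows (the values shown are
indicative; the digits actually printed depend on the precision in effect):
{\small\begin{verbatim}
on rounded;
precision 6;
pi;    ->  3.14159
1/3;   ->  0.333333
\end{verbatim}}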
Bigfloats represent the fraction and exponent parts of a floating-point number by means of (arbitrary precision) integers, which is a more precise representation in many cases than the machine floating point arithmetic, but not as efficient. If a case arises where use of the machine arithmetic leads to problems, a user can force {\REDUCE} to use the bigfloat representation at all precisions by turning on the switch {\tt ROUNDBF}.\ttindex{ROUNDBF} In rare cases, this switch is turned on by the system, and the user informed by the message {\small\begin{verbatim} ROUNDBF turned on to increase accuracy \end{verbatim}} Rounded numbers are normally printed to the specified precision. However, if the user wishes to print such numbers with less precision, the printing precision can be set by the command {\tt PRINT\_PRECISION}. \ttindex{PRINT\_PRECISION} For example, {\tt print\_precision 5;} will cause such numbers to be printed with five digits maximum. Under normal circumstances when {\tt ROUNDED} is on, {\REDUCE} converts the number 1.0 to the integer 1. If this is not desired, the switch {\tt NOCONVERT}\ttindex{NOCONVERT} can be turned on. Numbers that are stored internally as bigfloats are normally printed with a space between every five digits to improve readability. If this feature is not required, it can be suppressed by turning off the switch {\tt BFSPACE}.\ttindex{BFSPACE} Further information on the bigfloat arithmetic may be found in T. Sasaki, ``Manual for Arbitrary Precision Real Arithmetic System in {\REDUCE}'', Department of Computer Science, University of Utah, Technical Note No. TR-8 (1979). When a real number is input, it is normally truncated to the precision in effect at the time the number is read. If it is desired to keep the full precision of all numbers input, the switch {\tt ADJPREC}\ttindex{ADJPREC} (for {\em adjust precision\/}) can be turned on. While on, {\tt ADJPREC} will automatically increase the precision, when necessary, to match that of any integer or real input, and a message printed to inform the user of the precision increase. When {\tt ROUNDED} is on, rational numbers are normally converted to rounded representation. However, if a user wishes to keep such numbers in a rational form until used in an operation that returns a real number, the switch {\tt ROUNDALL}\ttindex{ROUNDALL} can be turned off. This switch is normally on. Results from rounded calculations are returned in rounded form with two exceptions: if the result is recognized as {\tt 0} or {\tt 1} to the current precision, the integer result is returned. \subsection{Modular Number Coefficients in Polynomials}\index{Coefficient} \index{Modular coefficient} {\REDUCE} includes facilities for manipulating polynomials whose coefficients are computed modulo a given base. To use this option, two commands must be used; {\tt SETMOD} {\tt <integer>},\ttindex{SETMOD} to set the prime modulus, and {\tt ON MODULAR}\ttindex{MODULAR} to cause the actual modular calculations to occur. For example, with {\tt setmod 3;} and {\tt on modular;}, the polynomial {\tt (a+2*b)\verb|^|3} would become {\tt A\verb|^|3+2*B\verb|^|3}. The argument of {\tt SETMOD} is evaluated algebraically, except that non-modular (integer) arithmetic is used. Thus the sequence {\small\begin{verbatim} setmod 3; on modular; setmod 7; \end{verbatim}} will correctly set the modulus to 7. Modular numbers are by default represented by integers in the interval [0,p-1] where p is the current modulus. 
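For instance (an indicative calculation, using this default representation):
{\small\begin{verbatim}
setmod 7;
on modular;
3/2;   ->  5
\end{verbatim}}
since {\tt 2*5} is congruent to 3 modulo 7.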
Sometimes it is more convenient to use an equivalent symmetric representation in the interval [-p/2+1,p/2], or more precisely [-floor((p-1)/2), ceiling((p-1)/2)], especially if the modular numbers map objects that include negative quantities. The switch {\tt BALANCED\_MOD}\ttindex{BALANCED\_MOD} allows you to select the symmetric representation for output. Users should note that the modular calculations are on the polynomial coefficients only. It is not currently possible to reduce the exponents since no check for a prime modulus is made (which would allow $x^{p-1}$ to be reduced to 1 mod p). Note also that any division by a number not co-prime with the modulus will result in the error ``Invalid modular division''. \subsection{Complex Number Coefficients in Polynomials}\index{Coefficient} \index{Complex coefficient} Although {\REDUCE} routinely treats the square of the variable {\em i\/} as equivalent to $-1$, this is not sufficient to reduce expressions involving {\em i\/} to lowest terms, or to factor such expressions over the complex numbers. For example, in the default case, {\small\begin{verbatim} factorize(a^2+1); \end{verbatim}} gives the result {\small\begin{verbatim} {{A**2+1,1}} \end{verbatim}} and {\small\begin{verbatim} (a^2+b^2)/(a+i*b) \end{verbatim}} is not reduced further. However, if the switch {\tt COMPLEX}\ttindex{COMPLEX} is turned on, full complex arithmetic is then carried out. In other words, the above factorization will give the result {\small\begin{verbatim} {{A + I,1},{A - I,1}} \end{verbatim}} and the quotient will be reduced to {\tt A-I*B}. The switch {\tt COMPLEX} may be combined with {\tt ROUNDED} to give complex real numbers; the appropriate arithmetic is performed in this case. Complex conjugation is used to remove complex numbers from denominators of expressions. To do this if {\tt COMPLEX} is off, you must turn the switch {\tt RATIONALIZE}\ttindex{RATIONALIZE} on. \chapter{Substitution Commands}\index{Substitution} An important class of commands in {\REDUCE} define substitutions for variables and expressions to be made during the evaluation of expressions. Such substitutions use the prefix operator {\tt SUB}, various forms of the command {\tt LET}, and rule sets. \section{SUB Operator}\ttindex{SUB} Syntax: {\small\begin{verbatim} SUB(<substitution_list>,EXPRN1:algebraic):algebraic \end{verbatim}} where {\tt <substitution\_list>} is a list of one or more equations of the form {\small\begin{verbatim} VAR:kernel=EXPRN:algebraic \end{verbatim}} or a kernel that evaluates to such a list. The {\tt SUB} operator gives the algebraic result of replacing every occurrence of the variable {\tt VAR} in the expression {\tt EXPRN1} by the expression {\tt EXPRN}. Specifically, {\tt EXPRN1} is first evaluated using all available rules. Next the substitutions are made, and finally the substituted expression is reevaluated. When more than one variable occurs in the substitution list, the substitution is performed by recursively walking down the tree representing {\tt EXPRN1}, and replacing every {\tt VAR} found by the appropriate {\tt EXPRN}. The {\tt EXPRN} are not themselves searched for any occurrences of the various {\tt VAR}s. The trivial case {\tt SUB(EXPRN1)} returns the algebraic value of {\tt EXPRN1}. 
{\it Examples:} {\small\begin{verbatim} 2 2 sub({x=a+y,y=y+1},x^2+y^2) -> A + 2*A*Y + 2*Y + 2*Y + 1 \end{verbatim}} and with {\tt s := \{x=a+y,y=y+1\}}, {\small\begin{verbatim} 2 2 sub(s,x^2+y^2) -> A + 2*A*Y + 2*Y + 2*Y + 1 \end{verbatim}} Note that the global assignments {\tt x:=a+y}, etc., do not take place. {\tt EXPRN1} can be any valid algebraic expression whose type is such that a substitution process is defined for it (e.g., scalar expressions, lists and matrices). An error will occur if an expression of an invalid type for substitution occurs either in {\tt EXPRN} or {\tt EXPRN1}. The braces around the substitution list may also be omitted, as in: {\small\begin{verbatim} 2 2 sub(x=a+y,y=y+1,x^2+y^2) -> A + 2*A*Y + 2*Y + 2*Y + 1 \end{verbatim}} \section{LET Rules}\ttindex{LET} Unlike substitutions introduced via {\tt SUB}, {\tt LET} rules are global in scope and stay in effect until replaced or {\tt CLEAR}ed. The simplest use of the {\tt LET} statement is in the form {\small\begin{verbatim} LET <substitution list> \end{verbatim}} where {\tt <substitution list>} is a list of rules separated by commas, each of the form: {\small\begin{verbatim} <variable> = <expression> \end{verbatim}} or {\small\begin{verbatim} <prefix operator>(<argument>,...,<argument>) = <expression> \end{verbatim}} or {\small\begin{verbatim} <argument> <infix operator>,..., <argument> = <expression> \end{verbatim}} For example, {\small\begin{verbatim} let {x => y^2, h(u,v) => u - v, cos(pi/3) => 1/2, a*b => c, l+m => n, w^3 => 2*z - 3, z^10 => 0} \end{verbatim}} The list brackets can be left out if preferred. The above rules could also have been entered as seven separate {\tt LET} statements. After such {\tt LET} rules have been input, {\tt X} will always be evaluated as the square of {\tt Y}, and so on. This is so even if at the time the {\tt LET} rule was input, the variable {\tt Y} had a value other than {\tt Y}. (In contrast, the assignment {\tt x:=y\verb|^|2} will set {\tt X} equal to the square of the current value of {\tt Y}, which could be quite different.) The rule {\tt let a*b=c} means that whenever {\tt A} and {\tt B} are both factors in an expression their product will be replaced by {\tt C}. For example, {\tt a\verb|^|5*b\verb|^|7*w} would be replaced by {\tt c\verb|^|5*b\verb|^|2*w}. The rule for {\tt l+m} will not only replace all occurrences of {\tt l+m} by {\tt N}, but will also normally replace {\tt L} by {\tt n-m}, but not {\tt M} by {\tt n-l}. A more complete description of this case is given in Section~\ref{sec-gensubs}. The rule pertaining to {\tt w\verb|^|3} will apply to any power of {\tt W} greater than or equal to the third. Note especially the last example, {\tt let z\verb|^|10=0}. This declaration means, in effect: ignore the tenth or any higher power of {\tt Z}. Such declarations, when appropriate, often speed up a computation to a considerable degree. (See\index{Asymptotic command} Section~\ref{sec-asymp} for more details.) Any new operators occurring in such {\tt LET} rules will be automatically declared {\tt OPERATOR} by the system, if the rules are being read from a file. If they are being entered interactively, the system will ask {\tt DECLARE} ... {\tt OPERATOR?} . Answer {\tt Y} or {\tt N} and hit \key{Return}. In each of these examples, substitutions are only made for the explicit expressions given; i.e., none of the variables may be considered arbitrary in any sense. 
For example, the command {\small\begin{verbatim} let h(u,v) = u - v; \end{verbatim}} will cause {\tt h(u,v)} to evaluate to {\tt U - V}, but will not affect {\tt h(u,z)} or {\tt H} with any arguments other than precisely the symbols {\tt U,V}. These simple {\tt LET} rules are on the same logical level as assignments made with the := operator. An assignment {\tt x := p+q} cancels a rule {\tt let x = y\verb|^|2} made earlier, and vice versa. {\it CAUTION:} A recursive rule such as {\small\begin{verbatim} let x = x + 1; \end{verbatim}} is erroneous, since any subsequent evaluation of {\tt X} would lead to a non-terminating chain of substitutions: {\small\begin{verbatim} x -> x + 1 -> (x + 1) + 1 -> ((x + 1) + 1) + 1 -> ... \end{verbatim}} Similarly, coupled substitutions such as {\small\begin{verbatim} let l = m + n, n = l + r; \end{verbatim}} would lead to the same error. As a result, if you try to evaluate an {\tt X}, {\tt L} or {\tt N} defined as above, you will get an error such as {\small\begin{verbatim} X improperly defined in terms of itself \end{verbatim}} Array and matrix elements can appear on the left-hand side of a {\tt LET} statement. However, because of their {\em instant evaluation\/} \index{Instant evaluation} property, it is the value of the element that is substituted for, rather than the element itself. E.g., {\small\begin{verbatim} array a(5); a(2) := b; let a(2) = c; \end{verbatim}} results in {\tt B} being substituted by {\tt C}; the assignment for {\tt a(2)} does not change. Finally, if an error occurs in any equation in a {\tt LET} statement (including generalized statements involving {\tt FOR ALL} and {\tt SUCH THAT)}, the remaining rules are not evaluated. \subsection{FOR ALL \ldots LET}\ttindex{FOR ALL} If a substitution for all possible values of a given argument of an operator is required, the declaration {\tt FOR ALL} may be used. The syntax of such a command is {\small\begin{verbatim} FOR ALL <variable>,...,<variable> <LET statement> <terminator> \end{verbatim}} e.g., {\small\begin{verbatim} for all x,y let h(x,y) = x-y; for all x let k(x,y) = x^y; \end{verbatim}} The first of these declarations would cause {\tt h(a,b)} to be evaluated as {\tt A-B}, {\tt h(u+v,u+w)} to be {\tt V-W}, etc. If the operator symbol {\tt H} is used with more or fewer argument places, not two, the {\tt LET} would have no effect, and no error would result. The second declaration would cause {\tt k(a,y)} to be evaluated as {\tt a\verb|^|y}, but would have no effect on {\tt k(a,z)} since the rule didn't say {\tt FOR ALL Y} ... . Where we used {\tt X} and {\tt Y} in the examples, any variables could have been used. This use of a variable doesn't affect the value it may have outside the {\tt LET} statement. However, you should remember what variables you actually used. If you want to delete the rule subsequently, you must use the same variables in the {\tt CLEAR} command. It is possible to use more complicated expressions as a template for a {\tt LET} statement, as explained in the section on substitutions for general expressions. In nearly all cases, the rule will be accepted, and a consistent application made by the system. However, if there is a sole constant or a sole free variable on the left-hand side of a rule (e.g., {\tt let 2=3} or {\tt for all x let x=2)}, then the system is unable to handle the rule, and the error message {\small\begin{verbatim} Substitution for ... not allowed \end{verbatim}} will be issued. 
Any variable listed in the {\tt FOR ALL} part will have its symbol preceded by an equal sign: {\tt X} in the above example will appear as {\tt =X}. An error will also occur if a variable in the {\tt FOR ALL} part is not properly matched on both sides of the {\tt LET} equation. \subsection{FOR ALL \ldots SUCH THAT \ldots LET} \ttindex{FOR ALL}\ttindex{SUCH THAT} If a substitution is desired for more than a single value of a variable in an operator or other expression, but not all values, a conditional form of the {\tt FOR ALL \ldots LET} declaration can be used. {\it Example:} {\small\begin{verbatim} for all x such that numberp x and x<0 let h(x)=0; \end{verbatim}} will cause {\tt h(-5)} to be evaluated as 0, but {\tt H} of a positive integer, or of an argument that is not an integer at all, would not be affected. Any boolean expression can follow the {\tt SUCH THAT} keywords. \subsection{Removing Assignments and Substitution Rules}\ttindex{CLEAR} The user may remove all assignments and substitution rules from any expression by the command {\tt CLEAR}, in the form {\small\begin{verbatim} CLEAR <expression>,...,<expression><terminator> \end{verbatim}} e.g. {\small\begin{verbatim} clear x, h(x,y); \end{verbatim}} Because of their {\em instant evaluation\/} property, array and matrix elements cannot be cleared with {\tt CLEAR}. For example, if {\tt A} is an array, you must say {\small\begin{verbatim} a(3) := 0; \end{verbatim}} rather than {\small\begin{verbatim} clear a(3); \end{verbatim}} to ``clear'' element {\tt a(3)}. On the other hand, a whole array (or matrix) {\tt A} can be cleared by the command {\tt clear a}; This means much more than resetting to 0 all the elements of {\tt A}. The fact that {\tt A} is an array, and what its dimensions are, are forgotten, so {\tt A} can be redefined as another type of object, for example an operator. The more general types of {\tt LET} declarations can also be deleted by using {\tt CLEAR}. Simply repeat the {\tt LET} rule to be deleted, using {\tt CLEAR} in place of {\tt LET}, and omitting the equal sign and right-hand part. The same dummy variables must be used in the {\tt FOR ALL} part, and the boolean expression in the {\tt SUCH THAT} part must be written the same way. (The placing of blanks doesn't have to be identical.) {\it Example:} The {\tt LET} rule {\small\begin{verbatim} for all x such that numberp x and x<0 let h(x)=0; \end{verbatim}} can be erased by the command {\small\begin{verbatim} for all x such that numberp x and x<0 clear h(x); \end{verbatim}} \subsection{Overlapping LET Rules} {\tt CLEAR} is not the only way to delete a {\tt LET} rule. A new {\tt LET} rule identical to the first, but with a different expression after the equal sign, replaces the first. Replacements are also made in other cases where the existing rule would be in conflict with the new rule. For example, a rule for {\tt x\verb|^|4} would replace a rule for {\tt x\verb|^|5}. The user should however be cautioned against having several {\tt LET} rules in effect that relate to the same expression. No guarantee can be given as to which rules will be applied by {\REDUCE} or in what order. It is best to {\tt CLEAR} an old rule before entering a new related {\tt LET} rule. \subsection{Substitutions for General Expressions} \label{sec-gensubs} The examples of substitutions discussed in other sections have involved very simple rules. However, the substitution mechanism used in {\REDUCE} is very general, and can handle arbitrarily complicated rules without difficulty. 
The general substitution mechanism used in {\REDUCE} is discussed in Hearn, A. C., ``{\REDUCE}, A User-Oriented Interactive System for Algebraic Simplification,'' Interactive Systems for Experimental Applied Mathematics, (edited by M. Klerer and J. Reinfelds), Academic Press, New York (1968), 79-90, and Hearn. A. C., ``The Problem of Substitution,'' Proc. 1968 Summer Institute on Symbolic Mathematical Computation, IBM Programming Laboratory Report FSC 69-0312 (1969). For the reasons given in these references, {\REDUCE} does not attempt to implement a general pattern matching algorithm. However, the present system uses far more sophisticated techniques than those discussed in the above papers. It is now possible for the rules appearing in arguments of {\tt LET} to have the form {\small\begin{verbatim} <substitution expression> = <expression> \end{verbatim}} where any rule to which a sensible meaning can be assigned is permitted. However, this meaning can vary according to the form of {\tt <substitution expression>}. The semantic rules associated with the application of the substitution are completely consistent, but somewhat complicated by the pragmatic need to perform such substitutions as efficiently as possible. The following rules explain how the majority of the cases are handled. To begin with, the {\tt <substitution expression>} is first partly simplified by collecting like terms and putting identifiers (and kernels) in the system order. However, no substitutions are performed on any part of the expression with the exception of expressions with the {\em instant evaluation\/} property, such as array and matrix elements, whose actual values are used. It should also be noted that the system order used is not changeable by the user, even with the {\tt KORDER} command. Specific cases are then handled as follows: \begin{enumerate} \item If the resulting simplified rule has a left-hand side that is an identifier, an expression with a top-level algebraic operator or a power, then the rule is added without further change to the appropriate table. \item If the operator * appears at the top level of the simplified left-hand side, then any constant arguments in that expression are moved to the right-hand side of the rule. The remaining left-hand side is then added to the appropriate table. For example, {\small\begin{verbatim} let 2*x*y=3 \end{verbatim}} becomes {\small\begin{verbatim} let x*y=3/2 \end{verbatim}} so that {\tt x*y} is added to the product substitution table, and when this rule is applied, the expression {\tt x*y} becomes 3/2, but {\tt X} or {\tt Y} by themselves are not replaced. \item If the operators {\tt +}, {\tt -} or {\tt /} appear at the top level of the simplified left-hand side, all but the first term is moved to the right-hand side of the rule. Thus the rules {\small\begin{verbatim} let l+m=n, x/2=y, a-b=c \end{verbatim}} become {\small\begin{verbatim} let l=n-m, x=2*y, a=c+b. \end{verbatim}} \end{enumerate} One problem that can occur in this case is that if a quantified expression is moved to the right-hand side, a given free variable might no longer appear on the left-hand side, resulting in an error because of the unmatched free variable. E.g., {\small\begin{verbatim} for all x,y let f(x)+f(y)=x*y \end{verbatim}} would become {\small\begin{verbatim} for all x,y let f(x)=x*y-f(y) \end{verbatim}} which no longer has {\tt Y} on both sides. The fact that array and matrix elements are evaluated in the left-hand side of rules can lead to confusion at times. 
Consider for example the statements {\small\begin{verbatim} array a(5); let x+a(2)=3; let a(3)=4; \end{verbatim}} The left-hand side of the first rule will become {\tt X}, and the second 0. Thus the first rule will be instantiated as a substitution for {\tt X}, and the second will result in an error. The order in which a list of rules is applied is not easily understandable without a detailed knowledge of the system simplification protocol. It is also possible for this order to change from release to release, as improved substitution techniques are implemented. Users should therefore assume that the order of application of rules is arbitrary, and program accordingly. After a substitution has been made, the expression being evaluated is reexamined in case a new allowed substitution has been generated. This process is continued until no more substitutions can be made. As mentioned elsewhere, when a substitution expression appears in a product, the substitution is made if that expression divides the product. For example, the rule {\small\begin{verbatim} let a^2*c = 3*z; \end{verbatim}} would cause {\tt a\verb|^|2*c*x} to be replaced by {\tt 3*Z*X} and {\tt a\verb|^|2*c\verb|^|2} by {\tt 3*Z*C}. If the substitution is desired only when the substitution expression appears in a product with the explicit powers supplied in the rule, the command {\tt MATCH} should be used instead.\ttindex{MATCH} For example, {\small\begin{verbatim} match a^2*c = 3*z; \end{verbatim}} would cause {\tt a\verb|^|2*c*x} to be replaced by {\tt 3*Z*X}, but {\tt a\verb|^|2*c\verb|^|2} would not be replaced. {\tt MATCH} can also be used with the {\tt FOR ALL} constructions described above. To remove substitution rules of the type discussed in this section, the {\tt CLEAR}\ttindex{CLEAR} command can be used, combined, if necessary, with the same {\tt FOR ALL} clause with which the rule was defined, for example: {\small\begin{verbatim} for all x clear log(e^x),e^log(x),cos(w*t+theta(x)); \end{verbatim}} Note, however, that the arbitrary variable names in this case {\em must\/} be the same as those used in defining the substitution. \section{Rule Lists} \index{Rule lists} Rule lists offer an alternative approach to defining substitutions that is different from either {\tt SUB} or {\tt LET}. In fact, they provide the best features of both, since they have all the capabilities of {\tt LET}, but the rules can also be applied locally as is possible with {\tt SUB}. In time, they will be used more and more in {\REDUCE}. However, since they are relatively new, much of the {\REDUCE} code you see uses the older constructs. A rule list is a list of {\em rules\/} that have the syntax {\small\begin{verbatim} <expression> => <expression> (WHEN <boolean expression>) \end{verbatim}} For example, {\small\begin{verbatim} {cos(~x)*cos(~y) => (cos(x+y)+cos(x-y))/2, cos(~n*pi) => (-1)^n when remainder(n,2)=0} \end{verbatim}} The tilde preceding a variable marks that variable as {\em free\/} for that rule, much as a variable in a {\tt FOR ALL} clause in a {\tt LET} statement. The first occurrence of that variable in each relevant rule must be so marked on input, otherwise inconsistent results can occur. For example, the rule list {\small\begin{verbatim} {cos(~x)*cos(~y) => (cos(x+y)+cos(x-y))/2, cos(x)^2 => (1+cos(2x))/2} \end{verbatim}} designed to replace products of cosines, would not be correct, since the second rule would only apply to the explicit argument {\tt X}. 
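A corrected form of this rule list marks the variable in the second rule as
well:
{\small\begin{verbatim}
{cos(~x)*cos(~y) => (cos(x+y)+cos(x-y))/2,
 cos(~x)^2 => (1+cos(2*x))/2}
\end{verbatim}}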
Later occurrences in the same rule may also be marked, but this is optional (internally, all such rules are stored with each relevant variable explicitly marked). The optional {\tt WHEN}\ttindex{WHEN} clause allows constraints to be placed on the application of the rule, much as the {\tt SUCH THAT} clause in a {\tt LET} statement. A rule list may be named, for example {\small\begin{verbatim} trig1 := {cos(~x)*cos(~y) => (cos(x+y)+cos(x-y))/2, cos(~x)*sin(~y) => (sin(x+y)-sin(x-y))/2, sin(~x)*sin(~y) => (cos(x-y)-cos(x+y))/2, cos(~x)^2 => (1+cos(2*x))/2, sin(~x)^2 => (1-cos(2*x))/2}; \end{verbatim}} Such named rule lists may be inspected as needed. E.g., the command {\tt trig1;} would cause the above list to be printed. Rule lists may be used in two ways. They can be globally instantiated by means of the command {\tt LET}.\ttindex{LET} For example, {\small\begin{verbatim} let trig1; \end{verbatim}} would cause the above list of rules to be globally active from then on until cancelled by the command {\tt CLEARRULES},\ttindex{CLEARRULES} as in {\small\begin{verbatim} clearrules trig1; \end{verbatim}} {\tt CLEARRULES} has the syntax {\small\begin{verbatim} CLEARRULES <rule list>|<name of rule list>(,...) . \end{verbatim}} The second way to use rule lists is to invoke them locally by means of a {\tt WHERE}\ttindex{WHERE} clause. For example {\small\begin{verbatim} cos(a)*cos(b+c) where {cos(~x)*cos(~y) => (cos(x+y)+cos(x-y))/2}; \end{verbatim}} or {\small\begin{verbatim} cos(a)*sin(b) where trigrules; \end{verbatim}} The syntax of an expression with a {\tt WHERE} clause is: {\small\begin{verbatim} <expression> WHERE <rule>|<rule list>(,<rule>|<rule list> ...) \end{verbatim}} so the first example above could also be written {\small\begin{verbatim} cos(a)*cos(b+c) where cos(~x)*cos(~y) => (cos(x+y)+cos(x-y))/2; \end{verbatim}} The effect of this construct is that the rule list(s) in the {\tt WHERE} clause only apply to the expression on the left of {\tt WHERE}. They have no effect outside the expression. In particular, they do not affect previously defined {\tt WHERE} clauses or {\tt LET} statements. For example, the sequence {\small\begin{verbatim} let a=2; a where a=>4; a; \end{verbatim}} would result in the output {\small\begin{verbatim} 4 2 \end{verbatim}} Although {\tt WHERE} has a precedence less than any other infix operator, it still binds higher than keywords such as {\tt ELSE}, {\tt THEN}, {\tt DO}, {\tt REPEAT} and so on. Thus the expression {\small\begin{verbatim} if a=2 then 3 else a+2 where a=3 \end{verbatim}} will parse as {\small\begin{verbatim} if a=2 then 3 else (a+2 where a=3) \end{verbatim}} {\tt WHERE} may be used to introduce auxiliary variables in symbolic mode expressions, as described in Section~\ref{sec-lambda}. However, the symbolic mode use has different semantics, so expressions do not carry from one mode to the other. \COMPATNOTE In order to provide compatibility with older versions of rule lists released through the Network Library, it is currently possible to use an equal sign interchangeably with the replacement sign {\tt =>} in rules and {\tt LET} statements. However, since this will change in future versions, the replacement sign is preferable in rules and the equal sign in non-rule-based {\tt LET} statements. \subsection*{Advanced Use of Rule Lists} Some advanced features of the rule list mechanism make it possible to write more complicated rules than those discussed so far, and in many cases to write more compact rule lists. 
These features are: \begin{itemize} \item Free operators \item Double slash operator \item Double tilde variables. \end{itemize} A {\bf free operator} in the left hand side of a pattern will match any operator with the same number of arguments. The free operator is written in the same style as a variable. For example, the implementation of the product rule of differentiation can be written as: {\small\begin{verbatim} operator diff, !~f, !~g; prule := {diff(~f(~x) * ~g(~x),x) => diff(f(x),x) * g(x) + diff(g(x),x) * f(x)}; let prule; diff(sin(z)*cos(z),z); cos(z)*diff(sin(z),z) + diff(cos(z),z)*sin(z) \end{verbatim}} The {\bf double slash operator} may be used as an alternative to a single slash (quotient) in order to match quotients properly. E.g., in the example of the Gamma function above, one can use: {\small\begin{verbatim} gammarule := {gamma(~z)//(~c*gamma(~zz)) => gamma(z)/(c*gamma(zz-1)*zz) when fixp(zz -z) and (zz -z) >0, gamma(~z)//gamma(~zz) => gamma(z)/(gamma(zz-1)*zz) when fixp(zz -z) and (zz -z) >0}; let gammarule; gamma(z)/gamma(z+3); 1 ---------------------- 3 2 z + 6*z + 11*z + 6 \end{verbatim}} The above example suffers from the fact that two rules had to be written in order to perform the required operation. This can be simplified by the use of {\bf double tilde variables}. E.g. the rule list {\small\begin{verbatim} GGrule := { gamma(~z)//(~~c*gamma(~zz)) => gamma(z)/(c*gamma(zz-1)*zz) when fixp(zz -z) and (zz -z) >0}; \end{verbatim}} will implement the same operation in a much more compact way. In general, double tilde variables are bound to the neutral element with respect to the operation in which they are used. \begin{tabular}{lll} Pattern given & Argument used & Binding \\ \\ \symbol{126}z + \symbol{126}\symbol{126}y & x & z=x; y=0 \\ \symbol{126}z + \symbol{126}\symbol{126}y & x+3 & z=x; y=3 or z=3; y=x \\ \\ \symbol{126}z * \symbol{126}\symbol{126}y & x & z=x; y=1\\ \symbol{126}z * \symbol{126}\symbol{126}y & x*3 & z=x; y=3 or z=3; y=x\\ \\ \symbol{126}z / \symbol{126}\symbol{126}y & x & z=x; y=1\\ \symbol{126}z / \symbol{126}\symbol{126}y & x/3 & z=x; y=3 \\ \\ \end{tabular} Remarks: A double tilde variable as the numerator of a pattern is not allowed. Also, using double tilde variables may lead to recursion errors when the zero case is not handled properly. {\small\begin{verbatim} let f(~~a * ~x,x) => a * f(x,x) when freeof (a,x); f(z,z); ***** f(z,z) improperly defined in terms of itself % BUT: let ff(~~a * ~x,x) => a * ff(x,x) when freeof (a,x) and a neq 1; ff(z,z); ff(z,z) ff(3*z,z); 3*ff(z,z) \end{verbatim}} \subsection*{Displaying Rules Associated with an Operator} The operator {\tt SHOWRULES}\ttindex{SHOWRULES} takes a single identifier as argument, and returns in rule-list form the operator rules associated with that argument. For example: {\small\begin{verbatim} showrules log; {LOG(E) => 1, LOG(1) => 0, ~X LOG(E ) => ~X, 1 DF(LOG(~X),~X) => ----} ~X \end{verbatim}} Such rules can then be manipulated further as with any list. For example {\tt rhs first ws;} has the value {\tt 1}. Note that an operator may have other properties that cannot be displayed in such a form, such as the fact it is an odd function, or has a definition defined as a procedure. \subsection*{Order of Application of Rules} If rules have overlapping domains, their order of application is important. In general, it is very difficult to specify this order precisely, so that it is best to assume that the order is arbitrary. 
However, if only one operator is involved, the order of application of the rules for this operator can be determined from the following: \begin{enumerate} \item Rules containing at least one free variable apply before all rules without free variables. \item Rules activated in the most recent {\tt LET} command are applied first. \item {\tt LET} with several entries generate the same order of application as a corresponding sequence of commands with one rule or rule set each. \item Within a rule set, the rules containing at least one free variable are applied in their given order. In other words, the first member of the list is applied first. \item Consistent with the first item, any rule in a rule list that contains no free variables is applied after all rules containing free variables. \end{enumerate} {\it Example:} The following rule set enables the computation of exact values of the Gamma function: {\small\begin{verbatim} operator gamma,gamma_error; gamma_rules := {gamma(~x)=>sqrt(pi)/2 when x=1/2, gamma(~n)=>factorial(n-1) when fixp n and n>0, gamma(~n)=>gamma_error(n) when fixp n, gamma(~x)=>(x-1)*gamma(x-1) when fixp(2*x) and x>1, gamma(~x)=>gamma(x+1)/x when fixp(2*x)}; \end{verbatim}} Here, rule by rule, cases of known or definitely uncomputable values are sorted out; e.g. the rule leading to the error expression will be applied for negative integers only, since the positive integers are caught by the preceding rule, and the last rule will apply for negative odd multiples of $1/2$ only. Alternatively the first rule could have been written as {\small\begin{verbatim} gamma(1/2) => sqrt(pi)/2, \end{verbatim}} but then the case $x=1/2$ should be excluded in the {\tt WHEN} part of the last rule explicitly because a rule without free variables cannot take precedence over the other rules. \section{Asymptotic Commands} \index{Asymptotic command} \label{sec-asymp} In expansions of polynomials involving variables that are known to be small, it is often desirable to throw away all powers of these variables beyond a certain point to avoid unnecessary computation. The command {\tt LET} may be used to do this. For example, if only powers of {\tt X} up to {\tt x\verb|^|7} are needed, the command {\small\begin{verbatim} let x^8 = 0; \end{verbatim}} will cause the system to delete all powers of {\tt X} higher than 7. {\it CAUTION:} This particular simplification works differently from most substitution mechanisms in {\REDUCE} in that it is applied during polynomial manipulation rather than to the whole evaluated expression. Thus, with the above rule in effect, {\tt x\verb|^|10/x\verb|^|5} would give the result zero, since the numerator would simplify to zero. Similarly {\tt x\verb|^|20/x\verb|^|10} would give a {\tt Zero divisor} error message, since both numerator and denominator would first simplify to zero. The method just described is not adequate when expressions involve several variables having different degrees of smallness. In this case, it is necessary to supply an asymptotic weight to each variable and count up the total weight of each product in an expanded expression before deciding whether to keep the term or not. There are two associated commands in the system to permit this type of asymptotic constraint. The command {\tt WEIGHT} \ttindex{WEIGHT} takes a list of equations of the form {\small\begin{verbatim} <kernel form> = <number> \end{verbatim}} where {\tt <number>} must be a positive integer (not just evaluate to a positive integer). 
This command assigns the weight {\tt <number>} to the relevant kernel
form. A check is then made in all algebraic evaluations to see if the
total weight of the term is greater than the weight level assigned to the
calculation. If it is, the term is deleted. To compute the total weight
of a product, the individual weights of each kernel form are multiplied
by their corresponding powers and then added.

The weight level of the system is initially set to 1. The user may change
this setting by the command\ttindex{WTLEVEL}
{\small\begin{verbatim}
wtlevel <number>;
\end{verbatim}}
which sets {\tt <number>} as the new weight level of the system.
{\tt <number>} must evaluate to a positive integer. {\tt WTLEVEL} will also
allow {\tt NIL} as an argument, in which case the current weight level is
returned.

\chapter{File Handling Commands}\index{File handling}

In many applications, it is desirable to load previously prepared
{\REDUCE} files into the system, or to write output on other files.
{\REDUCE} offers five commands for this purpose, namely, {\tt IN},
{\tt OUT}, {\tt SHUT}, {\tt LOAD}, and {\tt LOAD\_PACKAGE}. The first
three\ttindex{IN}\ttindex{OUT}\ttindex{SHUT} operators are described here;
{\tt LOAD} and {\tt LOAD\_PACKAGE} are discussed in
Section~\ref{sec-load}.

\section{IN Command}\ttindex{IN}

This command takes a list of file names as argument and directs the
system to input\index{Input} each file (which should contain {\REDUCE}
statements and commands) into the system. A file name can be either an
identifier or a string. The explicit format of these will be system
dependent and, in many cases, site dependent. The explicit instructions
for the implementation being used should therefore be consulted for
further details. For example:
{\small\begin{verbatim}
in f1,"ggg.rr.s";
\end{verbatim}}
will first load file {\tt F1}, then {\tt ggg.rr.s}. When a semicolon is
used as the terminator of the {\tt IN} statement, the statements in the
file are echoed on the terminal or written on the current output file.
If \$ \index{Command terminator} is used as the terminator, the input is
not shown. Echoing of all or part of the input file can be prevented,
even if a semicolon was used, by placing an {\tt off echo;}\ttindex{ECHO}
command in the input file.

Files to be read using {\tt IN} should end with {\tt ;END;}. Note the two
semicolons! First of all, this is protection against obscure difficulties
the user will have if there are, by mistake, more {\tt BEGIN}s than
{\tt END}s in the file. Secondly, it triggers some file control
book-keeping which may improve system efficiency. If {\tt END} is omitted,
an error message {\tt "End-of-file read"} will occur.

\section{OUT Command}\ttindex{OUT}

This command takes a single file name as argument, and directs output to
that file from then on, until another {\tt OUT} changes the output file,
or {\tt SHUT} closes it. Output can go to only one file at a time,
although many can be open. If the file has previously been used for
output during the current job, and not {\tt SHUT},\ttindex{SHUT} the new
output is appended to the end of the file. Any existing file is erased
before its first use for output in a job, or if it had been {\tt SHUT}
before the new {\tt OUT}.

To output on the terminal without closing the output file, the reserved
file name T (for terminal) may be used. For example, {\tt out ofile;}
will direct output to the file {\tt OFILE} and {\tt out t;} will direct
output to the user's terminal.
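For instance, the following sequence (a sketch only; {\tt RESULTS} is an
arbitrary file name) sends some output to a file and then returns output to
the terminal, leaving the file open for further use:
{\small\begin{verbatim}
out results;    % subsequent output goes to the file RESULTS
df(x^3,x);
out t;          % output returns to the terminal; RESULTS remains open
shut results;   % close and save RESULTS
\end{verbatim}}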
The output sent to the file will be in the same form that it would have on the terminal. In particular {\tt x\verb|^|2} would appear on two lines, an {\tt X} on the lower line and a 2 on the line above. If the purpose of the output file is to save results to be read in later, this is not an appropriate form. We first must turn off the {\tt NAT} switch that specifies that output should be in standard mathematical notation. {\it Example:} To create a file {\tt ABCD} from which it will be possible to read -- using {\tt IN} -- the value of the expression {\tt XYZ}: {\small\begin{verbatim} off echo$ % needed if your input is from a file. off nat$ % output in IN-readable form. Each expression % printed will end with a $ . out abcd$ % output to new file linelength 72$ % for systems with fixed input line length. xyz:=xyz; % will output "XYZ := " followed by the value % of XYZ write ";end"$ % standard for ending files for IN shut abcd$ % save ABCD, return to terminal output on nat$ % restore usual output form \end{verbatim}} \section{SHUT Command}\ttindex{SHUT} This command takes a list of names of files that have been previously opened via an {\tt OUT} statement and closes them. Most systems require this action by the user before he ends the {\REDUCE} job (if not sooner), otherwise the output may be lost. If a file is shut and a further {\tt OUT} command issued for the same file, the file is erased before the new output is written. If it is the current output file that is shut, output will switch to the terminal. Attempts to shut files that have not been opened by {\tt OUT}, or an input file, will lead to errors. \chapter{Commands for Interactive Use}\index{Interactive use} {\REDUCE} is designed as an interactive system, but naturally it can also operate in a batch processing or background mode by taking its input command by command from the relevant input stream. There is a basic difference, however, between interactive and batch use of the system. In the former case, whenever the system discovers an ambiguity at some point in a calculation, such as a forgotten type assignment for instance, it asks the user for the correct interpretation. In batch operation, it is not practical to terminate the calculation at such points and require resubmission of the job, so the system makes the most obvious guess of the user's intentions and continues the calculation. There is also a difference in the handling of errors. In the former case, the computation can continue since the user has the opportunity to correct the mistake. In batch mode, the error may lead to consequent erroneous (and possibly time consuming) computations. So in the default case, no further evaluation occurs, although the remainder of the input is checked for syntax errors. A message {\tt "Continuing with parsing only"} informs the user that this is happening. On the other hand, the switch {\tt ERRCONT},\ttindex{ERRCONT} if on, will cause the system to continue evaluating expressions after such errors occur. When a syntactical error occurs, the place where the system detected the error is marked with three dollar signs (\$\$\$). In interactive mode, the user can then use {\tt ED}\ttindex{ED} to correct the error, or retype the command. 
When a non-syntactical error occurs in interactive mode, the command being evaluated at the time the last error occurred is saved, and may later be reevaluated by the command {\tt RETRY}.\ttindex{RETRY} \section{Referencing Previous Results} It is often useful to be able to reference results of previous computations during a {\REDUCE} session. For this purpose, {\REDUCE} maintains a history\index{History} of all interactive inputs and the results of all interactive computations during a given session. These results are referenced by the command number that {\REDUCE} prints automatically in interactive mode. To use an input expression in a new computation, one writes {\tt input(}$n${\tt )},\ttindex{INPUT} where $n$ is the command number. To use an output expression, one writes {\tt WS(}$n${\tt )}.\ttindex{WS} {\tt WS} references the previous command. E.g., if command number 1 was {\tt INT(X-1,X)}; and the result of command number 7 was {\tt X-1}, then {\small\begin{verbatim} 2*input(1)-ws(7)^2; \end{verbatim}} would give the result {\tt -1}, whereas {\small\begin{verbatim} 2*ws(1)-ws(7)^2; \end{verbatim}} would yield the same result, but {\em without\/} a recomputation of the integral. The operator {\tt DISPLAY}\ttindex{DISPLAY} is available to display previous inputs. If its argument is a positive integer, {\it n} say, then the previous n inputs are displayed. If its argument is {\tt ALL} (or in fact any non-numerical expression), then all previous inputs are displayed. \section{Interactive Editing} It is possible when working interactively to edit any {\REDUCE} input that comes from the user's terminal, and also some user-defined procedure definitions. At the top level, one can access any previous command string by the command {\tt ed(}$n${\tt )},\ttindex{ED} where n is the desired command number as prompted by the system in interactive mode. {\tt ED}; (i.e. no argument) accesses the previous command. After {\tt ED} has been called, you can now edit the displayed string using a string editor with the following commands: \begin{tabular}{lp{\rboxwidth}} {\tt~~~~~ B} & move pointer to beginning \\ {\tt~~~~~ C<character>} & replace next character by {\em character} \\ {\tt~~~~~ D} & delete next character \\ {\tt~~~~~ E} & end editing and reread text \\ {\tt~~~~~ F<character>} & move pointer to next occurrence of {\em character} \\[1.7pt] {\tt~~~~~ I<string><escape>} & insert {\em string\/} in front of pointer \\ {\tt~~~~~ K<character>} & delete all characters until {\em character} \\ {\tt~~~~~ P} & print string from current pointer \\ {\tt~~~~~ Q} & give up with error exit \\ {\tt~~~~~ S<string><escape>} & search for first occurrence of {\em string}, positioning pointer just before it \\ {\tt~~~~~ space} or {\tt X} & move pointer right one character. \end{tabular} The above table can be displayed online by typing a question mark followed by a carriage return to the editor. The editor prompts with an angle bracket. Commands can be combined on a single line, and all command sequences must be followed by a carriage return to become effective. Thus, to change the command {\tt x := a+1;} to {\tt x := a+2}; and cause it to be executed, the following edit command sequence could be used: {\small\begin{verbatim} f1c2e<return>. \end{verbatim}} The interactive editor may also be used to edit a user-defined procedure that has not been compiled. To do this, one says: \ttindex{EDITDEF} {\small\begin{verbatim} editdef <id>; \end{verbatim}} where {\tt <id>} is the name of the procedure. 
The procedure definition will then be displayed in editing mode, and may
then be edited and redefined on exiting from the editor.

Some versions of {\REDUCE} now include input editing that uses the
capabilities of modern window systems. Please consult your system
dependent documentation to see if this is possible. Such editing
techniques are usually much easier to use than {\tt ED} or {\tt EDITDEF}.

\section{Interactive File Control}

If input is coming from an external file, the system treats it as a batch
processed calculation. If the user desires interactive
\index{Interactive use} response in this case, he can include the command
{\tt on int};\ttindex{INT} in the file. Likewise, he can issue the
command {\tt off int}; in the main program if he does not desire continual
questioning from the system. Regardless of the setting of {\tt INT},
input commands from a file are not kept in the system, and so cannot be
edited using {\tt ED}. However, many implementations of {\REDUCE} provide
a link to an external system editor that can be used for such editing.
The specific instructions for the particular implementation should be
consulted for information on this.

Two commands are available in {\REDUCE} for interactive use of files.
{\tt PAUSE};\ttindex{PAUSE} may be inserted at any point in an input file.
When this command is encountered on input, the system prints the message
{\tt CONT?} on the user's terminal and halts. If the user responds {\tt Y}
(for yes), the calculation continues from that point in the file. If the
user responds {\tt N} (for no), control is returned to the terminal, and
the user can input further statements and commands. Later on he can use
the command {\tt cont;}\ttindex{CONT} to transfer control back to the
point in the file following the last {\tt PAUSE} encountered. A top-level
{\tt pause;}\ttindex{PAUSE} from the user's terminal has no effect.

\chapter{Matrix Calculations} \index{Matrix calculations}

A very powerful feature of {\REDUCE} is the ease with which matrix
calculations can be performed. To extend our syntax to this class of
calculations we need to add another prefix operator, {\tt MAT},
\ttindex{MAT} and a further variable and expression type as follows:

\section{MAT Operator}\ttindex{MAT}

This prefix operator is used to represent $n\times m$ matrices. {\tt MAT}
has {\em n} arguments interpreted as rows of the matrix, each of which is
a list of {\em m} expressions representing elements in that row. For
example, the matrix
\[ \left( \begin{array}{lcr} a & b & c \\ d & e & f \end{array} \right) \]
would be written as {\tt mat((a,b,c),(d,e,f))}.

Note that the single column matrix
\[ \left( \begin{array}{c} x \\ y \end{array} \right) \]
becomes {\tt mat((x),(y))}. The inside parentheses are required to
distinguish it from the single row matrix
\[ \left( \begin{array}{lr} x & y \end{array} \right) \]
that would be written as {\tt mat((x,y))}.

\section{Matrix Variables}

An identifier may be declared a matrix variable by the declaration
{\tt MATRIX}.\ttindex{MATRIX} The size of the matrix may be declared
explicitly in the matrix declaration, or by default in assigning such a
variable to a matrix expression. For example,
{\small\begin{verbatim}
matrix x(2,1),y(3,4),z;
\end{verbatim}}
declares {\tt X} to be a 2 x 1 (column) matrix, {\tt Y} to be a 3 x 4
matrix and {\tt Z} a matrix whose size is to be declared later. Matrix
declarations can appear anywhere in a program.
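For example, given the declaration above, an assignment such as
{\small\begin{verbatim}
z := mat((1,2),(3,4));
\end{verbatim}}
makes {\tt Z} a 2 x 2 matrix.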
Once a symbol is declared to name a matrix, it cannot also be used to name an array, an operator or a procedure, or be used as an ordinary variable. It can however be redeclared to be a matrix, and its size may be changed at that time. Note however that matrices once declared are {\em global\/} in scope, and so can then be referenced anywhere in the program. In other words, a declaration within a block (or a procedure) does not limit the scope of the matrix to that block, nor does the matrix go away on exiting the block (use {\tt CLEAR} instead for this purpose).
An element of a matrix is referred to in the expected manner; thus {\tt x(1,1)} gives the first element of the matrix {\tt X} defined above. References to elements of a matrix whose size has not yet been declared lead to an error. All elements of a matrix whose size is declared are initialized to 0. As a result, a matrix element has an {\em instant evaluation\/}\index{Instant evaluation} property and cannot stand for itself. If this is required, then an operator should be used to name the matrix elements as in: {\small\begin{verbatim} matrix m; operator x; m := mat((x(1,1),x(1,2))); \end{verbatim}}
\section{Matrix Expressions}
These follow the normal rules of matrix algebra as defined by the following syntax:\ttindex{MAT} {\small\begin{verbatim} <matrix expression> ::= MAT<matrix description>|<matrix variable>| <scalar expression>*<matrix expression>| <matrix expression>*<matrix expression>| <matrix expression>+<matrix expression>| <matrix expression>^<integer>| <matrix expression>/<matrix expression> \end{verbatim}}
Sums and products of matrix expressions must be of compatible size; otherwise an error will result during their evaluation. Similarly, only square matrices may be raised to a power. A negative power is computed as the inverse of the matrix raised to the corresponding positive power. {\tt a/b} is interpreted as {\tt a*b\verb|^|(-1)}.
{\it Examples:} Assuming {\tt X} and {\tt Y} have been declared as matrices, the following are matrix expressions: {\small\begin{verbatim} y y^2*x-3*y^(-2)*x y + mat((1,a),(b,c))/2 \end{verbatim}}
The computation of the quotient of two matrices normally uses a two-step elimination method due to Bareiss. An alternative using Cramer's method is also available. This is usually less efficient than the Bareiss method unless the matrices are large and dense, although we have no solid statistics on this as yet. To use Cramer's method instead, the switch {\tt CRAMER}\ttindex{CRAMER} should be turned on.
\section{Operators with Matrix Arguments}
The operator {\tt LENGTH}\ttindex{LENGTH} applied to a matrix returns a list of the number of rows and columns in the matrix. Other operators useful in matrix calculations are defined in the following subsections. Attention is also drawn to the LINALG \extendedmanual{(chapter~\ref{LINALG})} and NORMFORM \extendedmanual{(chapter~\ref{NORMFORM})} packages.
\subsection{DET Operator}\ttindex{DET}
Syntax: {\small\begin{verbatim} DET(EXPRN:matrix_expression):algebraic. \end{verbatim}}
The operator {\tt DET} is used to represent the determinant of a square matrix expression.
E.g., {\small\begin{verbatim} det(y^2) \end{verbatim}} is a scalar expression whose value is the determinant of the square of the matrix {\tt Y}, and {\small\begin{verbatim} det mat((a,b,c),(d,e,f),(g,h,j)); \end{verbatim}} is a scalar expression whose value is the determinant of the matrix \[ \left( \begin{array}{lcr} a & b & c \\ d & e & f \\ g & h & j \end{array} \right) \]
Determinant expressions have the {\em instant evaluation\/} property. \index{Instant evaluation} In other words, the statement {\small\begin{verbatim} let det mat((a,b),(c,d)) = 2; \end{verbatim}} sets the {\em value\/} of the determinant to 2, and does not set up a rule for the determinant itself.
\subsection{MATEIGEN Operator}\ttindex{MATEIGEN}
Syntax: {\small\begin{verbatim} MATEIGEN(EXPRN:matrix_expression,ID):list. \end{verbatim}}
{\tt MATEIGEN} calculates the eigenvalue equation and the corresponding eigenvectors of a matrix, using the variable {\tt ID} to denote the eigenvalue. A square free decomposition of the characteristic polynomial is carried out. The result is a list of lists of 3 elements, where the first element is a square free factor of the characteristic polynomial, the second its multiplicity and the third the corresponding eigenvector (as an {\em n} by 1 matrix). If the square free decomposition was successful, the product of the first elements in the lists is the minimal polynomial. In the case of degeneracy, several eigenvectors can exist for the same eigenvalue, which manifests itself in the appearance of more than one arbitrary variable in the eigenvector. To extract the various parts of the result, use the operations defined on lists.
{\it Example:} The command {\small\begin{verbatim} mateigen(mat((2,-1,1),(0,1,1),(-1,1,1)),eta); \end{verbatim}} gives the output {\small\begin{verbatim} {{ETA - 1,2, [ARBCOMPLEX(1)] [ ] [ARBCOMPLEX(1)] [ ] [ 0 ] }, {ETA - 2,1, [ 0 ] [ ] [ARBCOMPLEX(2)] [ ] [ARBCOMPLEX(2)] }} \end{verbatim}}
\subsection{TP Operator}\ttindex{TP}
Syntax: {\small\begin{verbatim} TP(EXPRN:matrix_expression):matrix. \end{verbatim}}
This operator takes a single matrix argument and returns its transpose.
\subsection{Trace Operator}\ttindex{TRACE}
Syntax: {\small\begin{verbatim} TRACE(EXPRN:matrix_expression):algebraic. \end{verbatim}}
The operator {\tt TRACE} is used to represent the trace of a square matrix.
\subsection{Matrix Cofactors}\ttindex{COFACTOR}
Syntax: {\small\begin{verbatim} COFACTOR(EXPRN:matrix_expression,ROW:integer,COLUMN:integer): algebraic \end{verbatim}}
The operator {\tt COFACTOR} returns the cofactor of the element in row {\tt ROW} and column {\tt COLUMN} of the matrix {\tt EXPRN}. Errors occur if {\tt ROW} or {\tt COLUMN} do not simplify to integer expressions or if {\tt EXPRN} is not square.
\subsection{NULLSPACE Operator}\ttindex{NULLSPACE}
Syntax: {\small\begin{verbatim} NULLSPACE(EXPRN:matrix_expression):list \end{verbatim}}
{\tt NULLSPACE} calculates for a matrix {\tt A} a list of linearly independent vectors (a basis) whose linear combinations satisfy the equation $A x = 0$. The basis is provided in a form such that as many upper components as possible are isolated. Note that with {\tt b := nullspace a} the expression {\tt length b} is the {\em nullity\/} of A, and that {\tt second length a - length b} calculates the {\em rank\/} of A. The rank of a matrix expression can also be found more directly by the {\tt RANK} operator described below.
{\it Example:} The command {\small\begin{verbatim} nullspace mat((1,2,3,4),(5,6,7,8)); \end{verbatim}} gives the output {\small\begin{verbatim} { [ 1 ] [ ] [ 0 ] [ ] [ - 3] [ ] [ 2 ] , [ 0 ] [ ] [ 1 ] [ ] [ - 2] [ ] [ 1 ] } \end{verbatim}}
In addition to the {\REDUCE} matrix form, {\tt NULLSPACE} accepts as input a matrix given as a list of lists, which is interpreted as a row matrix. If that form of input is chosen, the vectors in the result will be represented by lists as well. This additional input syntax facilitates the use of {\tt NULLSPACE} in applications different from classical linear algebra.
\subsection{RANK Operator}\ttindex{RANK}
Syntax: {\small\begin{verbatim} RANK(EXPRN:matrix_expression):integer \end{verbatim}}
{\tt RANK} calculates the rank of its argument, which, like the argument of {\tt NULLSPACE}, can be either a standard matrix expression or a list of lists that can be interpreted either as a row matrix or as a set of equations.
{\it Example:} {\small\begin{verbatim} rank mat((a,b,c),(d,e,f)); \end{verbatim}} returns the value 2.
\section{Matrix Assignments} \index{Matrix assignment}
Matrix expressions may appear on the right-hand side of assignment statements. If the left-hand side of the assignment, which must be a variable, has not already been declared a matrix, it is declared by default to the size of the right-hand side. The variable is then set to the value of the right-hand side.
Such an assignment may be used very conveniently to find the solution of a set of linear equations. For example, to find the solution of the following set of equations {\small\begin{verbatim} a11*x(1) + a12*x(2) = y1 a21*x(1) + a22*x(2) = y2 \end{verbatim}} we simply write {\small\begin{verbatim} x := 1/mat((a11,a12),(a21,a22))*mat((y1),(y2)); \end{verbatim}}
\section{Evaluating Matrix Elements}
Once an element of a matrix has been assigned, it may be referred to in standard array element notation. Thus {\tt y(2,1)} refers to the element in the second row and first column of the matrix {\tt Y}.
\chapter{Procedures}\ttindex{PROCEDURE}
It is often useful to name a statement for repeated use in calculations with varying parameters, or to define a complete evaluation procedure for an operator. {\REDUCE} offers a procedural declaration for this purpose. Its general syntax is: {\small\begin{verbatim} [<procedural type>] PROCEDURE <name>[<varlist>];<statement>; \end{verbatim}} where {\small\begin{verbatim} <varlist> ::= (<variable>,...,<variable>) \end{verbatim}}
This will be explained more fully in the following sections.
In the algebraic mode of {\REDUCE} the {\tt <procedural type>} can be omitted, since the default is {\tt ALGEBRAIC}. Procedures of type {\tt INTEGER} or {\tt REAL} may also be used. In the former case, the system checks that the value of the procedure is an integer. At present, such checking is not done for a real procedure, although this will change in the future when a more complete type checking mechanism is installed. Users should therefore only use these types when appropriate. An empty variable list may also be omitted.
All user-defined procedures are automatically declared to be operators.
In order to allow users relatively easy access to the whole {\REDUCE} source program, system procedures are not protected against user redefinition. If a procedure is redefined, a message {\small\begin{verbatim} *** <procedure name> REDEFINED \end{verbatim}} is printed.
If this occurs, and the user is not redefining his own procedure, he is well advised to rename it, and possibly start over (because he has {\em already\/} redefined some internal procedure whose correct functioning may be required for his job!) All required procedures should be defined at the top level, since they have global scope throughout a program. In particular, an attempt to define a procedure within a procedure will cause an error to occur. \section{Procedure Heading}\index{Procedure heading} Each procedure has a heading consisting of the word {\tt PROCEDURE} (optionally preceded by the word {\tt ALGEBRAIC}), followed by the name of the procedure to be defined, and followed by its formal parameters -- the symbols that will be used in the body of the definition to illustrate what is to be done. There are three cases: \begin{enumerate} \item No parameters. Simply follow the procedure name with a terminator (semicolon or dollar sign). {\small\begin{verbatim} procedure abc; \end{verbatim}} When such a procedure is used in an expression or command, {\tt abc()}, with empty parentheses, must be written. \item One parameter. Enclose it in parentheses {\em or\/} just leave at least one space, then follow with a terminator. {\small\begin{verbatim} procedure abc(x); \end{verbatim}} or {\small\begin{verbatim} procedure abc x; \end{verbatim}} \item More than one parameter. Enclose them in parentheses, separated by commas, then follow with a terminator. {\small\begin{verbatim} procedure abc(x,y,z); \end{verbatim}} \end{enumerate} Referring to the last example, if later in some expression being evaluated the symbols {\tt abc(u,p*q,123)} appear, the operations of the procedure body will be carried out as if {\tt X} had the same value as {\tt U} does, {\tt Y} the same value as {\tt p*q} does, and {\tt Z} the value 123. The values of {\tt X}, {\tt Y}, {\tt Z}, after the procedure body operations are completed are unchanged. So, normally, are the values of {\tt U}, {\tt P}, {\tt Q}, and (of course) 123. (This is technically referred to as call by value.)\index{Call by value} The reader will have noted the word {\em normally\/} a few lines earlier. The call by value protections can be bypassed if necessary, as described elsewhere. \section{Procedure Body}\index{Procedure body} Following the delimiter that ends the procedure heading must be a {\em single} statement defining the action to be performed or the value to be delivered. A terminator must follow the statement. If it is a semicolon, the name of the procedure just defined is printed. It is not printed if a dollar sign is used. If the result wanted is given by a formula of some kind, the body is just that formula, using the variables in the procedure heading. {\it Simple Example:} If {\tt f(x)} is to mean {\tt (x+5)*(x+6)/(x+7)}, the entire procedure definition could read {\small\begin{verbatim} procedure f x; (x+5)*(x+6)/(x+7); \end{verbatim}} Then {\tt f(10)} would evaluate to 240/17, {\tt f(a-6)} to {\tt A*(A-1)/(A+1)}, and so on. {\it More Complicated Example:} Suppose we need a function {\tt p(n,x)} that, for any positive integer {\tt N}, is the Legendre polynomial\index{Legendre polynomials} of order {\em n}. 
We can define this operator using the textbook formula defining these functions: \begin{displaymath} p_n(x) = \displaystyle{1\over{n!}}\ \displaystyle{d^n\over dy^n}\ \displaystyle{{1\over{(y^2 - 2xy + 1) ^{{1\over2}}}}}\Bigg\vert_{y=0} \end{displaymath} Put into words, the Legendre polynomial $p_n(x)$ is the result of substituting $y=0$ in the $n^{th}$ partial derivative with respect to $y$ of a certain fraction involving $x$ and $y$, then dividing that by $n!$. This verbal formula can easily be written in {\REDUCE}: {\small\begin{verbatim} procedure p(n,x); sub(y=0,df(1/(y^2-2*x*y+1)^(1/2),y,n)) /(for i:=1:n product i); \end{verbatim}} Having input this definition, the expression evaluation {\small\begin{verbatim} 2p(2,w); \end{verbatim}} would result in the output {\small\begin{verbatim} 2 3*W - 1 . \end{verbatim}} If the desired process is best described as a series of steps, then a group or compound statement can be used. \extendedmanual{\newpage} {\it Example:} The above Legendre polynomial example can be rewritten as a series of steps instead of a single formula as follows: {\small\begin{verbatim} procedure p(n,x); begin scalar seed,deriv,top,fact; seed:=1/(y^2 - 2*x*y +1)^(1/2); deriv:=df(seed,y,n); top:=sub(y=0,deriv); fact:=for i:=1:n product i; return top/fact end; \end{verbatim}} Procedures may also be defined recursively. In other words, the procedure body\index{Procedure body} can include references to the procedure name itself, or to other procedures that themselves reference the given procedure. As an example, we can define the Legendre polynomial through its standard recurrence relation: {\small\begin{verbatim} procedure p(n,x); if n<0 then rederr "Invalid argument to P(N,X)" else if n=0 then 1 else if n=1 then x else ((2*n-1)*x*p(n-1,x)-(n-1)*p(n-2,x))/n; \end{verbatim}} The operator {\tt REDERR}\ttindex{REDERR} in the above example provides for a simple error exit from an algebraic procedure (and also a block). It can take a string as argument. It should be noted however that all the above definitions of {\tt p(n,x)} are quite inefficient if extensive use is to be made of such polynomials, since each call effectively recomputes all lower order polynomials. It would be better to store these expressions in an array, and then use say the recurrence relation to compute only those polynomials that have not already been derived. We leave it as an exercise for the reader to write such a definition. \section{Using LET Inside Procedures} By using {\tt LET}\ttindex{LET} instead of an assignment in the procedure body\index{Procedure body} it is possible to bypass the call-by-value \index{Call by value} protection. If {\tt X} is a formal parameter or local variable of the procedure (i.e. is in the heading or in a local declaration), and {\tt LET} is used instead of {\tt :=} to make an assignment to {\tt X}, e.g. {\small\begin{verbatim} let x = 123; \end{verbatim}} then it is the variable that is the value of {\tt X} that is changed. This effect also occurs with local variables defined in a block. If the value of {\tt X} is not a variable, but a more general expression, then it is that expression that is used on the left-hand side of the {\tt LET} statement. For example, if {\tt X} had the value {\tt p*q}, it is as if {\tt let p*q = 123} had been executed. \section{LET Rules as Procedures} The {\tt LET}\ttindex{LET} statement offers an alternative syntax and semantics for procedure definition. 
In place of {\small\begin{verbatim} procedure abc(x,y,z); <procedure body>; \end{verbatim}} one can write {\small\begin{verbatim} for all x,y,z let abc(x,y,z) = <procedure body>; \end{verbatim}}
There are several differences to note. If the procedure body contains an assignment to one of the formal parameters, e.g. {\small\begin{verbatim} x := 123; \end{verbatim}} in the {\tt PROCEDURE} case it is a variable holding a copy of the first actual argument that is changed. The actual argument is not changed. In the {\tt LET} case, the actual argument is changed. Thus, if {\tt ABC} is defined using {\tt LET}, and {\tt abc(u,v,w)} is evaluated, the value of {\tt U} changes to 123. That is, the {\tt LET} form of definition allows the user to bypass the protections that are enforced by the call by value conventions of standard {\tt PROCEDURE} definitions.
{\it Example:} We take our earlier {\tt FACTORIAL}\ttindex{FACTORIAL} procedure and write it as a {\tt LET} statement. {\small\begin{verbatim} for all n let factorial n = begin scalar m,s; m:=1; s:=n; l1: if s=0 then return m; m:=m*s; s:=s-1; go to l1 end; \end{verbatim}}
The reader will notice that we introduced a new local variable, {\tt S}, and set it equal to {\tt N}. The original form of the procedure contained the statement {\tt n:=n-1;}. If the user asked for the value of {\tt factorial(5)} then {\tt N} would correspond to, not just have the value of, 5, and {\REDUCE} would object to trying to execute the statement 5 := $5-1$.
If {\tt PQR} is a procedure with no parameters, {\small\begin{verbatim} procedure pqr; <procedure body>; \end{verbatim}} it can be written as a {\tt LET} statement quite simply: {\small\begin{verbatim} let pqr = <procedure body>; \end{verbatim}}
To call {\em procedure\/} {\tt PQR}, if defined in the latter form, the empty parentheses would not be used: use {\tt PQR} not {\tt PQR()} where a call on the procedure is needed.
The two notations for a procedure with no arguments can be combined. {\tt PQR} can be defined in the standard {\tt PROCEDURE} form. Then a {\tt LET} statement {\small\begin{verbatim} let pqr = pqr(); \end{verbatim}} would allow a user to use {\tt PQR} instead of {\tt PQR()} in calling the procedure.
A feature available with {\tt LET}-defined procedures and not with procedures defined in the standard way is the possibility of defining partial functions.\index{Function} {\small\begin{verbatim} for all x such that numberp x let uvw(x)=<procedure body>; \end{verbatim}}
Now {\tt UVW} of an integer would be calculated as prescribed by the procedure body, while {\tt UVW} of a general argument, such as {\tt Z} or {\tt p+q} (assuming these evaluate to themselves) would simply stay {\tt uvw(z)} or {\tt uvw(p+q)} as the case may be.
\section{REMEMBER Statement}\ttindex{REMEMBER}
Setting the remember option for an algebraic procedure by {\small\begin{verbatim} REMEMBER (PROCNAME:procedure); \end{verbatim}} saves all intermediate results of such procedure evaluations, including recursive calls. Subsequent calls to the procedure can then be satisfied from the saved results, and thus the number of evaluations (or the complexity) can be reduced. This mode of evaluation costs extra memory, of course. In addition, the procedure must be free of side--effects.
The following examples show the effect of the remember statement on two well--known examples.
\begin{samepage} {\small\begin{verbatim} procedure H(n); % Hofstadter's function if numberp n then << cnn := cnn +1; % counts the calls if n < 3 then 1 else H(n-H(n-1))+H(n-H(n-2))>>; remember h; > << cnn := 0; H(100); cnn>>; 100 % H has been called 100 times only. procedure A(m,n); % Ackermann function if m=0 then n+1 else if n=0 then A(m-1,1) else A(m-1,A(m,n-1)); remember a; A(3,3); \end{verbatim}} \end{samepage} \chapter{User Contributed Packages} \index{User packages} \label{chap-user} The complete {\REDUCE} system includes a number of packages contributed by users that are provided as a service to the user community. Questions regarding these packages should be directed to their individual authors. All such packages have been precompiled as part of the installation process. However, many must be specifically loaded before they can be used. (Those that are loaded automatically are so noted in their description.) You should also consult the user notes for your particular implementation for further information on whether this is necessary. If it is, the relevant command is {\tt LOAD\_PACKAGE},\ttindex{LOAD\_PACKAGE} which takes a list of one or more package names as argument, for example: {\small\begin{verbatim} load_package algint; \end{verbatim}} although this syntax may vary from implementation to implementation. Nearly all these packages come with separate documentation and test files (except those noted here that have no additional documentation), which is included, along with the source of the package, in the {\REDUCE} system distribution. These items should be studied for any additional details on the use of a particular package. Part 2 of this manual contains short documentation for the packages \begin{itemize} %% %%The packages available in the current release of {\REDUCE} are as follows: %% \item {ALGINT: Integration of square roots} (chapter~\ref{ALGINT});\ttindex{ALGINT} %% %%This package, which is an extension of the basic integration package %%distributed with {\REDUCE}, will analytically integrate a wide range of %%expressions involving square roots where the answer exists in that class %%of functions. It is an implementation of the work described in J.H. %%Davenport, ``On the Integration of Algebraic Functions", LNCS 102, %%Springer Verlag, 1981. Both this and the source code should be consulted %%for a more detailed description of this work. %% %%Once the {\tt ALGINT} package has been loaded, using {\tt LOAD\_PACKAGE}, %%one enters an expression for integration, as with the regular integrator, %%for example: %%{\small\begin{verbatim} %% int(sqrt(x+sqrt(x**2+1))/x,x); %%\end{verbatim}} %%If one later wishes to integrate expressions without using the facilities of %%this package, the switch {\tt ALGINT}\ttindex{ALGINT} should be turned %%off. This is turned on automatically when the package is loaded. %% %%The switches supported by the standard integrator (e.g., {\tt TRINT}) %%\ttindex{TRINT} are also supported by this package. In addition, the %%switch {\tt TRA},\ttindex{TRA} if on, will give further tracing %%information about the specific functioning of the algebraic integrator. %% %%There is no additional documentation for this package. %% %%Author: James H. Davenport. %% \item {APPLYSYM: Infinitesimal symmetries of differential equations} (chapter~\ref{APPLYSYM});\ttindex{APPLYSYM} %%\ttindex{APPLYSYM} %% %%This package provides programs APPLYSYM, QUASILINPDE and DETRAFO for %%computing with infinitesimal symmetries of differential equations. 
%% %%Author: Thomas Wolf. %% \item {ARNUM: An algebraic number package} (chapter~\ref{ARNUM});\ttindex{ARNUM} %% %%This package provides facilities for handling algebraic numbers as %%polynomial coefficients in {\REDUCE} calculations. It includes facilities for %%introducing indeterminates to represent algebraic numbers, for calculating %%splitting fields, and for factoring and finding greatest common divisors %%in such domains. %% %%Author: Eberhard Schr\"ufer. %% \item {ASSIST: Useful utilities for various applications} (chapter~\ref{ASSIST});\ttindex{ASSIST} %% %%ASSIST contains a large number of additional general purpose functions %%that allow a user to better adapt \REDUCE\ to various calculational %%strategies and to make the programming task more straightforward and more %%efficient. %% %%Author: Hubert Caprasse. %% \item {AVECTOR: A vector algebra and calculus package} (chapter~\ref{AVECTOR});\ttindex{AVECTOR} %% %%This package provides REDUCE with the ability to perform vector algebra %%using the same notation as scalar algebra. The basic algebraic operations %%are supported, as are differentiation and integration of vectors with %%respect to scalar variables, cross product and dot product, component %%manipulation and application of scalar functions (e.g. cosine) to a vector %%to yield a vector result. %% %%Author: David Harper. %% \item {BOOLEAN: A package for boolean algebra} (chapter~\ref{BOOLEAN}); \ttindex{BOOLEAN} %% %%This package supports the computation with boolean expressions in the %%propositional calculus. The data objects are composed from algebraic %%expressions connected by the infix boolean operators {\bf and}, {\bf or}, %%{\bf implies}, {\bf equiv}, and the unary prefix operator {\bf not}. %%{\bf Boolean} allows you to simplify expressions built from these %%operators, and to test properties like equivalence, subset property etc. %% %%Author: Herbert Melenk. %% \item {CALI: A package for computational commutative algebra} (chapter~\ref{CALI});\ttindex{CALI} %%\ttindex{CALI} %% %%This package contains algorithms for computations in commutative algebra %%closely related to the Gr\"obner algorithm for ideals and modules. Its %%heart is a new implementation of the Gr\"obner algorithm that also allows %%for the computation of syzygies. This implementation is also applicable to %%submodules of free modules with generators represented as rows of a matrix. %% %%Author: Hans-Gert Gr\"abe. %% \item {CAMAL: Calculations in celestial mechanics} (chapter~\ref{CAMAL}); \ttindex{CAMAL} %% %%This packages implements in REDUCE the Fourier transform procedures of the %%CAMAL package for celestial mechanics. %% %%Author: John P. Fitch. %% \item {CHANGEVR: Change of Independent Variable(s) in DEs} (chapter~\ref{CHANGEVR});\ttindex{CHANGEVR} %% %%This package provides facilities for changing the independent variables in %%a differential equation. It is basically the application of the chain rule. %% %%Author: G. \"{U}\c{c}oluk. %% \item {COMPACT: Package for compacting expressions} (chapter~\ref{COMPACT}); \ttindex{COMPACT} %% %%COMPACT is a package of functions for the reduction of a polynomial in the %%presence of side relations. COMPACT applies the side relations to the %%polynomial so that an equivalent expression results with as few terms as %%possible. 
For example, the evaluation of %%{\small\begin{verbatim} %% compact(s*(1-sin x^2)+c*(1-cos x^2)+sin x^2+cos x^2, %% {cos x^2+sin x^2=1}); %%\end{verbatim}} %%yields the result\pagebreak[1] %%\begin{samepage} %%{\small\begin{verbatim} %% 2 2 %% SIN(X) *C + COS(X) *S + 1 . %%\end{verbatim}} %% %%Author: Anthony C. Hearn. %%\end{samepage} %% \item {CONTFR: Approximation of a number by continued fractions} (chapter~\ref{CONTFR});\ttindex{CONTFR} %% %%This package provides for the simultaneous approximation of a real number %%by a continued fraction and a rational number with optional user %%controlled precision (upper bound for numerator). %% %%To use this package, the {\bf misc} package should be loaded. One can then %%use the operator\ttindex{continued\_fraction} to calculate the required %%sequence. For example: %%{\small\begin{verbatim} %% %% continued_fraction pi; -> %% %% 1146408 %% {---------,{3,7,15,1,292,1,1,1,2,1}} %% 364913 %%\end{verbatim}} %% %%There is no further documentation for this package. %% %%Author: Herbert Melenk. %% \item {CRACK: Solving overdetermined systems of PDEs or ODEs} (chapter~\ref{CRACK});\ttindex{CRACK} %% %%CRACK is a package for solving overdetermined systems of partial or %%ordinary differential equations (PDEs, ODEs). Examples of programs which %%make use of CRACK for investigating ODEs (finding symmetries, first %%integrals, an equivalent Lagrangian or a ``differential factorization'') are %%included. %% %%Authors: Andreas Brand, Thomas Wolf. %% \item {CVIT: Fast calculation of Dirac gamma matrix traces} (chapter~\ref{CVIT});\ttindex{CVIT} %% %%This package provides an alternative method for computing traces of Dirac %%gamma matrices, based on an algorithm by Cvitanovich that treats gamma %%matrices as 3-j symbols. %% %%Authors: V.Ilyin, A.Kryukov, A.Rodionov, A.Taranov. %% \item {DEFINT: A definite integration interface for REDUCE} (chapter~\ref{DEFINT});\ttindex{DEFINT} %% %%This package finds the definite integral of an expression in a stated %%interval. It uses several techniques, including an innovative approach %%based on the Meijer G-function, and contour integration. %% %%Authors: Kerry Gaskell, Stanley M. Kameny, Winfried Neun. %% \item {DESIR: Differential linear homogeneous equation solutions in the neighborhood of irregular and regular singular points} (chapter~\ref{DESIR});\ttindex{DESIR} %% %%This package enables the basis of formal solutions to be computed for an %%ordinary homogeneous differential equation with polynomial coefficients %%over Q of any order, in the neighborhood of zero (regular or irregular %%singular point, or ordinary point). %% %%Documentation for this package is in plain text. %% %%Authors: C. Dicrescenzo, F. Richard-Jung, E. Tournier. %% \item {DFPART: Derivatives of generic functions} (chapter~\ref{DFPART});\ttindex{DFPART} %% %%This package supports computations with total and partial derivatives of %%formal function objects. Such computations can be useful in the context %%of differential equations or power series expansions. %% %%Author: Herbert Melenk. %% \item {DUMMY: Canonical form of expressions with dummy variables} (chapter~\ref{DUMMY});\ttindex{DUMMY} %% %%This package allows a user to find the canonical form of expressions %%involving dummy variables. In that way, the simplification of %%polynomial expressions can be fully done. The indeterminates are general %%operator objects endowed with as few properties as possible. In that way %%the package may be used in a large spectrum of applications. 
%% %%Author: Alain Dresse. %% \item {EXCALC: A differential geometry package} (chapter~\ref{EXCALC}); \ttindex{EXCALC} %% %%EXCALC is designed for easy use by all who are familiar with the calculus %%of Modern Differential Geometry. The program is currently able to handle %%scalar-valued exterior forms, vectors and operations between them, as well %%as non-scalar valued forms (indexed forms). It is thus an ideal tool for %%studying differential equations, doing calculations in general relativity %%and field theories, or doing simple things such as calculating the %%Laplacian of a tensor field for an arbitrary given frame. %% %%Author: Eberhard Schr\"ufer. %% \item {FPS: Automatic calculation of formal power series} (chapter~\ref{FPS});\ttindex{FPS} %% %%This package can expand a specific class of functions into their %%corresponding Laurent-Puiseux series. %% %%Authors: Wolfram Koepf and Winfried Neun. %% \item {FIDE: Finite difference method for partial differential equations} (chapter~\ref{FIDE});\ttindex{FIDE} %% %%This package performs automation of the process of numerically %%solving partial differential equations systems (PDES) by means of %%computer algebra. For PDES solving, the finite difference method is applied. %%The computer algebra system REDUCE and the numerical programming %%language FORTRAN are used in the presented methodology. The main aim of %%this methodology is to speed up the process of preparing numerical %%programs for solving PDES. This process is quite often, especially for %%complicated systems, a tedious and time consuming task. %% %%Documentation for this package is in plain text. %% %%Author: Richard Liska. %% \item {GENTRAN: A code generation package} (chapter~\ref{GENTRAN}); \ttindex{GENTRAN} %% %%GENTRAN is an automatic code GENerator and TRANslator. It constructs %%complete numerical programs based on sets of algorithmic specifications %%and symbolic expressions. Formatted FORTRAN, RATFOR, PASCAL or C code can be %%generated through a series of interactive commands or under the control of %%a template processing routine. Large expressions can be automatically %%segmented into subexpressions of manageable size, and a special %%file-handling mechanism maintains stacks of open I/O channels to allow %%output to be sent to any number of files simultaneously and to facilitate %%recursive invocation of the whole code generation process. %% %%Author: Barbara L. Gates. %% \item {GNUPLOT: Display of functions and surfaces} (chapter~\ref{GNUPLOT});\ttindex{PLOT}\ttindex{GNUPLOT} %% %%This package is an interface to the popular GNUPLOT package. %%It allows you to display functions in 2D and surfaces in 3D %%on a variety of output devices including X terminals, PC monitors, and %%postscript and Latex printer files. %% %%NOTE: The GNUPLOT package may not be included in all versions of REDUCE. %% %%Author: Herbert Melenk. %% \item {GROEBNER: A Gr\"obner basis package} (chapter~\ref{GROEBNER}); \ttindex{GROEBNER} %% %%GROEBNER\ttindex{GROEBNER} is a package for the computation of Gr\"obner %%Bases using the Buchberger algorithm and related methods %%for polynomial ideals and modules. It can be used over a variety of %%different coefficient domains, and for different variable and term %%orderings. %% %%Gr\"obner Bases can be used for various purposes in commutative %%algebra, e.g. 
for elimination of variables,\index{Variable elimination} %%converting surd expressions to implicit polynomial form, %%computation of dimensions, solution of polynomial equation systems %%\index{Polynomial equations} etc. %%The package is also used internally by the {\tt SOLVE}\ttindex{SOLVE} %%operator. %% %%Authors: Herbert Melenk, H.M. M\"oller and Winfried Neun. %% \item {IDEALS: Arithmetic for polynomial ideals} (chapter~\ref{IDEALS}); \ttindex{IDEALS} %% %%This package implements the basic arithmetic for polynomial ideals by %%exploiting the Gr\"obner bases package of REDUCE. In order to save %%computing time all intermediate Gr\"obner bases are stored internally such %%that time consuming repetitions are inhibited. %% %%Author: Herbert Melenk. %% \item {INEQ: Support for solving inequalities} (chapter~\ref{INEQ});\ttindex{INEQ} %% %%This package supports the operator {\bf ineq\_solve} that %%tries to solves single inequalities and sets of coupled inequalities. %% %%Author: Herbert Melenk. %% \item {INVBASE: A package for computing involutive bases} (chapter~\ref{INVBASE});\ttindex{INVBASE} %% %%Involutive bases are a new tool for solving problems in connection with %%multivariate polynomials, such as solving systems of polynomial equations %%and analyzing polynomial ideals. An involutive basis of polynomial ideal %%is nothing but a special form of a redundant Gr\"obner basis. The %%construction of involutive bases reduces the problem of solving polynomial %%systems to simple linear algebra. %% %%Authors: A.Yu. Zharkov and Yu.A. Blinkov. %% \item {LAPLACE: Laplace and inverse Laplace transforms} (chapter~\ref{LAPLACE});\ttindex{LAPLACE} %% %%This package can calculate ordinary and inverse Laplace transforms of %%expressions. Documentation is in plain text. %% %%Authors: C. Kazasov, M. Spiridonova, V. Tomov. %% \item {LIE: Functions for the classification of real n-dimensional Lie algebras} (chapter~\ref{LIE});\ttindex{LIE} %%algebras} %%\ttindex{LIE} %% %%{\bf LIE} is a package of functions for the classification of real %%n-dimensional Lie algebras. It consists of two modules: {\bf liendmc1} %%and {\bf lie1234}. With the help of the functions in the {\bf liendmcl} %%module, real n-dimensional Lie algebras $L$ with a derived algebra %%$L^{(1)}$ of dimension 1 can be classified. %% %%Authors: Carsten and Franziska Sch\"obel. %% \item {LIMITS: A package for finding limits} (chapter~\ref{LIMITS});\ttindex{LIMITS} %% %%LIMITS is a fast limit package for REDUCE for functions which are %%continuous except for computable poles and singularities, based on some %%earlier work by Ian Cohen and John P. Fitch. The Truncated Power Series %%package is used for non-critical points, at which the value of the %%function is the constant term in the expansion around that point. %%L'H\^opital's rule is used in critical cases, with preprocessing of %%$\infty - \infty$ forms and reformatting of product forms in order to %%be able to apply l'H\^opital's rule. A limited amount of bounded arithmetic %%is also employed where applicable. %% %%This package defines a {\tt LIMIT} operator, called with the syntax: %%{\small\begin{verbatim} %% LIMIT(EXPRN:algebraic,VAR:kernel,LIMPOINT:algebraic): %% algebraic. %%\end{verbatim}} %%For example: %%{\small\begin{verbatim} %% limit(x*sin(1/x),x,infinity) -> 1 %% limit(sin x/x^2,x,0) -> INFINITY %%\end{verbatim}} %%Direction-dependent limit operators {\tt LIMIT!+} and {\tt LIMIT!-} are %%also defined. %% %%This package loads automatically. %% %%Author: Stanley L. Kameny. 
%% \item {LINALG: Linear algebra package} (chapter~\ref{LINALG});\ttindex{LINALG} %% %%This package provides a selection of functions that are useful %%in the world of linear algebra. %% %%Author: Matt Rebbeck. %% \item {MODSR: Modular solve and roots} (chapter~\ref{MODSR});\ttindex{MODSR} %% %%This package supports solve (M\_SOLVE) and roots (M\_ROOTS) operators for %%modular polynomials and modular polynomial systems. The moduli need not %%be primes. M\_SOLVE requires a modulus to be set. M\_ROOTS takes the %%modulus as a second argument. For example: %% %%{\small\begin{verbatim} %%on modular; setmod 8; %%m_solve(2x=4); -> {{X=2},{X=6}} %%m_solve({x^2-y^3=3}); %% -> {{X=0,Y=5}, {X=2,Y=1}, {X=4,Y=5}, {X=6,Y=1}} %%m_solve({x=2,x^2-y^3=3}); -> {{X=2,Y=1}} %%off modular; %%m_roots(x^2-1,8); -> {1,3,5,7} %%m_roots(x^3-x,7); -> {0,1,6} %%\end{verbatim}} %% %%There is no further documentation for this package. %% %%Author: Herbert Melenk. %% \item {NCPOLY: Non--commutative polynomial ideals} (chapter~\ref{NCPOLY});\ttindex{NCPOLY} %%\ttindex{NCPOLY} %% %%This package allows the user to set up automatically a consistent %%environment for computing in an algebra where the non--commutativity is %%defined by Lie-bracket commutators. The package uses the {REDUCE} {\bf %%noncom} mechanism for elementary polynomial arithmetic; the commutator %%rules are automatically computed from the Lie brackets. %% %%Authors: Herbert Melenk and Joachim Apel. %% \item {NORMFORM: Computation of matrix normal forms} (chapter~\ref{NORMFORM});\ttindex{NORMFORM} %% %%This package contains routines for computing the following %%normal forms of matrices: %%\begin{itemize} %%\item smithex\_int %%\item smithex %%\item frobenius %%\item ratjordan %%\item jordansymbolic %%\item jordan. %%\end{itemize} %% %%Author: Matt Rebbeck. %% \item {NUMERIC: Solving numerical problems} (chapter~\ref{NUMERIC});\ttindex{NUMERIC} %%\ttindex{NUM\_SOLVE}\index{Newton's method}\ttindex{NUM\_ODESOLVE} %%\ttindex{BOUNDS}\index{Chebyshev fit} %%\ttindex{NUM\_MIN}\index{Minimum}\ttindex{NUM\_INT}\index{Quadrature} %%This package implements basic algorithms of numerical analysis. %%These include: %%\begin{itemize} %%\item solution of algebraic equations by Newton's method %%{\small\begin{verbatim} %% num_solve({sin x=cos y, x + y = 1},{x=1,y=2}) %%\end{verbatim}} %%\item solution of ordinary differential equations %%{\small\begin{verbatim} %% num_odesolve(df(y,x)=y,y=1,x=(0 .. 1), iterations=5) %%\end{verbatim}} %%\item bounds of a function over an interval %%{\small\begin{verbatim} %% bounds(sin x+x,x=(1 .. 2)); %%\end{verbatim}} %%\item minimizing a function (Fletcher Reeves steepest descent) %%{\small\begin{verbatim} %% num_min(sin(x)+x/5, x); %%\end{verbatim}} %%\item Chebyshev curve fitting %%{\small\begin{verbatim} %% chebyshev_fit(sin x/x,x=(1 .. 3),5); %%\end{verbatim}} %%\item numerical quadrature %%{\small\begin{verbatim} %% num_int(sin x,x=(0 .. pi)); %%\end{verbatim}} %%\end{itemize} %% %%Author: Herbert Melenk. %% \item {ODESOLVE: Ordinary differential equations solver} (chapter~\ref{ODESOLVE});\ttindex{ODESOLVE} %% %%The ODESOLVE package is a solver for ordinary differential equations. At %%the present time it has very limited capabilities. It can handle only a %%single scalar equation presented as an algebraic expression or equation, %%and it can solve only first-order equations of simple types, linear %%equations with constant coefficients and Euler equations. 
These solvable %%types are exactly those for which Lie symmetry techniques give no useful %%information. For example, the evaluation of %%{\small\begin{verbatim} %% depend(y,x); %% odesolve(df(y,x)=x**2+e**x,y,x); %%\end{verbatim}} %%yields the result %%{\small\begin{verbatim} %% X 3 %% 3*E + 3*ARBCONST(1) + X %% {Y=---------------------------} %% 3 %%\end{verbatim}} %% %%Main Author: Malcolm A.H. MacCallum. %% %%Other contributors: Francis Wright, Alan Barnes. %% \item {ORTHOVEC: Manipulation of scalars and vectors} (chapter~\ref{ORTHOVEC});\ttindex{ORTHOVEC} %% %%ORTHOVEC is a collection of REDUCE procedures and operations which %%provide a simple-to-use environment for the manipulation of scalars and %%vectors. Operations include addition, subtraction, dot and cross %%products, division, modulus, div, grad, curl, laplacian, differentiation, %%integration, and Taylor expansion. %% %%Author: James W. Eastwood. %% \item {PHYSOP: Operator calculus in quantum theory} (chapter~\ref{PHYSOP});\ttindex{PHYSOP} %% %%This package has been designed to meet the requirements of theoretical %%physicists looking for a computer algebra tool to perform complicated %%calculations in quantum theory with expressions containing operators. %%These operations consist mainly of the calculation of commutators between %%operator expressions and in the evaluations of operator matrix elements in %%some abstract space. %% %%Author: Mathias Warns. %% \item {PM: A REDUCE pattern matcher} (chapter~\ref{PM});\ttindex{PM} %% %%PM is a general pattern matcher similar in style to those found in systems %%such as SMP and Mathematica, and is based on the pattern matcher described %%in Kevin McIsaac, ``Pattern Matching Algebraic Identities'', SIGSAM Bulletin, %%19 (1985), 4-13. %% %%Documentation for this package is in plain text. %% %%Author: Kevin McIsaac. %% \item {RANDPOLY: A random polynomial generator} (chapter~\ref{RANDPOLY}); \ttindex{RANDPOLY} %% %%This package is based on a port of the Maple random polynomial %%generator together with some support facilities for the generation %%of random numbers and anonymous procedures. %% %%Author: Francis J. Wright. %% \item {REACTEQN: Support for chemical reaction equation systems} (chapter~\ref{REACTEQN});\ttindex{REACTEQN} %% %%This package allows a user to transform chemical reaction systems into %%ordinary differential equation systems (ODE) corresponding to the laws of %%pure mass action. %% %%Documentation for this package is in plain text. %% %%Author: Herbert Melenk. %% \item {RESET: Code to reset REDUCE to its initial state} (chapter~\ref{RESET});\ttindex{RESET} %% %%This package defines a command command RESETREDUCE that works through the %%history of previous commands, and clears any values which have been %%assigned, plus any rules, arrays and the like. It also sets the various %%switches to their initial values. It is not complete, but does work for %%most things that cause a gradual loss of space. It would be relatively %%easy to make it interactive, so allowing for selective resetting. %% %%There is no further documentation on this package. %% %%Author: John Fitch. %% \item {RESIDUE: A residue package} (chapter~\ref{RESIDUE});\ttindex{RESIDUE} %% %%This package supports the calculation of residues of arbitrary %%expressions. %% %%Author: Wolfram Koepf. %% \item {RLFI: REDUCE LaTeX formula interface} (chapter~\ref{RLFI});\ttindex{RLFI} %% %%This package adds \LaTeX syntax to REDUCE. 
Text generated by REDUCE in %%this mode can be directly used in \LaTeX source documents. Various %%mathematical constructions are supported by the interface including %%subscripts, superscripts, font changing, Greek letters, divide-bars, %%integral and sum signs, derivatives, and so on. %% %%Author: Richard Liska. %% \item {RSOLVE: Rational/integer polynomial solvers} (chapter~\ref{RSOLVE});\ttindex{RSOLVE} %% %%This package provides operators that compute the exact rational zeros %%of a single univariate polynomial using fast modular methods. The %%algorithm used is that described by R. Loos (1983): Computing rational %%zeros of integral polynomials by $p$-adic expansion, {\it SIAM J. %%Computing}, {\bf 12}, 286--293. %% %%Author: Francis J. Wright. %% \item {ROOTS: A REDUCE root finding package} (chapter~\ref{ROOTS});\ttindex{ROOTS} %% %%This root finding package can be used to find some or all of the roots of a %%univariate polynomial with real or complex coefficients, to the accuracy %%specified by the user. %% %%It is designed so that it can be used as an independent package, or it may %%be called from {\tt SOLVE} if {\tt ROUNDED} is on. For example, %%the evaluation of %%{\small\begin{verbatim} %% on rounded,complex; %% solve(x**3+x+5,x); %%\end{verbatim}} %%yields the result %%{\small\begin{verbatim} %% {X= - 1.51598,X=0.75799 + 1.65035*I,X=0.75799 - 1.65035*I} %%\end{verbatim}} %% %%This package loads automatically. %% %%Author: Stanley L. Kameny. %% \item {SCOPE: REDUCE source code optimization package} (chapter~\ref{SCOPE});\ttindex{SCOPE} %% %%SCOPE is a package for the production of an optimized form of a set of %%expressions. It applies an heuristic search for common (sub)expressions %%to almost any set of proper REDUCE assignment statements. The %%output is obtained as a sequence of assignment statements. GENTRAN is %%used to facilitate expression output. %% %%Author: J.A. van Hulzen. %% \item {SETS: A basic set theory package} (chapter~\ref{SETS});\ttindex{SETS} %% %%The SETS package provides algebraic-mode support for set operations on %%lists regarded as sets (or representing explicit sets) and on implicit %%sets represented by identifiers. %% %%Author: Francis J. Wright. %% \item {SPDE: A package for finding symmetry groups of {PDE}'s} (chapter~\ref{SPDE});\ttindex{SPDE} %% %%The package SPDE provides a set of functions which may be used to %%determine the symmetry group of Lie- or point-symmetries of a given system %%of partial differential equations. In many cases the determining system is %%solved completely automatically. In other cases the user has to provide %%additional input information for the solution algorithm to terminate. %% %%Author: Fritz Schwarz. 
%% \item {SPECFN: Package for special functions} (chapter~\ref{SPECFN}); \ttindex{SPECFN} %% %%\index{Gamma function} \ttindex{Gamma} %%\index{Digamma function} \ttindex{Digamma} %%\index{Polygamma functions} \ttindex{Polygamma} %%\index{Pochhammer's symbol} \ttindex{Pochhammer} %%\index{Euler numbers} \ttindex{Euler} %%\index{Bernoulli numbers} \ttindex{Bernoulli} %%\index{Zeta function (Riemann's)} \ttindex{Zeta} %%\index{Bessel functions}\ttindex{BesselJ}\ttindex{BesselY} %% \ttindex{BesselK}\ttindex{BesselI} %%\index{Hankel functions}\ttindex{Hankel1}\ttindex{Hankel2} %%\index{Kummer functions}\ttindex{KummerM}\ttindex{KummerU} %%\index{Struve functions}\ttindex{StruveH}\ttindex{StruveL} %%\index{Lommel functions}\ttindex{Lommel1}\ttindex{Lommel2} %%\index{Polygamma functions}\ttindex{Polygamma} %%\index{Beta function} \ttindex{Beta} %%\index{Whittaker functions}\ttindex{WhittakerM} %% \ttindex{WhittakerW} %%\index{Dilogarithm function} \ttindex{Dilog} %%\index{Psi function} \ttindex{Psi} %%\index{Orthogonal polynomials} %%\index{Hermite polynomials} \ttindex{HermiteP} %%\index{Jacobi's polynomials} \ttindex{JacobiP} %%\index{Legendre polynomials} \ttindex{LegendreP} %%\index{Laguerre polynomials} \ttindex{LaguerreP} %%\index{Chebyshev polynomials} \ttindex{ChebyshevT}\ttindex{ChebyshevU} %%\index{Gegenbauer polynomials}\ttindex{GegenbauerP} %%\index{Euler polynomials} \ttindex{EulerP} %%\index{Binomial coefficients} \ttindex{Binomial} %%\index{Stirling numbers}\ttindex{Stirling1}\ttindex{Stirling2} %% %%This special function package is separated into two portions to make it %%easier to handle. The packages are called SPECFN and SPECFN2. The first %%one is more general in nature, whereas the second is devoted to special %%special functions. Documentation for the first package can be found in %%the file specfn.tex in the ``doc'' directory, and examples in specfn.tst %%and specfmor.tst in the examples directory. %% %%The package SPECFN is designed to provide algebraic and numerical %%manipulations of several common special functions, namely: %% %%\begin{itemize} %%\item Bernoulli Numbers and Euler Numbers; %%\item Stirling Numbers; %%\item Binomial Coefficients; %%\item Pochhammer notation; %%\item The Gamma function; %%\item The Psi function and its derivatives; %%\item The Riemann Zeta function; %%\item The Bessel functions J and Y of the first and second kind; %%\item The modified Bessel functions I and K; %%\item The Hankel functions H1 and H2; %%\item The Kummer hypergeometric functions M and U; %%\item The Beta function, and Struve, Lommel and Whittaker functions; %%\item The Exponential Integral, the Sine and Cosine Integrals; %%\item The Hyperbolic Sine and Cosine Integrals; %%\item The Fresnel Integrals and the Error function; %%\item The Dilog function; %%\item Hermite Polynomials; %%\item Jacobi Polynomials; %%\item Legendre Polynomials; %%\item Laguerre Polynomials; %%\item Chebyshev Polynomials; %%\item Gegenbauer Polynomials; %%\item Euler Polynomials; %%\item Bernoulli Polynomials. %%\end{itemize} %% %%Author: Chris Cannam, with contributions from Winfried Neun, Herbert %%Melenk, Victor Adamchik, Francis Wright and several others. %% \item {SPECFN2: Package for special special functions} (chapter~\ref{SPECFN2});\ttindex{SPECFN2} %% %%\index{Generalized Hypergeometric functions} %%\index{Meijer's G function} %% %%This package provides algebraic manipulations of generalized %%hypergeometric functions and Meijer's G function. 
Generalized %%hypergeometric functions are simplified towards special functions and %%Meijer's G function is simplified towards special functions or generalized %%hypergeometric functions. %% %%Author: Victor Adamchik, with major updates by Winfried Neun. %% \item {SUM: A package for series summation} (chapter~\ref{SUM});\ttindex{SUM} %% %%This package implements the Gosper algorithm for the summation of series. %%It defines operators {\tt SUM} and {\tt PROD}. The operator {\tt SUM} %%returns the indefinite or definite summation of a given expression, and %%{\tt PROD} returns the product of the given expression. %% %%This package loads automatically. %% %%Author: Fujio Kako. %% \item {SYMMETRY: Operations on symmetric matrices} (chapter~\ref{SYMMETRY}); \ttindex{SYMMETRY} %% %%This package computes symmetry-adapted bases and block diagonal forms of %%matrices which have the symmetry of a group. The package is the %%implementation of the theory of linear representations for small finite %%groups such as the dihedral groups. %% %%Author: Karin Gatermann. %% \item {TAYLOR: Manipulation of Taylor series} (chapter~\ref{TAYLOR});\ttindex{TAYLOR} %% %%This package carries out the Taylor expansion of an expression in one or %%more variables and efficient manipulation of the resulting Taylor series. %%Capabilities include basic operations (addition, subtraction, %%multiplication and division) and also application of certain algebraic and %%transcendental functions. %% %%Author: Rainer Sch\"opf. %% \item {TPS: A truncated power series package} (chapter~\ref{TPS}); \ttindex{TPS}\ttindex{PS} %% %%This package implements formal Laurent series expansions in one variable %%using the domain mechanism of REDUCE. This means that power series %%objects can be added, multiplied, differentiated etc., like other first %%class objects in the system. A lazy evaluation scheme is used and thus %%terms of the series are not evaluated until they are required for printing %%or for use in calculating terms in other power series. The series are %%extendible giving the user the impression that the full infinite series is %%being manipulated. The errors that can sometimes occur using series that %%are truncated at some fixed depth (for example when a term in the required %%series depends on terms of an intermediate series beyond the truncation %%depth) are thus avoided. %% %%Authors: Alan Barnes and Julian Padget. %% \item {TRI: TeX REDUCE interface} (chapter~\ref{TRI});\ttindex{TRI} %% %%This package provides facilities written in REDUCE-Lisp for typesetting %%REDUCE formulas using \TeX. The \TeX-REDUCE-Interface incorporates three %%levels of \TeX output: without line breaking, with line breaking, and %%with line breaking plus indentation. %% %%Author: Werner Antweiler. %% \item {TRIGSIMP: Simplification and factorization of trigonometric and hyperbolic functions} (chapter~\ref{TRIGSIMP});\ttindex{TRIGSIMP} %%and hyperbolic functions}\ttindex{TRIGSIMP} %% %%TRIGSIMP is a useful tool for all kinds of trigonometric and hyperbolic %%simplification and factorization. There are three procedures included in %%TRIGSIMP: trigsimp, trigfactorize and triggcd. The first is for finding %%simplifications of trigonometric or hyperbolic expressions with many %%options, the second for factorizing them and the third for finding the %%greatest common divisor of two trigonometric or hyperbolic polynomials. %% %%Author: Wolfram Koepf. 
%% \item {XCOLOR: Calculation of the color factor in non-abelian gauge field theories} (chapter~\ref{XCOLOR});\ttindex{XCOLOR} %% %%This package calculates the color factor in non-abelian gauge field %%theories using an algorithm due to Cvitanovich. %% %%Documentation for this package is in plain text. %% %%Author: A. Kryukov. %% \item {XIDEAL: Gr\"obner Bases for exterior algebra} (chapter~\ref{XIDEAL}); \ttindex{XIDEAL} %% %%XIDEAL constructs Gr\"obner bases for solving the left ideal membership %%problem: Gr\"obner left ideal bases or GLIBs. For graded ideals, where each %%form is homogeneous in degree, the distinction between left and right %%ideals vanishes. Furthermore, if the generating forms are all homogeneous, %%then the Gr\"obner bases for the non-graded and graded ideals are %%identical. In this case, XIDEAL is able to save time by truncating the %%Gr\"obner basis at some maximum degree if desired. %% %%Author: David Hartley. %% \item {WU: Wu algorithm for polynomial systems} (chapter~\ref{WU});\ttindex{WU} %% %%This is a simple implementation of the Wu algorithm implemented in REDUCE %%working directly from ``A Zero Structure Theorem for %%Polynomial-Equations-Solving,'' Wu Wen-tsun, Institute of Systems Science, %%Academia Sinica, Beijing. %% %%Author: Russell Bradford. %% \item {ZEILBERG: A package for indefinite and definite summation} (chapter~\ref{ZEILBERG});\ttindex{ZEILBERG} %% %%This package is a careful implementation of the Gosper and Zeilberger %%algorithms for indefinite and definite summation of hypergeometric terms, %%respectively. Extensions of these algorithms are also included that are %%valid for ratios of products of powers, factorials, $\Gamma$ function %%terms, binomial coefficients, and shifted factorials that are %%rational-linear in their arguments. %% %%Authors: Gregor St\"olting and Wolfram Koepf. %% \item {ZTRANS: $Z$-transform package} (chapter~\ref{ZTRANS});\ttindex{ZTRANS} %% %%This package is an implementation of the $Z$-transform of a sequence. %%This is the discrete analogue of the Laplace Transform. %% %%Authors: Wolfram Koepf and Lisa Temme. \end{itemize} \chapter{Symbolic Mode}\index{Symbolic mode} At the system level, {\REDUCE} is based on a version of the programming language Lisp\index{Lisp} known as {\em Standard Lisp\/} which is described in J. Marti, Hearn, A. C., Griss, M. L. and Griss, C., ``Standard LISP Report" SIGPLAN Notices, ACM, New York, 14, No 10 (1979) 48-68. We shall assume in this section that the reader is familiar with the material in that paper. This also assumes implicitly that the reader has a reasonable knowledge about Lisp in general, say at the level of the LISP 1.5 Programmer's Manual (McCarthy, J., Abrahams, P. W., Edwards, D. J., Hart, T. P. and Levin, M. I., ``LISP 1.5 Programmer's Manual'', M.I.T. Press, 1965) or any of the books mentioned at the end of this section. Persons unfamiliar with this material will have some difficulty understanding this section. Although {\REDUCE} is designed primarily for algebraic calculations, its source language is general enough to allow for a full range of Lisp-like symbolic calculations. 
To achieve this generality, however, it is necessary to provide the user with two modes of evaluation, namely an algebraic mode\index{Algebraic mode} and a symbolic mode.\index{Symbolic mode} To enter symbolic mode, the user types {\tt symbolic;} \ttindex{SYMBOLIC} (or {\tt lisp;})\ttindex{LISP} and to return to algebraic mode one types {\tt algebraic;}.\ttindex{ALGEBRAIC} Evaluations proceed differently in each mode so the user is advised to check what mode he is in if a puzzling error arises. He can find his mode by typing\ttindex{EVAL\_MODE} {\small\begin{verbatim} eval_mode; \end{verbatim}} The current mode will then be printed as {\tt ALGEBRAIC} or {\tt SYMBOLIC}. Expression evaluation may proceed in either mode at any level of a calculation, provided the results are passed from mode to mode in a compatible manner. One simply prefixes the relevant expression by the appropriate mode. If the mode name prefixes an expression at the top level, it will then be handled as if the global system mode had been changed for the scope of that particular calculation. For example, if the current mode is {\tt ALGEBRAIC}, then the commands \extendedmanual{\newpage} {\small\begin{verbatim} symbolic car '(a); x+y; \end{verbatim}} will cause the first expression to be evaluated and printed in symbolic mode and the second in algebraic mode. Only the second evaluation will thus affect the expression workspace. On the other hand, the statement {\small\begin{verbatim} x + symbolic car '(12); \end{verbatim}} will result in the algebraic value {\tt X+12}. The use of {\tt SYMBOLIC} (and equivalently {\tt ALGEBRAIC}) in this manner is the same as any operator. That means that parentheses could be omitted in the above examples since the meaning is obvious. In other cases, parentheses must be used, as in {\small\begin{verbatim} symbolic(x := 'a); \end{verbatim}} Omitting the parentheses, as in {\small\begin{verbatim} symbolic x := a; \end{verbatim}} would be wrong, since it would parse as {\small\begin{verbatim} symbolic(x) := a; \end{verbatim}} For convenience, it is assumed that any operator whose {\em first\/} argument is quoted is being evaluated in symbolic mode, regardless of the mode in effect at that time. Thus, the first example above could be equally well written: {\small\begin{verbatim} car '(a); \end{verbatim}} Except where explicit limitations have been made, most {\REDUCE} algebraic constructions carry over into symbolic mode.\index{Symbolic mode} However, there are some differences. First, expression evaluation now becomes Lisp evaluation. Secondly, assignment statements are handled differently, as we shall discuss shortly. Thirdly, local variables and array elements are initialized to {\tt NIL} rather than {\tt 0}. (In fact, any variables not explicitly declared {\tt INTEGER} are also initialized to {\tt NIL} in algebraic mode, but the algebraic evaluator recognizes {\tt NIL} as {\tt 0}.) Finally, function definitions follow the conventions of Standard Lisp. To begin with, we mention a few extensions to our basic syntax which are designed primarily if not exclusively for symbolic mode. \section{Symbolic Infix Operators} There are three binary infix operators in {\REDUCE} intended for use in symbolic mode, namely . {\tt (CONS), EQ and MEMQ}. The precedence of these operators was given in another section. \section{Symbolic Expressions} These consist of scalar variables and operators and follow the normal rules of the Lisp meta language. {\it Examples:} {\small\begin{verbatim} x car u . 
reverse v simp (u+v^2) \end{verbatim}} \section{Quoted Expressions}\ttindex{QUOTE} Because symbolic evaluation requires that each variable or expression has a value, it is necessary to add to {\REDUCE} the concept of a quoted expression by analogy with the Lisp {\tt QUOTE} function. This is provided by the single quote mark {\tt '}. For example, \begin{quote} \begin{tabbing} {\tt '(a b c)} \= represents the Lisp S-expression \= {\tt (quote (a b c))}\kill {\tt 'a} \> represents the Lisp S-expression \> {\tt (quote a)} \\ {\tt '(a b c)} \> represents the Lisp S-expression \> {\tt (quote (a b c))} \end{tabbing} \end{quote} Note, however, that strings are constants and therefore evaluate to themselves in symbolic mode. Thus, to print the string {\tt "A String"}, one would write {\small\begin{verbatim} prin2 "A String"; \end{verbatim}} Within a quoted expression, identifier syntax rules are those of {\REDUCE}. Thus {\tt (A~!.~~B)} is the list consisting of the three elements {\tt A}, {\tt .}, and {\tt B}, whereas {\tt (A . B)} is the dotted pair of {\tt A} and {\tt B}. \section{Lambda Expressions}\ttindex{LAMBDA} \label{sec-lambda} {\tt LAMBDA} expressions provide the means for constructing Lisp {\tt LAMBDA} expressions in symbolic mode. They may not be used in algebraic mode. Syntax: {\small\begin{verbatim} <LAMBDA expression> ::= LAMBDA <varlist><terminator><statement> \end{verbatim}} where {\small\begin{verbatim} <varlist> ::= (<variable>,...,<variable>) \end{verbatim}} e.g., {\small\begin{verbatim} lambda (x,y); car x . cdr y; \end{verbatim}} is equivalent to the Lisp {\tt LAMBDA} expression {\small\begin{verbatim} (lambda (x y) (cons (car x) (cdr y))) \end{verbatim}} The parentheses may be omitted in specifying the variable list if desired. {\tt LAMBDA} expressions may be used in symbolic mode in place of prefix operators, or as an argument of the reserved word {\tt FUNCTION}. In those cases where a {\tt LAMBDA} expression is used to introduce local variables to avoid recomputation, a {\tt WHERE} statement can also be used. For example, the expression {\small\begin{verbatim} (lambda (x,y); list(car x,cdr x,car y,cdr y)) (reverse u,reverse v) \end{verbatim}} can also be written {\small\begin{verbatim} {car x,cdr x,car y,cdr y} where x=reverse u,y=reverse v \end{verbatim}} Where possible, {\tt WHERE} syntax is preferred to {\tt LAMBDA} syntax, since it is more natural. \section{Symbolic Assignment Statements}\index{Assignment} In symbolic mode, if the left side of an assignment statement is a variable, a {\tt SETQ} of the right-hand side to that variable occurs. If the left-hand side is an expression, it must be of the form of an array element, otherwise an error will result. For example, {\tt x:=y} translates into {\tt (SETQ X Y)} whereas {\tt a(3) := 3} will be valid if {\tt A} has been previously declared a single dimensioned array of at least four elements. \section{FOR EACH Statement}\ttindex{FOR EACH} The {\tt FOR EACH} form of the {\tt FOR} statement, designed for iteration down a list, is more general in symbolic mode. Its syntax is: {\small\begin{verbatim} FOR EACH ID:identifier {IN|ON} LST:list {DO|COLLECT|JOIN|PRODUCT|SUM} EXPRN:S-expr \end{verbatim}} As in algebraic mode, if the keyword {\tt IN} is used, iteration is on each element of the list. With {\tt ON}, iteration is on the whole list remaining at each point in the iteration. 
As a result, we have the following equivalence between each form of {\tt FOR EACH} and the various mapping functions in Lisp: \begin{center} {\tt \begin{tabular}{|l|lr r|} \hline & DO & COLLECT & JOIN \\ \hline IN & MAPC & MAPCAR & MAPCAN \\ ON & MAP & MAPLIST & MAPCON \\ \hline \end{tabular}} \end{center} {\it Example:} To list each element of the list {\tt (a b c)}: {\small\begin{verbatim} for each x in '(a b c) collect list x; \end{verbatim}} \section{Symbolic Procedures}\index{Symbolic procedure} All the functions described in the Standard Lisp Report are available to users in symbolic mode. Additional functions may also be defined as symbolic procedures. For example, to define the Lisp function {\tt ASSOC}, the following could be used: {\small\begin{verbatim} symbolic procedure assoc(u,v); if null v then nil else if u = caar v then car v else assoc(u, cdr v); \end{verbatim}} If the default mode were symbolic, then {\tt SYMBOLIC} could be omitted in the above definition. {\tt MACRO}s\ttindex{MACRO} may be defined by prefixing the keyword {\tt PROCEDURE} by the word {\tt MACRO}. (In fact, ordinary functions may be defined with the keyword {\tt EXPR} \ttindex{EXPR} prefixing {\tt PROCEDURE} as was used in the Standard Lisp Report.) For example, we could define a {\tt MACRO CONSCONS} by {\small\begin{verbatim} symbolic macro procedure conscons l; expand(cdr l,'cons); \end{verbatim}} Another form of macro, the {\tt SMACRO}\ttindex{SMACRO} is also available. These are described in the Standard Lisp Report. The Report also defines a function type {\tt FEXPR}.\ttindex{FEXPR} However, its use is discouraged since it is hard to implement efficiently, and most uses can be replaced by macros. At the present time, there are no {\tt FEXPR}s in the core REDUCE system. \section{Standard Lisp Equivalent of Reduce Input} A user can obtain the Standard Lisp equivalent of his {\REDUCE} input by turning on the switch {\tt DEFN}\ttindex{DEFN} (for definition). The system then prints the Lisp translation of his input but does not evaluate it. Normal operation is resumed when {\tt DEFN} is turned off. \section{Communicating with Algebraic Mode}\index{Mode communication} One of the principal motivations for a user of the algebraic facilities of {\REDUCE} to learn about symbolic mode\index{Symbolic mode} is that it gives one access to a wider range of techniques than is possible in algebraic mode\index{Algebraic mode} alone. For example, if a user wishes to use parts of the system defined in the basic system source code, or refine their algebraic code definitions to make them more efficient, then it is necessary to understand the source language in fairly complete detail. Moreover, it is also necessary to know a little more about the way {\REDUCE} operates internally. Basically, {\REDUCE} considers expressions in two forms: prefix form, which follow the normal Lisp rules of function composition, and so-called canonical form, which uses a completely different syntax. Once these details are understood, the most critical problem faced by a user is how to make expressions and procedures communicate between symbolic and algebraic mode. The purpose of this section is to teach a user the basic principles for this. If one wants to evaluate an expression in algebraic mode, and then use that expression in symbolic mode calculations, or vice versa, the easiest way to do this is to assign a variable to that expression whose value is easily obtainable in both modes. 
To facilitate this, a declaration {\tt SHARE}\ttindex{SHARE} is available. {\tt SHARE} takes a list of identifiers as argument, and marks these variables as having recognizable values in both modes. The declaration may be used in either mode. E.g., {\small\begin{verbatim} share x,y; \end{verbatim}} says that {\tt X} and {\tt Y} will receive values to be used in both modes. If a {\tt SHARE} declaration is made for a variable with a previously assigned algebraic value, that value is also made available in symbolic mode. \subsection{Passing Algebraic Mode Values to Symbolic Mode} If one wishes to work with parts of an algebraic mode \index{Algebraic mode} expression in symbolic mode,\index{Symbolic mode} one simply makes an assignment\index{Assignment} of a shared variable to the relevant expression in algebraic mode. For example, if one wishes to work with {\tt (a+b)\verb|^|2}, one would say, in algebraic mode: {\small\begin{verbatim} x := (a+b)^2; \end{verbatim}} assuming that {\tt X} was declared shared as above. If we now change to symbolic mode and say {\small\begin{verbatim} x; \end{verbatim}} its value will be printed as a prefix form with the syntax: {\small\begin{verbatim} (*SQ <standard quotient> T) \end{verbatim}} This particular format reflects the fact that the algebraic mode processor currently likes to transfer prefix forms from command to command, but doesn't like to reconvert standard forms\index{Standard form} (which represent polynomials) and standard quotients back to a true Lisp prefix form for the expression (which would result in excessive computation). So {\tt *SQ} is used to tell the algebraic processor that it is dealing with a prefix form which is really a standard quotient\index{Standard quotient} and the second argument ({\tt T} or {\tt NIL}) tells it whether it needs further processing (essentially, an {\em already simplified\/} flag). So to get the true standard quotient form in symbolic mode, one needs {\tt CADR} of the variable. E.g., {\small\begin{verbatim} z := cadr x; \end{verbatim}} would store in {\tt Z} the standard quotient form for {\tt (a+b)\verb|^|2}. Once you have this expression, you can now manipulate it as you wish. To facilitate this, a standard set of selectors\index{Selector} and constructors\index{Constructor} are available for getting at parts of the form. 
Those presently defined are as follows: \extendedmanual{\newpage} \begin{center} \vspace{10pt} {\large REDUCE Selectors\par} %\end{center} %\begin{center} \renewcommand{\arraystretch}{1.5} \begin{tabular}{lp{\rboxwidth}} {\tt DENR} & denominator of standard quotient \\ % {\tt LC} & leading coefficient of polynomial \\ % {\tt LDEG} & leading degree of polynomial \\ % {\tt LPOW} & leading power of polynomial \\ % {\tt LT} & leading term of polynomial \\ % {\tt MVAR} & main variable of polynomial \\ % {\tt NUMR} & numerator (of standard quotient) \\ % {\tt PDEG} & degree of a power \\ % {\tt RED} & reductum of polynomial \\ % {\tt TC} & coefficient of a term \\ % {\tt TDEG} & degree of a term \\ % {\tt TPOW} & power of a term \end{tabular} \end{center} \begin{center} \vspace{10pt} {\large REDUCE Constructors \par} %\end{center} %\begin{center} \renewcommand{\arraystretch}{1.5} \begin{tabular}{lp{\redboxwidth}} \verb|.+| & add a term to a polynomial \\ % \verb|./| & divide (two polynomials to get quotient) \\ \verb|.*| & multiply power by coefficient to produce term \\ % \verb|.^| & raise a variable to a power \end{tabular} \end{center} For example, to find the numerator of the standard quotient above, one could say: {\small\begin{verbatim} numr z; \end{verbatim}} or to find the leading term of the numerator: {\small\begin{verbatim} lt numr z; \end{verbatim}} Conversion between various data structures is facilitated by the use of a set of functions defined for this purpose. Those currently implemented include: {\renewcommand{\arraystretch}{1.5} \begin{tabular}{lp{\reduceboxwidth}} {\tt !*A2F} & convert an algebraic expression to a standard form. If result is rational, an error results; \\ % {\tt !*A2K} & converts an algebraic expression to a kernel. If this is not possible, an error results; \\ % {\tt !*F2A} & converts a standard form to an algebraic expression; \\ % {\tt !*F2Q} & convert a standard form to a standard quotient; \\ % {\tt !*K2F} & convert a kernel to a standard form; \\ {\tt !*K2Q} & convert a kernel to a standard quotient; \\ % {\tt !*P2F} & convert a standard power to a standard form; \\ % {\tt !*P2Q} & convert a standard power to a standard quotient; \\ % {\tt !*Q2F} & convert a standard quotient to a standard form. If the quotient denominator is not 1, an error results; \\ % {\tt !*Q2K} & convert a standard quotient to a kernel. If this is not possible, an error results; \\ % {\tt !*T2F} & convert a standard term to a standard form \\ % {\tt !*T2Q} & convert a standard term to a standard quotient. \end{tabular}} \subsection{Passing Symbolic Mode Values to Algebraic Mode} In order to pass the value of a shared variable from symbolic mode to algebraic mode, the only thing to do is make sure that the value in symbolic mode is a prefix expression. E.g., one uses {\tt (expt (plus a b) 2)} for {\tt (a+b)\verb|^|2}, or the format ({\tt *sq <standard quotient> t}) as described above. However, if you have been working with parts of a standard form they will probably not be in this form. In that case, you can do the following: \begin{enumerate} \item If it is a standard quotient, call {\tt PREPSQ} on it. This takes a standard quotient as argument, and returns a prefix expression. Alternatively, you can call {\tt MK!*SQ} on it, which returns a prefix form like ({\tt *SQ <standard quotient> T)} and avoids translation of the expression into a true prefix form. \item If it is a standard form, call {\tt PREPF} on it. 
This takes a standard form as argument, and returns the equivalent prefix expression. Alternatively, you can convert it to a standard quotient and then call {\tt MK!*SQ}. \item If it is a part of a standard form, you must usually first build up a standard form out of it, and then go to step 2. The conversion functions described earlier may be used for this purpose. For example, \begin{enumerate} \item If {\tt Z} is an expression which is a term, {\tt !*T2F Z} is a standard form. \item If {\tt Z} is a standard power, {\tt !*P2F Z} is a standard form. \item If {\tt Z} is a variable, you can pass it direct to algebraic mode. \end{enumerate} \end{enumerate} For example, to pass the leading term of {\tt (a+b)\verb|^|2} back to algebraic mode, one could say: {\small\begin{verbatim} y:= mk!*sq !*t2q lt numr z; \end{verbatim}} where {\tt Y} has been declared shared as above. If you now go back to algebraic mode, you can work with {\tt Y} in the usual way. \subsection{Complete Example} The following is the complete code for doing the above steps. The end result will be that the square of the leading term of $(a+b)^{2}$ is calculated. %%\begin{tabular}{lp{\rboxwidth}} %%{\tt share x,y;} & {\tt \% declare {\tt X} and %%{\tt Y} as shared} \\ %%{\tt x := (a+b)\verb|^|2;} & {\tt \% store (a+b)\verb|^|2 in X} \\ %%{\tt symbolic;} & {\tt \% transfer to symbolic mode} \\ %%{\tt z := cadr x;} & {\tt \% store a true standard quotient \newline %% \% in Z} \\[1.7pt] %%{\tt lt numr z;} & {\tt \% print the leading term of the \newline %% \% numerator of Z} \\ %%{\tt y := mk!*sq !*t2q lt numr z;} & {\tt \% store the %% prefix form of this \newline %% \% leading term in Y} \\ %%{\tt algebraic;} & {\tt \% return to algebraic mode} \\ %%{\tt y\verb|^|2;} & {\tt \% evaluate square of the leading \newline %%\% term of (a+b)\verb|^|2} %%\end{tabular} {\small\begin{verbatim} share x,y; % declare X and Y as shared x := (a+b)^2; % store (a+b)^2 in X symbolic; % transfer to symbolic mode z := cadr x; % store a true standard quotient in Z lt numr z; % print the leading term of the % numerator of Z y := mk!*sq !*t2q lt numr z; % store the prefix form of this % leading term in Y algebraic; % return to algebraic mode y^2; % evaluate square of the leading term % of (a+b)^2 \end{verbatim}} \subsection{Defining Procedures for Intermode Communication} If one wishes to define a procedure in symbolic mode for use as an operator in algebraic mode, it is necessary to declare this fact to the system by using the declaration {\tt OPERATOR}\ttindex{OPERATOR} in symbolic mode. Thus {\small\begin{verbatim} symbolic operator leadterm; \end{verbatim}} would declare the procedure {\tt LEADTERM} as an algebraic operator. This declaration {\em must\/} be made in symbolic mode as the effect in algebraic mode is different. The value of such a procedure must be a prefix form. The algebraic processor will pass arguments to such procedures in prefix form. Therefore if you want to work with the arguments as standard quotients you must first convert them to that form by using the function {\tt SIMP!*}. This function takes a prefix form as argument and returns the evaluated standard quotient. For example, if you want to define a procedure {\tt LEADTERM} which gives the leading term of an algebraic expression, one could do this as follows: \begin{samepage} {\small\begin{verbatim} symbolic operator leadterm; % Declare LEADTERM as a symbolic % mode procedure to be used in % algebraic mode. symbolic procedure leadterm u; % Define LEADTERM. 
mk!*sq !*t2q lt numr simp!* u; \end{verbatim}} \end{samepage} Note that this operator has a different effect than the operator {\tt LTERM} \ttindex{LTERM}. In the latter case, the calculation is done with respect to the second argument of the operator. In the example here, we simply extract the leading term with respect to the system's choice of main variable. Finally, if you wish to use the algebraic evaluator on an argument in a symbolic mode definition, the function {\tt REVAL} can be used. The one argument of {\tt REVAL} must be the prefix form of an expression. {\tt REVAL} returns the evaluated expression as a true Lisp prefix form. \section{Rlisp '88} Rlisp '88 is a superset of the Rlisp that has been traditionally used for the support of REDUCE. It is fully documented in the book Marti, J.B., ``{RLISP} '88: An Evolutionary Approach to Program Design and Reuse'', World Scientific, Singapore (1993). Rlisp '88 adds to the traditional Rlisp the following facilities: \begin{enumerate} \item more general versions of the looping constructs {\tt for}, {\tt repeat} and {\tt while}; \item support for a backquote construct; \item support for active comments; \item support for vectors of the form name[index]; \item support for simple structures; \item support for records. \end{enumerate} In addition, ``--'' is a letter in Rlisp '88. In other words, {\tt A-B} is an identifier, not the difference of the identifiers {\tt A} and {\tt B}. If the latter construct is required, it is necessary to put spaces around the - character. For compatibility between the two versions of Rlisp, we recommend this convention be used in all symbolic mode programs. To use Rlisp '88, type {\tt on rlisp88;}\ttindex{RLISP88}. This switches to symbolic mode with the Rlisp '88 syntax and extensions. While in this environment, it is impossible to switch to algebraic mode, or prefix expressions by ``algebraic''. However, symbolic mode programs written in Rlisp '88 may be run in algebraic mode provided the rlisp88 package has been loaded. We also expect that many of the extensions defined in Rlisp '88 will migrate to the basic Rlisp over time. To return to traditional Rlisp or to switch to algebraic mode, say ``off rlisp88''. \section{References} There are a number of useful books which can give you further information about LISP. Here is a selection: Allen, J.R., ``The Anatomy of LISP'', McGraw Hill, New York, 1978. McCarthy J., P.W. Abrahams, J. Edwards, T.P. Hart and M.I. Levin, ``LISP 1.5 Programmer's Manual'', M.I.T. Press, 1965. Touretzky, D.S, ``{LISP}: A Gentle Introduction to Symbolic Computation'', Harper \& Row, New York, 1984. Winston, P.H. and Horn, B.K.P., ``LISP'', Addison-Wesley, 1981. \chapter{Calculations in High Energy Physics} A set of {\REDUCE} commands is provided for users interested in symbolic calculations in high energy physics. Several extensions to our basic syntax are necessary, however, to allow for the different data structures encountered. \section{High Energy Physics Operators} \label{HEPHYS} We begin by introducing three new operators required in these calculations. \subsection{. (Cons) Operator}\index{Dot product} Syntax: {\small\begin{verbatim} (EXPRN1:vector_expression) . (EXPRN2:vector_expression):algebraic. \end{verbatim}} The binary {\tt .} operator, which is normally used to denote the addition of an element to the front of a list, can also be used in algebraic mode to denote the scalar product of two Lorentz four-vectors. 
For this to happen, the second argument must be recognizable as a vector expression \index{High energy vector expression} at the time of evaluation. With this meaning, this operator is often referred to as the {\em dot\/} operator. In the present system, the index handling routines all assume that Lorentz four-vectors are used, but these routines could be rewritten to handle other cases. Components of vectors can be represented by including representations of unit vectors in the system. Thus if {\tt EO} represents the unit vector {\tt (1,0,0,0)}, {\tt (p.eo)} represents the zeroth component of the four-vector P. Our metric and notation follows Bjorken and Drell ``Relativistic Quantum Mechanics'' (McGraw-Hill, New York, 1965). Similarly, an arbitrary component {\tt P} may be represented by {\tt (p.u)}. If contraction over components of vectors is required, then the declaration {\tt INDEX}\ttindex{INDEX} must be used. Thus {\small\begin{verbatim} index u; \end{verbatim}} declares {\tt U} as an index, and the simplification of {\small\begin{verbatim} p.u * q.u \end{verbatim}} would result in {\small\begin{verbatim} P.Q \end{verbatim}} The metric tensor $g^{\mu \nu}$ may be represented by {\tt (u.v)}. If contraction over {\tt U} and {\tt V} is required, then they should be declared as indices. Errors occur if indices are not properly matched in expressions. If a user later wishes to remove the index property from specific vectors, he can do it with the declaration {\tt REMIND}.\ttindex{REMIND} Thus {\tt remind v1...vn;} removes the index flags from the variables {\tt V1} through {\tt Vn}. However, these variables remain vectors in the system. \subsection{G Operator for Gamma Matrices}\index{Dirac $\gamma$ matrix} \ttindex{G} Syntax: {\small\begin{verbatim} G(ID:identifier[,EXPRN:vector_expression]) :gamma_matrix_expression. \end{verbatim}} {\tt G} is an n-ary operator used to denote a product of $\gamma$ matrices contracted with Lorentz four-vectors. Gamma matrices are associated with fermion lines in a Feynman diagram. If more than one such line occurs, then a different set of $\gamma$ matrices (operating in independent spin spaces) is required to represent each line. To facilitate this, the first argument of {\tt G} is a line identification identifier (not a number) used to distinguish different lines. Thus {\small\begin{verbatim} g(l1,p) * g(l2,q) \end{verbatim}} denotes the product of {\tt $\gamma$.p} associated with a fermion line identified as {\tt L1}, and {\tt $\gamma$.q} associated with another line identified as {\tt L2} and where {\tt p} and {\tt q} are Lorentz four-vectors. A product of $\gamma$ matrices associated with the same line may be written in a contracted form. Thus {\small\begin{verbatim} g(l1,p1,p2,...,p3) = g(l1,p1)*g(l1,p2)*...*g(l1,p3) . \end{verbatim}} The vector {\tt A} is reserved in arguments of G to denote the special $\gamma$ matrix $\gamma^{5}$. Thus \begin{quote} \begin{tabbing} \ \ \ \ \ {\tt g(l,a)}\hspace{0.2in} \= =\ \ \ $\gamma^{5}$ \hspace{0.5in} \= associated with the line {\tt L} \\[0.1in] \ \ \ \ \ {\tt g(l,p,a)} \> =\ \ \ $\gamma$.p $\times \gamma^{5}$ \> associated with the line {\tt L}. \end{tabbing} \end{quote} $\gamma^{\mu}$ (associated with the line {\tt L}) may be written as {\tt g(l,u)}, with {\tt U} flagged as an index if contraction over {\tt U} is required. The notation of Bjorken and Drell is assumed in all operations involving $\gamma$ matrices. 
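As a minimal sketch of these conventions (the vector names {\tt P} and {\tt Q}
and the line identifier {\tt L} are arbitrary), the following declares two
four-vectors and evaluates a product of two $\gamma$ matrices on the same
line; since the evaluation of a Dirac expression yields one quarter of its
trace (see the section on trace calculations below), the result is simply the
scalar product of the two vectors:
{\small\begin{verbatim}
vector p,q;     % declare P and Q as Lorentz four-vectors
g(l,p)*g(l,q);  % gamma.p gamma.q on the fermion line L;
                % one quarter of the trace, namely P.Q, is returned
\end{verbatim}}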
\subsection{EPS Operator}\ttindex{EPS} Syntax: {\small\begin{verbatim} EPS(EXPRN1:vector_expression,...,EXPRN4:vector_exp) :vector_exp. \end{verbatim}} The operator {\tt EPS} has four arguments, and is used only to denote the completely antisymmetric tensor of order 4 and its contraction with Lorentz four-vectors. Thus \[ \epsilon_{i j k l} = \left\{ \begin{array}{cl} +1 & \mbox{if $i,j,k,l$ is an even permutation of 0,1,2,3} \\ -1 & \mbox{if an odd permutation} \\ 0 & \mbox{otherwise} \end{array} \right. \] A contraction of the form $\epsilon_{i j \mu \nu}p_{\mu}q_{\nu}$ may be written as {\tt eps(i,j,p,q)}, with {\tt I} and {\tt J} flagged as indices, and so on. \section{Vector Variables} Apart from the line identification identifier in the {\tt G} operator, all other arguments of the operators in this section are vectors. Variables used as such must be declared so by the type declaration {\tt VECTOR}, \ttindex{VECTOR} for example: {\small\begin{verbatim} vector p1,p2; \end{verbatim}} declares {\tt P1} and {\tt P2} to be vectors. Variables declared as indices or given a mass\ttindex{MASS} are automatically declared vector by these declarations. \section{Additional Expression Types} Two additional expression types are necessary for high energy calculations, namely \subsection{Vector Expressions}\index{High energy vector expression} These follow the normal rules of vector combination. Thus the product of a scalar or numerical expression and a vector expression is a vector, as are the sum and difference of vector expressions. If these rules are not followed, error messages are printed. Furthermore, if the system finds an undeclared variable where it expects a vector variable, it will ask the user in interactive mode whether to make that variable a vector or not. In batch mode, the declaration will be made automatically and the user informed of this by a message. {\tt Examples:} Assuming {\tt P} and {\tt Q} have been declared vectors, the following are vector expressions {\small\begin{verbatim} p 2*q/3 2*x*y*p - p.q*q/(3*q.q) \end{verbatim}} whereas {\tt p*q} and {\tt p/q} are not. \subsection{Dirac Expressions} These denote those expressions which involve $\gamma$ matrices. A $\gamma$ matrix is implicitly a 4 $\times$ 4 matrix, and so the product, sum and difference of such expressions, or the product of a scalar and Dirac expression is again a Dirac expression. There are no Dirac variables in the system, so whenever a scalar variable appears in a Dirac expression without an associated $\gamma$ matrix expression, an implicit unit 4 by 4 matrix is assumed. For example, {\tt g(l,p) + m} denotes {\tt g(l,p) + m*<unit 4 by 4 matrix>}. Multiplication of Dirac expressions, as for matrix expressions, is of course non-commutative. \section{Trace Calculations}\index{High energy trace} When a Dirac expression is evaluated, the system computes one quarter of the trace of each $\gamma$ matrix product in the expansion of the expression. One quarter of each trace is taken in order to avoid confusion between the trace of the scalar {\tt M}, say, and {\tt M} representing {\tt M * <unit 4 by 4 matrix>}. Contraction over indices occurring in such expressions is also performed. If an unmatched index is found in such an expression, an error occurs. The algorithms used for trace calculations are the best available at the time this system was produced. 
For example, in addition to the algorithm developed by Chisholm for contracting indices in products of traces, {\REDUCE} uses the elegant algorithm of Kahane for contracting indices in $\gamma$ matrix products. These algorithms are described in Chisholm, J. S. R., Il Nuovo Cimento X, 30, 426 (1963) and Kahane, J., Journal Math. Phys. 9, 1732 (1968). It is possible to prevent the trace calculation over any line identifier by the declaration {\tt NOSPUR}.\ttindex{NOSPUR} For example, {\small\begin{verbatim} nospur l1,l2; \end{verbatim}} will mean that no traces are taken of $\gamma$ matrix terms involving the line numbers {\tt L1} and {\tt L2}. However, in some calculations involving more than one line, a catastrophic error {\small\begin{verbatim} This NOSPUR option not implemented \end{verbatim}} can occur (for the reason stated!) If you encounter this error, please let us know! A trace of a $\gamma$ matrix expression involving a line identifier which has been declared {\tt NOSPUR} may be later taken by making the declaration {\tt SPUR}.\ttindex{SPUR} See also the CVIT package for an alternative mechanism\extendedmanual{ (chapter~\ref{CVIT})}. \section{Mass Declarations}\ttindex{MASS} It is often necessary to put a particle ``on the mass shell'' in a calculation. This can, of course, be accomplished with a {\tt LET} command such as {\small\begin{verbatim} let p.p= m^2; \end{verbatim}} but an alternative method is provided by two commands {\tt MASS} and {\tt MSHELL}.\ttindex{MSHELL} {\tt MASS} takes a list of equations of the form: {\small\begin{verbatim} <vector variable> = <scalar variable> \end{verbatim}} for example, {\small\begin{verbatim} mass p1=m, q1=mu; \end{verbatim}} The only effect of this command is to associate the relevant scalar variable as a mass with the corresponding vector. If we now say {\small\begin{verbatim} mshell <vector variable>,...,<vector variable>; \end{verbatim}} and a mass has been associated with these arguments, a substitution of the form {\small\begin{verbatim} <vector variable>.<vector variable> = <mass>^2 \end{verbatim}} is set up. An error results if the variable has no preassigned mass. \section{Example} We give here as an example of a simple calculation in high energy physics the computation of the Compton scattering cross-section as given in Bjorken and Drell Eqs. (7.72) through (7.74). We wish to compute the trace of $$\left. \alpha^2\over2 \right. \left({k^\prime\over k}\right)^2 \left({\gamma.p_f+m\over2m}\right)\left({\gamma.e^\prime \gamma.e \gamma.k_i\over2k.p_i} + {\gamma.e\gamma.e^\prime \gamma.k_f\over2k^\prime.p_i}\right) \left({\gamma.p_i+m\over2m}\right)$$ $$ \left({\gamma.k_i\gamma.e\gamma.e^\prime\over2k.p_i} + {\gamma.k_f\gamma.e^\prime\gamma.e\over2k^\prime.p_i} \right) $$ where $k_i$ and $k_f$ are the four-momenta of incoming and outgoing photons (with polarization vectors $e$ and $e^\prime$ and laboratory energies $k$ and $k^\prime$ respectively) and $p_i$, $p_f$ are incident and final electron four-momenta. 
Omitting therefore an overall factor ${\alpha^2\over2m^2}\left({k^\prime\over k}\right)^2$ we need to find one quarter of the trace of $${ \left( \gamma.p_f + m\right) \left({\gamma.e^\prime \gamma.e\gamma.k_i\over2k.p_i} + {\gamma.e\gamma.e^\prime \gamma.k_f\over 2k^\prime.p_i}\right) \left( \gamma.p_i + m\right)}$$ $${ \left({\gamma.k_i\gamma.e\gamma.e^\prime\over 2k.p_i} + {\gamma.k_f\gamma.e^\prime \gamma.e\over2k^\prime.p_i}\right) }$$ A straightforward REDUCE program for this, with appropriate substitutions (using {\tt P1} for $p_i$, {\tt PF} for $p_f$, {\tt KI} for $k_i$ and {\tt KF} for $k_f$) is {\small\begin{verbatim} on div; % this gives output in same form as Bjorken and Drell. mass ki= 0, kf= 0, p1= m, pf= m; vector e,ep; % if e is used as a vector, it loses its scalar identity as % the base of natural logarithms. mshell ki,kf,p1,pf; let p1.e= 0, p1.ep= 0, p1.pf= m^2+ki.kf, p1.ki= m*k,p1.kf= m*kp, pf.e= -kf.e, pf.ep= ki.ep, pf.ki= m*kp, pf.kf= m*k, ki.e= 0, ki.kf= m*(k-kp), kf.ep= 0, e.e= -1, ep.ep=-1; for all p let gp(p)= g(l,p)+m; comment this is just to save us a lot of writing; gp(pf)*(g(l,ep,e,ki)/(2*ki.p1) + g(l,e,ep,kf)/(2*kf.p1)) * gp(p1)*(g(l,ki,e,ep)/(2*ki.p1) + g(l,kf,ep,e)/ (2*kf.p1))$ write "The Compton cxn is",ws; \end{verbatim}} (We use {\tt P1} instead of {\tt PI} in the above to avoid confusion with the reserved variable {\tt PI}). This program will print the following result {\small\begin{verbatim} (-1) (-1) 2 The Compton cxn is 1/2*K*KP + 1/2*K *KP + 2*E.EP - 1 \end{verbatim}} \section{Extensions to More Than Four Dimensions} In our discussion so far, we have assumed that we are working in the normal four dimensions of QED calculations. However, in most cases, the programs will also work in an arbitrary number of dimensions. The command \ttindex{VECDIM} {\small\begin{verbatim} vecdim <expression>; \end{verbatim}} sets the appropriate dimension. The dimension can be symbolic as well as numerical. Users should note however, that the {\tt EPS} operator and the $\gamma_{5}$ symbol ({\tt A}) are not properly defined in other than four dimensions and will lead to an error if used. \chapter{{\REDUCE} and Rlisp Utilities} {\REDUCE} and its associated support language system Rlisp\index{Rlisp} include a number of utilities which have proved useful for program development over the years. The following are supported in most of the implementations of {\REDUCE} currently available. \section{The Standard Lisp Compiler}\index{Compiler} Many versions of {\REDUCE} include a Standard Lisp compiler that is automatically loaded on demand. You should check your system specific user guide to make sure you have such a compiler. To make the compiler active, the switch {\tt COMP}\ttindex{COMP} should be turned on. Any further definitions input after this will be compiled automatically. If the compiler used is a derivative version of the original Griss-Hearn compiler, (M. L. Griss and A. C. Hearn, ``A Portable LISP Compiler", SOFTWARE --- Practice and Experience 11 (1981) 541-605), there are other switches that might also be used in this regard. However, these additional switches are not supported in all compilers. 
They are as follows:
%\ttindex{PLAP}\ttindex{PGWD}\ttindex{PWRDS}
{\renewcommand{\arraystretch}{2}
\begin{tabular}{lp{\reduceboxwidth}}
{\tt PLAP} & If ON, causes the printing of the portable macros produced by
the compiler; \\
%
{\tt PGWD} & If ON, causes the printing of the actual assembly language
instructions generated from the macros; \\
%
{\tt PWRDS} & If ON, causes a statistics message of the form \newline
{\tt <function> COMPILED, <words> WORDS, <words> LEFT} \newline
to be printed. The first number is the number of words of binary program
space the compiled function took, and the second number is the number of
words left unused in binary program space. \\
\end{tabular}}

\section{Fast Loading Code Generation Program}\index{Fast loading of code}
\label{sec-load}

In most versions of {\REDUCE}, it is possible to take any set of Lisp, Rlisp
or {\REDUCE} commands and build a fast-loading version of them. In Rlisp or
{\REDUCE}, one does the following:
{\small\begin{verbatim}
faslout <filename>;
<commands or IN statements>
faslend;
\end{verbatim}}
To load such a file, one uses the command {\tt LOAD},\ttindex{LOAD} e.g.
{\tt load foo;} or {\tt load foo,bah;}

This process produces a fast-loading version of the original file. In some
implementations, this means another file is created with the same name but a
different extension. For example, in PSL-based systems, the extension is
{\tt b} (for binary). In CSL-based systems, however, this process adds the
fast-loading code to a single file in which all such code is stored.
Particular functions are provided by CSL for managing this file, and these
are described in the CSL user documentation.

In doing this build, as with the production of a Standard Lisp form of such
statements, it is important to remember that some of the commands must be
instantiated during the building process. For example, macros must be
expanded, and some property list operations must happen. The {\REDUCE}
sources should be consulted for further details on this.
% To facilitate this, the {\tt EVAL} and {\tt IGNORE} flags may be
% used.  Note also that there can be no {\tt LOAD} command within the input
% statements.

To avoid excessive printout, input statements should be followed by a \$
instead of the semicolon. With {\tt LOAD}, however, the input doesn't print
out regardless of which terminator is used with the command.

If you subsequently change the source files used in producing a fast-loading
file, don't forget to repeat the above process in order to update the
fast-loading file correspondingly. Remember also that the text which is read
in during the creation of the fast-loading file, in the compiling process
described above, is {\em not\/} stored in your {\REDUCE} environment, but
only translated and output. If you want to use the file just created, you
must then use {\tt LOAD} to load the output of the fast-loading file
generation program.

When the file to be loaded contains a complete package for a given
application, {\tt LOAD\_PACKAGE}\ttindex{LOAD\_PACKAGE} rather than
{\tt LOAD} should be used. The syntax is the same. However,
{\tt LOAD\_PACKAGE} does some additional bookkeeping, such as recording that
the package has now been loaded, which is required for the correct operation
of the system.
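The complete cycle described in this section can be sketched as follows (the
module and file names here are hypothetical, and whether the argument of
{\tt FASLOUT} is given as an identifier or a string may depend on the
implementation):
{\small\begin{verbatim}
faslout myutils;      % start writing fast-loading code
in "myutils.red"$     % read in the source definitions
faslend;              % close the fast-loading file

load myutils;         % load the result when it is needed
\end{verbatim}}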
\section{The Standard Lisp Cross Reference Program}\index{Cross reference} {\tt CREF}\ttindex{CREF} is a Standard Lisp program for processing a set of Standard LISP function definitions to produce: \begin{enumerate} \item A ``summary'' showing: \begin{enumerate} \item A list of files processed; \item A list of ``entry points'' (functions which are not called or are only called by themselves); \item A list of undefined functions (functions called but not defined in this set of functions); \item A list of variables that were used non-locally but not declared {\tt GLOBAL} or {\tt FLUID} before their use; \item A list of variables that were declared {\tt GLOBAL} but not used as {\tt FLUID}s, i.e., bound in a function; \item A list of {\tt FLUID} variables that were not bound in a function so that one might consider declaring them {\tt GLOBAL}s; \item A list of all {\tt GLOBAL} variables present; \item A list of all {\tt FLUID} variables present; \item A list of all functions present. \end{enumerate} \item A ``global variable usage'' table, showing for each non-local variable: \begin{enumerate} \item Functions in which it is used as a declared {\tt FLUID} or {\tt GLOBAL}; \item Functions in which it is used but not declared; \item Functions in which it is bound; \item Functions in which it is changed by {\tt SETQ}. \end{enumerate} \item A ``function usage'' table showing for each function: \begin{enumerate} \item Where it is defined; \item Functions which call this function; \item Functions called by it; \item Non-local variables used. \end{enumerate} \end{enumerate} The program will also check that functions are called with the correct number of arguments, and print a diagnostic message otherwise. The output is alphabetized on the first seven characters of each function name. \subsection{Restrictions} Algebraic procedures in {\REDUCE} are treated as if they were symbolic, so that algebraic constructs will actually appear as calls to symbolic functions, such as {\tt AEVAL}. \subsection{Usage} To invoke the cross reference program, the switch {\tt CREF} \ttindex{CREF} is used. {\tt on cref} causes the cref program to load and the cross-referencing process to begin. After all the required definitions are loaded, {\tt off cref} will cause the cross-reference listing to be produced. For example, if you wish to cross-reference all functions in the file {\tt tst.red}, and produce the cross-reference listing in the file {\tt tst.crf}, the following sequence can be used: {\small\begin{verbatim} out "tst.crf"; on cref; in "tst.red"$ off cref; shut "tst.crf"; \end{verbatim}} To process more than one file, more {\tt IN} statements may be added before the call of {\tt off cref}, or the {\tt IN} statement changed to include a list of files. \subsection{Options} Functions with the flag {\tt NOLIST} will not be examined or output. Initially, all Standard Lisp functions are so flagged. (In fact, they are kept on a list {\tt NOLIST!*}, so if you wish to see references to {\em all} functions, then {\tt CREF} should be first loaded with the command {\tt load cref}, and this variable then set to {\tt NIL}). It should also be remembered that any macros with the property list flag {\tt EXPAND}, or, if the switch {\tt FORCE} is on, without the property list flag {\tt NOEXPAND}, will be expanded before the definition is seen by the cross-reference program, so this flag can also be used to select those macros you require expanded and those you do not. 
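For instance, to see references to {\em all} functions when cross-referencing
the file {\tt tst.red}, one might proceed as follows (a sketch; the symbolic
mode assignment shown is one way of clearing {\tt NOLIST!*}):
{\small\begin{verbatim}
load cref;                  % load CREF explicitly
symbolic(nolist!* := nil);  % clear NOLIST!* so that Standard Lisp
                            % functions are also cross-referenced
on cref;
in "tst.red"$
off cref;
\end{verbatim}}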
\section{Prettyprinting Reduce Expressions}\index{Prettyprinting} {\REDUCE} includes a module for printing {\REDUCE} syntax in a standard format. This module is activated by the switch {\tt PRET}, \ttindex{PRET} which is normally off. Since the system converts algebraic input into an equivalent symbolic form, the printing program tries to interpret this as an algebraic expression before printing it. In most cases, this can be done successfully. However, there will be occasional instances where results are printed in symbolic mode form that bears little resemblance to the original input, even though it is formally equivalent. If you want to prettyprint a whole file, say {\tt off output,msg;} \ttindex{MSG} and (hopefully) only clean output will result. Unlike {\tt DEFN},\ttindex{DEFN} input is also evaluated with {\tt PRET} \ttindex{PRET} on. \section{Prettyprinting Standard Lisp S-Expressions}\index{Prettyprinting} REDUCE includes a module for printing S-expressions in a standard format. The Standard Lisp function for this purpose is {\tt PRETTYPRINT}\ttindex{PRETTYPRINT} which takes a Lisp expression and prints the formatted equivalent. Users can also have their {\REDUCE} input printed in this form by use of the switch {\tt DEFN}.\ttindex{DEFN} This is in fact a convenient way to convert {\REDUCE} (or Rlisp) syntax into Lisp. {\tt off msg;} will prevent warning messages from being printed. NOTE: When {\tt DEFN} is on, input is not evaluated. \chapter {Maintaining {\REDUCE}} {\REDUCE} continues to evolve both in terms of the number of facilities available, and the power of the individual facilities. Corrections are made as bugs are discovered, and awkward features simplified. In order to provide users with easy access to such enhancements, a {\em {\REDUCE} network library\/} has been established from which material can be extracted by anyone with electronic mail access to the Internet computer network. In addition to miscellaneous documents, source and utility files, the library includes a bibliography of papers referencing {\REDUCE} which contains over 800 entries. Instructions on using this library are sent to all registered {\REDUCE} users who provide a network address. If you would like a more complete list of the contents of the library, send to {\em reduce-netlib@rand.org\/} the single line message {\em send index\/} or {\em help}. The current {\REDUCE} information package can be obtained from the network library by including on a separate line {\em send info-package\/} and a demonstration file by including the line {\em send demonstration}. If you prefer, hard copies of the information package and the bibliography are available from the {\REDUCE} secretary at RAND, 1700 Main Street, P.O. Box 2138, Santa Monica, CA 90407-2138 ({\em reduce@rand.org}). Copies of the network library are also maintained at other addresses. At the time of writing, {\em reduce-netlib@can.nl\/} and {\em reduce-netlib@pi.cc.u-tokyo.ac.jp\/} may also be used instead of {\em reduce-netlib@rand.org}. A World Wide Web {\REDUCE} server with URL {\small\begin{verbatim} http://www.rrz.uni-koeln.de/REDUCE/ \end{verbatim}} is also supported. In addition to general information about {\REDUCE}, this server has pointers to the network library, the demonstration versions, examples of {\REDUCE} programming, a set of manuals, and the {\REDUCE} online help system. Finally, there is a {\REDUCE} electronic forum accessible from the same networks. 
This enables {\REDUCE} users to raise questions and discuss ideas concerning the use and development of {\REDUCE} with other users. Additions and changes to the network library and new releases of {\REDUCE} are also announced in this forum. Any user with appropriate electronic mail access is encouraged to register for membership in this forum. To do so, send a message requesting inclusion to \\ {\em reduce-forum-request@rand.org}. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% BeginCodemist %%% Taken from Reduce.sty % \s{...} is a sentential form in descriptions. Enclosed \em text in <...> \newcommand{\s}[1] {$<${\em #1}$>$} % \meta{...} is an alternative sentential form in descriptions using \it. %\newcommand{\meta}[1]{\mbox{$\langle$\it#1\/$\rangle$}} % \k{...} is a keyword. Just do in bold for the moment. \renewcommand{\k}[1] {{\bf #1}} % \f is a function name. Just do this as tt. \newcommand{\f}[1] {{\tt #1}} % An example macro for numbering and indenting examples. \newcounter{examplectr} \newcommand{\example}{\refstepcounter{examplectr} \noindent{\bf Example \theexamplectr}} \part{Additional {\REDUCE} Documentation} \setcounter{examplectr}{0} The documentation in this section was written using to a large part the \LaTeX\ files provided by the authors, and distributed with \REDUCE. There has been extensive editing and much rewriting, so the responsibility for this part of the manual rests with the editor, John Fitch. It is hoped that this version of the documentation contains sufficient information about the facilities available that a user may be able to progress. It deliberately avoids discussions of algorithms or advanced use; for these the package author's own documentation should be consulted. In general the package documentation will contain more examples and in some cases additional facilities such as tracing. \chapter{ALGINT: Integration of square roots} \label{ALGINT} \typeout{{ALGINT: Integration of square roots}} {\footnotesize \begin{center} James Davenport \\ School of Mathematical Sciences \\ University of Bath \\ Bath BA2 7AY \\ England \\[0.05in] e--mail: jhd@maths.bath.ac.uk \end{center} } The package supplies no new functions, but extends the {\tt INT}\ttindex{INT} operator for indefinite integration so it can handle a wider range of expressions involving square roots. When it is loaded the controlling switch {\tt ALGINT}\ttindex{ALGINT} is turned on. If it is desired to revert to the standard integrator, then it may be turned off. The normal integrator can deal with some square roots but in an unsystematic fashion. {\small\begin{verbatim} 1: load_package algint; 2: int(sqrt(sqrt(a^2+x^2)+x)/x,x); 2 2 sqrt(a)*atan((sqrt(a)*sqrt(sqrt(a + x ) + x) 2 2 *sqrt(a + x ) 2 2 - sqrt(a)*sqrt(sqrt(a + x ) + x)*a 2 2 - sqrt(a)*sqrt(sqrt(a + x ) + x)*x)/(2 \end{verbatim}} \newpage {\small\begin{verbatim} 2 2 2 *a )) + 2*sqrt(sqrt(a + x ) + x) 2 2 + sqrt(a)*log(sqrt(sqrt(a + x ) + x) - sqrt(a)) 2 2 - sqrt(a)*log(sqrt(sqrt(a + x ) + x) + sqrt(a)) 3: off algint; 4: int(sqrt(sqrt(a^2+x^2)+x)/x,x); 2 2 sqrt(sqrt(a + x ) + x) int(-------------------------,x) x \end{verbatim}} There is also a switch {\tt TRA},\ttindex{TRA} which may be set on to provide detailed tracing of the algorithm used. This is not recommended for casual use. 
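In summary, the switches controlling the package can be exercised as follows
(a sketch using the integral from the example above; the tracing output is
not shown):
{\small\begin{verbatim}
load_package algint;  % extends INT; the ALGINT switch is now on
on tra;               % optional: detailed tracing of the algorithm
int(sqrt(sqrt(a^2+x^2)+x)/x,x);
off algint;           % revert to the standard integrator
\end{verbatim}}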
\chapter[APPLYSYM: Infinitesimal symmetries]{APPLYSYM: Infinitesimal symmetries of differential equations} \label{APPLYSYM} \typeout{[APPLYSYM: Infinitesimal symmetries]} {\footnotesize \begin{center} Thomas Wolf \\ School of Mathematical Sciences, Queen Mary and Westfield College \\ University of London \\ London E1 4NS, England \\[0.05in] e--mail: T.Wolf@maths.qmw.ac.uk \end{center} } The investigation of infinitesimal symmetries of differential equations (DEs) with computer algebra programs attracted considerable attention over the last years. The package {\tt APPLYSYM} concentrates on the implementation of applying symmetries for calculating similarity variables to perform a point transformation which lowers the order of an ODE or effectively reduces the number of explicitly occuring independent variables of a PDE(-system) and for generalising given special solutions of ODEs/PDEs with new constant parameters. A prerequisite for applying symmetries is the solution of first order quasilinear PDEs. The corresponding program {\tt QUASILINPDE}\ttindex{QUASILINPDE} can as well be used without {\tt APPLYSYM}\ttindex{APPLYSYM} for solving first order PDEs which are linear in their first order derivative and otherwise at most rationally non-linear. The following two PDEs are equations (2.40) and (3.12) taken from E. Kamke, "Loesungsmethoden und Loesungen von Differential- gleichungen, Partielle Differentialgleichungen erster Ordnung", B.G. Teubner, Stuttgart (1979). \newpage {\small {\small\begin{verbatim} ------------------------ Equation 2.40 ------------------------ 2 3 4 The quasilinear PDE: 0 = df(z,x)*x*y + 2*df(z,y)*y - 2*x 2 2 2 + 4*x *y*z - 2*y *z . The equivalent characteristic system: 3 4 2 2 2 0=2*(df(z,y)*y - x + 2*x *y*z - y *z ) 2 0=y *(2*df(x,y)*y - x) for the functions: x(y) z(y) . The general solution of the PDE is given through 4 2 2 log(y)*x - log(y)*x *y*z - y *z sqrt(y)*x 0 = ff(----------------------------------,-----------) 4 2 y x - x *y*z with arbitrary function ff(..). ------------------------ Equation 3.12 ------------------------ The quasilinear PDE: 0 = df(w,x)*x + df(w,y)*a*x + df(w,y)*b*y + df(w,z)*c*x + df(w,z)*d*y + df(w,z)*f*z. The equivalent characteristic system: 0=df(w,x)*x 0=df(z,x)*x - c*x - d*y - f*z 0=df(y,x)*x - a*x - b*y for the functions: z(x) y(x) w(x) . The general solution of the PDE is given through a*x + b*y - y 0 = ff(---------------,( - a*d*x + b*c*x + b*f*z - b*z - c*f*x b b x *b - x 2 f f f 2 f - d*f*y + d*y - f *z + f*z)/(x *b*f - x *b - x *f + x *f) ,w) with arbitrary function ff(..). \end{verbatim}} } The program {\tt DETRAFO}\ttindex{DETRAFO} can be used to perform point transformations of ODEs/PDEs (and -systems). For detailed explanations the user is referred to the paper {\em Programs for Applying Symmetries of PDEs} by Thomas Wolf, supplied as part of the Reduce documentation as {\tt applysym.tex} and published in the Proceedings of ISSAC'95 - 7/95 Montreal, Canada, ACM Press (1995). \chapter{ARNUM: An algebraic number package} \label{ARNUM} \typeout{{ARNUM: An algebraic number package}} {\footnotesize \begin{center} Eberhard Schr\"{u}fer \\ Institute SCAI.Alg \\ German National Research Center for Information Technology (GMD) \\ Schloss Birlinghoven \\ D-53754 Sankt Augustin, Germany \\[0.05in] e--mail: schruefer@gmd.de \end{center} } Algebraic numbers are the solutions of an irreducible polynomial over some ground domain. 
\index{i} The algebraic number $i$ (imaginary unit),\index{imaginary unit} for example, would be defined by the polynomial $i^2 + 1$. The arithmetic of algebraic number $s$ can be viewed as a polynomial arithmetic modulo the defining polynomial. The {\tt ARNUM}\ttindex{ARNUM} package provides a mechanism to define other algebraic numbers, and compute with them. \section{DEFPOLY}\ttindex{DEFPOLY} {\tt DEFPOLY} takes as its argument the defining polynomial for an algebraic number, or a number of defining polynomials for different algebraic numbers, and arranges that arithmetic with the new symbol(s) is performed relative to these polynomials. {\small\begin{verbatim} load_package arnum; defpoly sqrt2**2-2; 1/(sqrt2+1); SQRT2 - 1 (x**2+2*sqrt2*x+2)/(x+sqrt2); X + SQRT2 on gcd; (x**3+(sqrt2-2)*x**2-(2*sqrt2+3)*x-3*sqrt2)/(x**2-2); 2 X - 2*X - 3 -------------- X - SQRT2 off gcd; sqrt(x**2-2*sqrt2*x*y+2*y**2); ABS(X - SQRT2*Y) \end{verbatim}} The following example introduces both $\sqrt 2$ and $5^{1 \over 3}$: {\small\begin{verbatim} defpoly sqrt2**2-2,cbrt5**3-5; *** defining polynomial for primitive element: 6 4 3 2 A1 - 6*A1 - 10*A1 + 12*A1 - 60*A1 + 17 sqrt2; 5 4 3 2 48/1187*A1 + 45/1187*A1 - 320/1187*A1 - 780/1187*A1 + 735/1187*A1 - 1820/1187 sqrt2**2; 2 \end{verbatim}} \section{SPLIT\_FIELD}\ttindex{SPLIT\_FIELD} The function {\tt SPLIT\_FIELD} calculates a primitive element of minimal degree for which a given polynomial splits into linear factors. {\small\begin{verbatim} split_field(x**3-3*x+7); *** Splitting field is generated by: 6 4 2 A5 - 18*A5 + 81*A5 + 1215 4 2 {1/126*A5 - 5/42*A5 - 1/2*A5 + 2/7, 4 2 - (1/63*A5 - 5/21*A5 + 4/7), 4 2 1/126*A5 - 5/42*A5 + 1/2*A5 + 2/7} for each j in ws product (x-j); 3 X - 3*X + 7 \end{verbatim}} \chapter{ASSIST: Various Useful Utilities} \label{ASSIST} \typeout{{ASSIST: Various Useful Utilities}} {\footnotesize \begin{center} Hubert Caprasse \\ D\'epartement d'Astronomie et d'Astrophysique \\ Institut de Physique, B--5, Sart Tilman \\ B--4000 LIEGE 1, Belgium\\[0.05in] e--mail: caprasse@vm1.ulg.ac.be \end{center} } The {\tt ASSIST}\ttindex{ASSIST} package provides a number of general purpose functions which adapt \REDUCE\ to various calculational strategies. All the examples in this section require the {\tt ASSIST} package to be loaded. \section{Control of Switches} The two functions \f{SWITCHES, SWITCHORG} \ttindex{SWITCHES}\ttindex{SWITCHORG} have no argument and are called as if they were mere identifiers. \f{SWITCHES} displays the current status of the most often used switches when manipulating rational functions; {\tt EXP}, {\tt DIV}, {\tt MCD}, {\tt GCD}, {\tt ALLFAC}, {\tt INTSTR}, {\tt RAT}, {\tt RATIONAL}, {\tt FACTOR}. The switch {\tt DISTRIBUTE} which controls the handling of distributed polynomials is included as well (see section~\ref{DISTRIBUTE}). \f{SWITCHORG} resets (almost) {\em all} switches in the status they have when {\bf entering} into \REDUCE. (See also {\tt RESET}, chapter~\ref{RESET}\ttindex{RESET}). The new switch {\tt DISTRIBUTE} facilitates changing polynomials to a distributed form. \section{Manipulation of the List Structure} Functions for list manipulation are provided and are generalised to deal with the new structure {\tt BAG}. \begin{itemize} \item[i.] 
Generation of a list of length $n$ with its elements initialised to 0 and also to append to a list $l$ sufficient zeros to make it of length $n$:\ttindex{MKLIST} {\small\begin{verbatim} MKLIST n; %% n is an INTEGER MKLIST(l,n); %% l is List-like, n is an INTEGER \end{verbatim}} \item[ii.] Generation of a list of sublists of length $n$ containing $p$ elements equal to $0$ and $n-p$ elements equal to $1$. {\small\begin{verbatim} SEQUENCES 2; ==> {{0,0},{0,1},{1,0},{1,1}} \end{verbatim}} The function \f{KERNLIST}\ttindex{KERNLIST} transforms any prefix of a kernel into the {\bf \verb+list+} prefix. The output list is a copy: {\small\begin{verbatim} KERNLIST (<kernel>); ==> {<kernel arguments>} \end{verbatim}} There are four functions to delete elements from lists. The \f{DELETE} function deletes the first occurrence of its first argument from the second, while \f{REMOVE} removes a numbered element. \f{DELETE\_ALL} eliminates from a list {\em all} elements equal to its first argument. \f{DELPAIR} acts on list of pairs and eliminates from it the {\em first} pair whose first element is equal to its first argument:\ttindex{DELETE}\ttindex{REMOVE}\ttindex{DELETE\_ALL}\ttindex{DELPAIR} {\small\begin{verbatim} DELETE(x,{a,b,x,f,x}); ==> {a,b,f,x} REMOVE({a,b,x,f,x},3); ==> {a,b,f,x} DELETE_ALL(x,{a,b,x,f,x}); ==> {a,b,f} DELPAIR(a,{{a,1},{b,2},{c,3}}; ==> {{b,2},{c,3}} \end{verbatim}} \item[iv.] The function \f{ELMULT}\ttindex{ELMULT} returns an {\em integer} which is the {\em multiplicity} of its first argument in the list which is its second argument. The function \f{FREQUENCY}\ttindex{FREQUENCY} gives a list of pairs whose second element indicates the number of times the first element appears inside the original list: {\small\begin{verbatim} ELMULT(x,{a,b,x,f,x}) ==> 2 FREQUENCY({a,b,c,a}); ==> {{a,2},{b,1},{c,1}} \end{verbatim}} \item[v.] The function \f{INSERT}\ttindex{INSERT} inserts a given object into a list at the wanted position. The functions \f{INSERT\_KEEP\_ORDER}\ttindex{INSERT\_KEEP\_ORDER} and \f{MERGE\_LIST}\ttindex{MERGE\_LIST} keep a given ordering when inserting one element inside a list or when merging two lists. Both have 3 arguments. The last one is the name of a binary boolean ordering function: {\small\begin{verbatim} ll:={1,2,3}$ INSERT(x,ll,3); ==> {1,2,x,3} INSERT_KEEP_ORDER(5,ll,lessp); ==> {1,2,3,5} MERGE_LIST(ll,ll,lessp); ==> {1,1,2,2,3,3} \end{verbatim}} \item[vi.] Algebraic lists can be read from right to left or left to right. They {\em look} symmetrical. It is sometimes convenient to have functions which reflect this. So, as well as \f{FIRST} and \f{REST} this package provides the functions \f{LAST}\ttindex{LAST} and \f{BELAST}\ttindex{BELAST}. \f{LAST} gives the last element of the list while \f{BELAST} gives the list {\em without} its last element. \\ Various additional functions are provided. They are: \f{CONS}, \f{(.)}, \f{POSITION}, \f{DEPTH}, \f{PAIR}, \f{APPENDN}, \f{REPFIRST}, \f{REPLAST} \ttindex{CONS}\ttindex{.}\ttindex{POSITION}\ttindex{DEPTH} \ttindex{PAIR}\ttindex{APPENDN}\ttindex{REPLAST}\ttindex{REPLAST} The token ``dot'' needs a special comment. It corresponds to several different operations. \begin{enumerate} \item If one applies it on the left of a list, it acts as the \f{CONS} function. Note however that blank spaces are required around the dot: {\small\begin{verbatim} 4 . 
{\small\begin{verbatim}
4 . {a,b};   ==> {4,a,b}
\end{verbatim}}
\item If one applies it on the right of a list, it has the same effect
as the \f{PART} operator:
{\small\begin{verbatim}
{a,b,c}.2;   ==> b
\end{verbatim}}
\item If one applies it to 4--dimensional vectors, it acts as in the
HEPHYS package (chapter~\ref{HEPHYS}).
\end{enumerate}
\f{POSITION} returns the position of the first occurrence of x in a list
or a message if x is not present in it. \f{DEPTH} returns an
{\em integer} equal to the number of levels where a list is found if and
only if this number is the {\em same} for each element of the list;
otherwise it returns a message telling the user that the list is of
{\em unequal depth}. \f{PAIR} has two arguments which must be lists. It
returns a list whose elements are {\em lists of two elements.} The
$n^{th}$ sublist contains the $n^{th}$ element of the first list and the
$n^{th}$ element of the second list. These types of lists are called
{\em association lists} or ALISTS in the following. \f{APPENDN} has
{\em any} number of lists as arguments, and appends them all.
\f{REPFIRST} has two arguments. The first one is any object, the second
one is a list. It replaces the first element of the list by the object.
\f{REPREST} has also two arguments. It replaces the rest of the list by
its first argument and returns the new list without destroying the
original list.
{\small\begin{verbatim}
ll:={{a,b}}$
ll1:=ll.1;                  ==> {a,b}
ll.0;                       ==> list
0 . ll;                     ==> {0,{a,b}}
DEPTH ll;                   ==> 2
PAIR(ll1,ll1);              ==> {{a,a},{b,b}}
REPFIRST(new,ll);           ==> {new}
ll3:=APPENDN(ll1,ll1,ll1);  ==> {a,b,a,b,a,b}
POSITION(b,ll3);            ==> 2
REPREST(new,ll3);           ==> {a,new}
\end{verbatim}}
\item[vii.] The functions \f{ASFIRST}\ttindex{ASFIRST},
\f{ASLAST}\ttindex{ASLAST}, \f{ASREST}\ttindex{ASREST},
\f{ASFLIST}\ttindex{ASFLIST}, \f{ASSLIST}\ttindex{ASSLIST}, and
\f{RESTASLIST}\ttindex{RESTASLIST} act on ALISTS or on lists of lists of
well defined depths and have two arguments. The first is the key object
which one seeks to associate in some way with an element of the
association list which is the second argument. \f{ASFIRST} returns the
pair whose first element is equal to the first argument. \f{ASLAST}
returns the pair whose last element is equal to the first argument.
\f{ASREST} needs a {\em list} as its first argument. The function seeks
the first sublist of a list of lists (which is its second argument) equal
to its first argument and returns it. \f{RESTASLIST} has a {\em list of
keys} as its first argument. It returns the collection of pairs which
meet the criterion of \f{ASREST}. \f{ASFLIST} returns a list containing
{\em all pairs} which satisfy the criterion of the function \f{ASFIRST}.
So the output is also an ALIST or a list of lists. \f{ASSLIST} returns a
list which contains {\em all pairs} which have their second element equal
to the first argument.
{\small\begin{verbatim}
lp:={{a,1},{b,2},{c,3}}$
ASFIRST(a,lp);         ==> {a,1}
ASLAST(1,lp);          ==> {a,1}
ASREST({1},lp);        ==> {a,1}
RESTASLIST({a,b},lp);  ==> {{1},{2}}
lpp:=APPEND(lp,lp)$
ASFLIST(a,lpp);        ==> {{a,1},{a,1}}
ASSLIST(1,lpp);        ==> {{a,1},{a,1}}
\end{verbatim}}
\end{itemize}
\section{The Bag Structure and its Associated Functions}
The LIST structure of \REDUCE\ is very convenient for manipulating groups
of objects which are, {\em a priori}, unknown.
This structure is endowed with other properties such as ``mapping'' {\em i.e.\ }the fact that if \verb+OP+ is an operator one gets, by default, {\small\begin{verbatim} OP({x,y}); ==> {OP(x),OP(y)} \end{verbatim}} It is not permitted to submit lists to the operations valid on rings so that lists cannot be indeterminates of polynomials. Frequently procedure arguments cannot be lists. At the other extreme, so to say, one has the \verb+KERNEL+ structure associated to the algebraic declaration \verb+operator+. This structure behaves as an ``unbreakable'' one and, for that reason, behaves like an ordinary identifier. It may generally be bound to all non-numeric procedure parameters and it may appear as an ordinary indeterminate inside polynomials. \\ The \verb+BAG+ structure is intermediate between a list and an operator. From the operator it borrows the property to be a \verb+KERNEL+ and, therefore, may be an indeterminate of a polynomial. From the list structure it borrows the property to be a {\em composite} object.\\[5pt] \mbox{\underline{{\bf Definition}:\hfill}}\\[4pt] A bag is an object endowed with the following properties: \begin{enumerate} \item It is a \verb+KERNEL+ composed of an atomic prefix (its envelope) and its content (miscellaneous objects). \item Its content may be changed in an analogous way as the content of a list. During these manipulations the name of the bag is {\em conserved}. \item Properties may be given to the envelope. For instance, one may declare it \verb+NONCOM+ or \verb+SYMMETRIC+ etc.\ $\ldots$ \end{enumerate} \vspace{5pt} \mbox{\underline{{\bf Available Functions}:\hfill}} \begin{itemize} \item[i.] A default bag envelope \verb+BAG+\index{BAG} is defined. It is a reserved identifier. An identifier other than \verb+LIST+ or one which is already associated with a boolean function may be defined as a bag envelope through the command \f{PUTBAG}\ttindex{PUTBAG}. In particular, any operator may also be declared to be a bag. {\bf When and only when} the identifier is not an already defined function does \f{PUTBAG} puts on it the property of an OPERATOR PREFIX. The command: {\small\begin{verbatim} PUTBAG id1,id2,....idn; \end{verbatim}} declares \verb+id1,.....,idn+ as bag envelopes. Analogously, the command\ttindex{CLEARBAG} {\small\begin{verbatim} CLEARBAG id1,...idn; \end{verbatim}} eliminates the bag property on \verb+id1,...,idn+. \item[ii.] The boolean function \f{BAGP}\ttindex{BAGP} detects the bag property. {\small\begin{verbatim} aa:=bag(x,y,z)$ if BAGP aa then "ok"; ==> ok \end{verbatim}} \item[iii.] Most functions defined above for lists do also work for bags. Moreover functions subsequently defined for SETS (see section~\ref{A-SETS}) also work. However, because of the conservation of the envelope, they act somewhat differently. {\small\begin{verbatim} PUTBAG op; ==> T aa:=op(x,y,z)$ FIRST op(x,y,z); ==> op(x) REST op(x,y,z); ==> op(y,z) BELAST op(x,y,z); ==> op(x,y) APPEND(aa,aa); ==> op(x,y,z,x,y,z) LENGTH aa; ==> 3 DEPTH aa; ==> 1 \end{verbatim}} When ``appending'' two bags with {\em different} envelopes, the resulting bag gets the name of the one bound to the first parameter of \f{APPEND}. The function \f{LENGTH} gives the actual number of variables on which the operator (or the function) depends. The NAME of the ENVELOPE is kept by the functions \f{FIRST}, \f{SECOND}, \f{LAST} and \f{BELAST}. \item[iv.] 
The connection between the list and the bag structures is made easy thanks to \f{KERNLIST} which transforms a bag into a list and thanks to the coercion function \f{LISTBAG}\ttindex{LISTBAG}. This function has 2 arguments and is used as follows: {\small\begin{verbatim} LISTBAG(<list>,<id>); ==> <id>(<arg_list>) \end{verbatim}} The identifier \verb+<id>+ if allowed is automatically declared as a bag envelope or an error message is generated. Finally, two boolean functions which work both for bags and lists are provided. They are \f{BAGLISTP}\ttindex{BAGLISTP} and \f{ABAGLISTP}\ttindex{ABAGLISTP}. They return T or NIL (in a conditional statement) if their argument is a bag or a list for the first one, if their argument is a list of sublists or a bag containing bags for the second one. \end{itemize} \section{Sets and their Manipulation Functions} \label{A-SETS} The ASSIST package makes the Standard LISP set functions available in algebraic mode and also {\em generalises} them so that they can be applied on bag--like objects as well. \begin{itemize} \item[i.] The constructor \f{MKSET}\ttindex{MKSET} transforms a list or bag into a set by eliminating duplicates. {\small\begin{verbatim} MKSET({1,a,a1}); ==> {1,a} MKSET bag(1,a,a1); ==> bag(1,a) \end{verbatim}} \f{SETP}\ttindex{SETP} is a boolean function which recognises set--like objects. \item[ii.] The standard functions are \f{UNION}\ttindex{UNION}, \f{INTERSECT}\ttindex{INTERSECT}, \f{DIFFSET}\ttindex{DIFFSET} and \f{SYMDIFF}\ttindex{SYMDIFF}. They have two arguments which must be sets; otherwise an error message is issued. \end{itemize} \section{General Purpose Utility Functions} \begin{itemize} \item[i.] The functions \f{MKIDNEW}\ttindex{MKIDNEW}, \f{DELLASTDIGIT}\ttindex{DELLASTDIGIT}, \f{DETIDNUM}\ttindex{DETIDNUM}, \f{LIST\_TO\_IDS}\ttindex{LIST\_TO\_IDS} handle identifiers. \f{MKIDNEW}\ttindex{MKIDNEW} is a variant of \f{MKID}. \f{MKIDNEW} has either 0 or 1 argument. It generates an identifier which has not yet been used before. {\small\begin{verbatim} MKIDNEW(); ==> g0001 MKIDNEW(a); ==> ag0002 \end{verbatim}} \f{DELLASTDIGIT} takes an integer as argument, it strips it from its last digit. {\small\begin{verbatim} DELLASTDIGIT 45; ==> 4 \end{verbatim}} \f{DETIDNUM}, determines the trailing integer from an identifier. It is convenient when one wants to make a do loop starting from a set of indices $ a_1, \ldots , a_{n} $. {\small\begin{verbatim} DETIDNUM a23; ==> 23 \end{verbatim}} \f{LIST\_to\_IDS} generalises the function \f{MKID} to a list of atoms. It creates and interns an identifier from the concatenation of the atoms. The first atom cannot be an integer. {\small\begin{verbatim} LIST_TO_IDS {a,1,id,10}; ==> a1id10 \end{verbatim}} The function \f{ODDP}\ttindex{ODDP} detects odd integers. The function \f{FOLLOWLINE}\ttindex{FOLLOWLINE} is convenient when using the function \f{PRIN2} for controlling layout. {\small\begin{verbatim} <<prin2 2; prin2 5>>$ 25 <<prin2 2; followline(3); prin2 5>>$ 2 5 \end{verbatim}} The function \f{RANDOMLIST}\ttindex{RANDOMLIST} generates a list of positive random numbers. It takes two arguments which are both integers. The first one indicates the range inside which the random numbers are chosen. The second one indicates how many numbers are to be generated. {\small\begin{verbatim} RANDOMLIST(10,5); ==> {2,1,3,9,6} \end{verbatim}} \f{MKRANDTABL}\ttindex{MKRANDTABL} generates a table of random numbers. This table is either a one or two dimensional array. 
The base of random numbers may be either an integer or a floating point
number. In this latter case the switch \f{rounded} must be ON. The
function has three arguments. The first is a list of one or two
integers. The second is the base chosen to generate the random numbers.
The third is the chosen name for the generated array. In the example
below a two-dimensional table of integer random numbers is generated as
array elements of the identifier {\tt ar}.
{\small\begin{verbatim}
MKRANDTABL({3,4},10,ar);  ==>  *** array ar redefined
                               {3,4}
\end{verbatim}}
The output is the array dimension.
\f{COMBNUM(n,p)}\ttindex{COMBNUM} gives the number of combinations of $n$
objects taken $p$ at a time. It has the two integer arguments $n$ and
$p$.
\f{PERMUTATIONS(n)}\ttindex{PERMUTATIONS} gives the list of permutations
on $n$ objects, each permutation being represented as a list.
\f{CYCLICPERMLIST}\ttindex{CYCLICPERMLIST} gives the list of {\em cyclic}
permutations. For both functions, the argument may also be a {\tt bag}.
{\small\begin{verbatim}
PERMUTATIONS {1,2};      ==> {{1,2},{2,1}}
CYCLICPERMLIST {1,2,3};  ==> {{1,2,3},{2,3,1},{3,1,2}}
\end{verbatim}}
\f{COMBINATIONS}\ttindex{COMBINATIONS} gives a list of combinations of
$n$ objects taken $p$ at a time. The first argument is a list (or a bag)
and the second is the integer $p$.
{\small\begin{verbatim}
COMBINATIONS({1,2,3},2);  ==> {{2,3},{1,3},{1,2}}
\end{verbatim}}
\f{REMSYM}\ttindex{REMSYM} is a command that erases the effect of the
\REDUCE\ commands {\tt symmetric} or {\tt antisymmetric}.
\f{SYMMETRIZE}\ttindex{SYMMETRIZE} is a powerful function which generates
a symmetric expression. It has 3 arguments. The first is a list (or a
list of lists) containing the expressions which will appear as variables
for a kernel. The second argument is the kernel-name and the third is a
permutation function which exists either in algebraic or in symbolic
mode. This function may have been constructed by the user. Within this
package the two functions \f{PERMUTATIONS} and \f{CYCLICPERMLIST} may be
used.
{\small\begin{verbatim}
ll:={a,b,c}$
SYMMETRIZE(ll,op,cyclicpermlist);
      ==> OP(A,B,C) + OP(B,C,A) + OP(C,A,B)

SYMMETRIZE(list ll,op,cyclicpermlist);
      ==> OP({A,B,C}) + OP({B,C,A}) + OP({C,A,B})
\end{verbatim}}
Notice that taking for the first argument a list of lists gives rise to
an expression where each kernel has a {\em list as argument}. Another
peculiarity of this function is that, unless a pattern matching is made
on the operator \verb+OP+, it needs to be reevaluated. Here is an
illustration:
{\small\begin{verbatim}
op(a,b,c):=a*b*c$
SYMMETRIZE(ll,op,cyclicpermlist);
      ==> OP(A,B,C) + OP(B,C,A) + OP(C,A,B)

for all x let op(x,a,b)=sin(x*a*b);
SYMMETRIZE(ll,op,cyclicpermlist);
      ==> OP(B,C,A) + SIN(A*B*C) + OP(A,B,C)
\end{verbatim}}
The functions \f{SORTNUMLIST}\ttindex{SORTNUMLIST} and
\f{SORTLIST}\ttindex{SORTLIST} are functions which sort lists. They use
{\em bubblesort} and {\em quicksort} algorithms. \f{SORTNUMLIST} takes as
argument a list of numbers. It sorts it in increasing order. \f{SORTLIST}
is a generalisation of the above function. It sorts the list according to
any well defined ordering. Its first argument is the list and its second
argument is the ordering function. The content of the list need not be
numbers but must be such that the ordering function has a meaning.
{\small\begin{verbatim}
l:={1,3,4,0}$
SORTNUMLIST l;      ==> {0,1,3,4}
ll:={1,a,tt,z}$
SORTLIST(ll,ordp);  ==> {a,z,tt,1}
\end{verbatim}}
Note: using these functions for kernels or bags may be dangerous since
they are destructive. If it is needed, it is recommended first to apply
\f{KERNLIST} to them. The function \f{EXTREMUM}\ttindex{EXTREMUM} is a
generalisation of the functions \f{MIN} and \f{MAX} to include general
orderings. It is a function of two arguments. The first is the list and
the second is the ordering function. With the list \verb+ll+ defined in
the last example, one gets
{\small\begin{verbatim}
EXTREMUM(ll,ordp);  ==> 1
\end{verbatim}}
\item[iii.] There are four functions to identify dependencies.
\f{FUNCVAR}\ttindex{FUNCVAR} takes any expression as argument and returns
the set of variables on which it depends. Constants are eliminated.
{\small\begin{verbatim}
FUNCVAR(e+pi+sin(log(y)));  ==> {y}
\end{verbatim}}
\f{DEPATOM}\ttindex{DEPATOM} has an {\bf atom} as argument. It returns
its argument if it is a number or if no dependency has previously been
declared. Otherwise, it returns the list of variables on which it depends
as declared in various {\tt DEPEND} declarations.
{\small\begin{verbatim}
DEPEND a,x,y;
DEPATOM a;  ==> {x,y}
\end{verbatim}}
The functions \f{EXPLICIT}\ttindex{EXPLICIT} and
\f{IMPLICIT}\ttindex{IMPLICIT} make the dependencies explicit or
implicit.
{\small\begin{verbatim}
depend a,x;
depend x,y,z;
EXPLICIT a;   ==> a(x(y,z))
IMPLICIT ws;  ==> a
\end{verbatim}}
These are useful when one does not know the names of the variables and
(or) the nature of the dependencies.
\f{KORDERLIST}\ttindex{KORDERLIST} is a zero argument function which
displays the current ordering of kernels.
{\small\begin{verbatim}
KORDER x,y,z;
KORDERLIST;  ==> (x,y,z)
\end{verbatim}}
\item[iv.] The function \f{SIMPLIFY}\ttindex{SIMPLIFY}, which takes an
arbitrary expression as argument, {\em forces} down-to-the-bottom
simplification of the expression. It is useful with \f{SYMMETRIZE}. It
has also proved useful to simplify some output expressions of the package
EXCALC (chapter~\ref{EXCALC}).
{\small\begin{verbatim}
l:=op(x,y,z)$
op(x,y,z):=x*y*z$
SYMMETRIZE(l,op,cyclicpermlist);
      ==> op(x,y,z)+op(y,z,x)+op(z,x,y)
SIMPLIFY ws;
      ==> op(y,z,x)+op(z,x,y)+x*y*z
\end{verbatim}}
\item[v.] Filtering functions for lists.
\f{CHECKPROLIST}\ttindex{CHECKPROLIST} is a boolean function which checks
if the elements of a list have a definite property. Its first argument is
the list, and its second argument is a boolean function
(\f{FIXP NUMBERP $\ldots$}) or an ordering function (as \f{ORDP}).
\f{EXTRACTLIST}\ttindex{EXTRACTLIST} extracts from the list given as its
first argument the elements which satisfy the boolean function given as
its second argument.
{\small\begin{verbatim}
l:={1,a,b,"st"}$
EXTRACTLIST(l,fixp);     ==> {1}
EXTRACTLIST(l,stringp);  ==> {st}
\end{verbatim}}
\end{itemize}
\section{Properties and Flags}
It may be useful to provide analogous functions in algebraic mode to the
properties and flags of LISP. Just using the symbolic mode functions to
alter property lists of objects may easily destroy the integrity of the
system. The functions described here {\bf do ignore} the property lists
and flags already defined by the system itself. They generate and track
the {\em additional properties and flags} that the user creates with
them. They offer the possibility of working on property lists in an
algebraic context.
\begin{description}
\item[i. Flags] To a given identifier, one may associate another one
linked to it ``in the background''. The three functions
\f{PUTFLAG}\ttindex{PUTFLAG}, \f{DISPLAYFLAG}\ttindex{DISPLAYFLAG} and
\f{CLEARFLAG}\ttindex{CLEARFLAG} handle them. \f{PUTFLAG} has 3
arguments. The first is the identifier or a list of identifiers, the
second is the name of the flag, the third is T (true) or 0 (zero). When
the third argument is T, it creates the flag; when it is 0 it destroys
it.
{\small\begin{verbatim}
PUTFLAG(z1,flag_name,t);        ==> flag_name
PUTFLAG({z1,z2},flag1_name,t);  ==> t
PUTFLAG(z2,flag1_name,0);       ==>
\end{verbatim}}
\f{DISPLAYFLAG} allows one to extract flags. Continuing the example:
{\small\begin{verbatim}
DISPLAYFLAG z1;  ==> {flag_name,flag1_name}
DISPLAYFLAG z2;  ==> {}
\end{verbatim}}
\f{CLEARFLAG} is a command which clears {\em all} flags associated with
the identifiers $id_1, \ldots , id_n$.
\item[ii. Properties] \f{PUTPROP}\ttindex{PUTPROP} has four arguments.
The second argument is the {\em indicator} of the property. The third
argument may be {\em any valid expression}. The fourth one is also T or
0.
{\small\begin{verbatim}
PUTPROP(z1,property,x^2,t);  ==> z1
\end{verbatim}}
In general, one enters
{\small\begin{verbatim}
PUTPROP(LIST(idp1,idp2,..),<propname>,<value>,T);
\end{verbatim}}
If the last argument is 0 then the property is removed. To display a
specific property, one uses \f{DISPLAYPROP} which takes two arguments.
The first is the name of the identifier, the second is the indicator of
the property.
{\small\begin{verbatim}
                                         2
DISPLAYPROP(z1,property); ==> {property,x }
\end{verbatim}}
Finally, \f{CLEARPROP} is a nary command which clears {\em all}
properties of the identifiers which appear as arguments.
\end{description}
\section{Control Functions}
The ASSIST package also provides additional functions which improve the
user's control of the environment.
\begin{itemize}
\item[i.] The first set of functions is composed of unary and binary
boolean functions. They are:
{\small\begin{verbatim}
ALATOMP x;     x is anything.
ALKERNP x;     x is anything.
DEPVARP(x,v);  x is anything.
               (v is an atom or a kernel)
\end{verbatim}}
\f{ALATOMP}\ttindex{ALATOMP} has the value T iff x is an integer or an
identifier {\em after} it has been evaluated down to the bottom.
\f{ALKERNP}\ttindex{ALKERNP} has the value T iff x is a kernel {\em
after} it has been evaluated down to the bottom.
\f{DEPVARP}\ttindex{DEPVARP} returns T iff the expression x depends on v
at {\bf any level}. The above functions together with
\f{PRECP}\ttindex{PRECP} have been declared operator functions to ease
the verification of their value. \f{NORDP}\ttindex{NORDP} is essentially
equivalent to \verb+not+ \f{ORDP} when inside a conditional statement.
Otherwise, it can be used while \verb+not+ \f{ORDP} cannot.
\item[ii.] The next functions allow one to {\em analyse} and to {\em
clean} the environment of \REDUCE\ which is created by the user while
working interactively. Two functions are provided:\\
\f{SHOW}\ttindex{SHOW} allows one to list the various identifiers already
assigned and to see their type. \f{SUPPRESS}\ttindex{SUPPRESS}
selectively clears the used identifiers or clears them all. It is to be
stressed that identifiers assigned from the input of files are {\bf
ignored}.
Both functions have one argument and the same options for this argument:
{\small\begin{verbatim}
SHOW (SUPPRESS) all
SHOW (SUPPRESS) scalars
SHOW (SUPPRESS) lists
SHOW (SUPPRESS) saveids (for saved expressions)
SHOW (SUPPRESS) matrices
SHOW (SUPPRESS) arrays
SHOW (SUPPRESS) vectors (contains vector, index and tvector)
SHOW (SUPPRESS) forms
\end{verbatim}}
The option \verb+all+ is the most convenient for \f{SHOW} but it may take
time to get the answer after one has worked several hours. When entering
\REDUCE\ the option \verb+all+ for \f{SHOW} gives:
{\small\begin{verbatim}
SHOW all;  ==>
scalars are: NIL
arrays are: NIL
lists are: NIL
matrices are: NIL
vectors are: NIL
forms are: NIL
\end{verbatim}}
It is a convenient way to remember the various options. Starting from a
fresh environment
{\small\begin{verbatim}
a:=b:=1$
SHOW scalars;      ==> scalars are: (A B)
SUPPRESS scalars;  ==> t
SHOW scalars;      ==> scalars are: NIL
\end{verbatim}}
\item[iii.] The \f{CLEAR}\ttindex{CLEAR} function of the system does not
do a complete cleaning of \verb+OPERATORS+ and \verb+FUNCTIONS+. The
following two functions do a more complete cleaning and also
automatically take into account the {\em user} flags and properties that
the functions \f{PUTFLAG} and \f{PUTPROP} may have introduced. Their
names are \f{CLEAROP}\ttindex{CLEAROP} and
\f{CLEARFUNCTIONS}\ttindex{CLEARFUNCTIONS}. \f{CLEAROP} takes one
operator as its argument. \f{CLEARFUNCTIONS} is a nary command. If one
issues
{\small\begin{verbatim}
CLEARFUNCTIONS a1,a2, ... , an $
\end{verbatim}}
the functions with names \verb+ a1,a2, ... ,an+ are cleared. One should
be careful when using this facility since the only functions which cannot
be erased are those which are protected with the \verb+lose+ flag.
\end{itemize}
\section{Handling of Polynomials}
The module contains some utility functions to handle standard quotients
and several new facilities to manipulate polynomials.
\begin{itemize}
\item[i.] Two functions \f{ALG\_TO\_SYMB}\ttindex{ALG\_TO\_SYMB} and
\f{SYMB\_TO\_ALG}\ttindex{SYMB\_TO\_ALG} allow the changing of an
expression which is in the algebraic standard quotient form into a prefix
lisp form and vice-versa. This is made in such a way that the symbol
\verb+list+ which appears in the algebraic mode disappears in the
symbolic form (there it becomes a parenthesis ``()'' ) and it is
reintroduced in the translation from a symbolic prefix lisp expression to
an algebraic one. The following example shows how the well-known lisp
function \f{FLATTENS} can be trivially transported into algebraic mode:
{\small\begin{verbatim}
algebraic procedure ecrase x;
   lisp symb_to_alg flattens1 alg_to_symb algebraic x;

symbolic procedure flattens1 x;
   % ll;           ==> ((A B) ((C D) E))
   % flattens1 ll; ==> (A B C D E)
   if atom x then list x
    else if cdr x then
      append(flattens1 car x, flattens1 cdr x)
     else flattens1 car x;
\end{verbatim}}
gives, for instance,
{\small\begin{verbatim}
ll:={a,{b,{c},d,e},{{{z}}}}$
ECRASE ll;  ==> {A, B, C, D, E, Z}
\end{verbatim}}
\item[ii.] \f{LEADTERM}\ttindex{LEADTERM} and
\f{REDEXPR}\ttindex{REDEXPR} are the algebraic equivalents of the
symbolic functions \f{LT} and \f{RED}. They give the {\em leading term}
and the {\em reductum} of a polynomial. They also work for rational
functions. Their interest lies in the fact that they do not require the
extraction of the main variable. They work according to the current
ordering of the system:
{\small\begin{verbatim}
pol:=x+y+z$
LEADTERM pol;  ==> x
korder y,x,z;
LEADTERM pol;  ==> y
REDEXPR pol;   ==> x + z
\end{verbatim}}
By default, the representation of multivariate polynomials is recursive.
With such a representation, the function \f{LEADTERM} does not
necessarily extract a true monom. It extracts a monom in the leading
indeterminate multiplied by a polynomial in the other indeterminates.
However, very often one needs to handle true monoms separately. In that
case, one needs a polynomial in {\em distributive} form. Such a form is
provided by the package GROEBNER (chapter~\ref{GROEBNER}). The facility
there may be too involved and the need to load an additional package can
be a problem. So, a new switch has been created to handle {\em
distributed} polynomials. It is called
{\tt DISTRIBUTE}\ttindex{DISTRIBUTE} and a new function \label{DISTRIBUTE}
\f{DISTRIBUTE} puts a polynomial in distributive form. With the switch
{\bf on}, \f{LEADTERM} gives {\bf true} monoms.
\f{MONOM}\ttindex{MONOM} transforms a polynomial into a list of monoms.
It works whatever the setting of the switch {\tt DISTRIBUTE}.
\f{SPLITTERMS}\ttindex{SPLITTERMS} is analogous to \f{MONOM} except that
it gives a list of two lists. The first sublist contains the positive
terms while the second sublist contains the negative terms.
\f{SPLITPLUSMINUS}\ttindex{SPLITPLUSMINUS} gives a list whose first
element is an expression of the positive part of the polynomial and its
second element is its negative part.
\item[iii.] Two complementary functions \f{LOWESTDEG}\ttindex{LOWESTDEG}
and \f{DIVPOL}\ttindex{DIVPOL} are provided. The first takes a polynomial
as its first argument and the name of an indeterminate as its second
argument. It returns the {\em lowest degree} in that indeterminate. The
second function takes two polynomials and returns both the quotient and
its remainder.
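For instance, assuming \f{DIVPOL} returns the quotient and the remainder
collected in a list (the precise output format may differ), one would
expect
{\small\begin{verbatim}
LOWESTDEG(2*x**3+x**5,x);  ==> 3
DIVPOL(x**2+x+1,x+1);      ==> {x,1}
\end{verbatim}}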
\end{itemize}
\section{Handling of Transcendental Functions}
The functions \f{TRIGREDUCE}\ttindex{TRIGREDUCE} and
\f{TRIGEXPAND}\ttindex{TRIGEXPAND} and the equivalent ones for hyperbolic
functions, \f{HYPREDUCE}\ttindex{HYPREDUCE} and
\f{HYPEXPAND}\ttindex{HYPEXPAND}, make the transformations to multiple
arguments and from multiple arguments to elementary arguments.
{\small\begin{verbatim}
aa:=sin(x+y)$
TRIGEXPAND aa;  ==> SIN(X)*COS(Y) + SIN(Y)*COS(X)
TRIGREDUCE ws;  ==> SIN(Y + X)
\end{verbatim}}
When a trigonometric or hyperbolic expression is symmetric with respect
to the interchange of {\tt SIN (SINH)} and {\tt COS (COSH)}, the
application of \f{TRIG(HYP)REDUCE} may often lead to great
simplifications. However, if it is highly asymmetric, the repeated
application of \f{TRIG(HYP)REDUCE} followed by the use of
\f{TRIG(HYP)EXPAND} will lead to {\em more} complicated but more
symmetric expressions:
{\small\begin{verbatim}
aa:=(sin(x)^2+cos(x)^2)^3$
TRIGREDUCE aa;  ==>  1

bb:=1+sin(x)^3$
TRIGREDUCE bb;  ==>

 - SIN(3*X) + 3*SIN(X) + 4
---------------------------
             4

TRIGEXPAND ws;  ==>

      3                  2
SIN(X)  - 3*SIN(X)*COS(X)  + 3*SIN(X) + 4
-------------------------------------------
                     4
\end{verbatim}}
See also the TRIGSIMP package (chapter~\ref{TRIGSIMP}).
\section{Coercion from lists to arrays and converse}
Sometimes when a list is very long and especially if frequent access to
its elements is needed it is advantageous (temporarily) to transform it
into an array. \f{LIST\_TO\_ARRAY}\ttindex{LIST\_TO\_ARRAY} has three
arguments. The first is the list. The second is an integer which
indicates the required dimension of the array. The third is the name of
an identifier which will serve as the name of the generated array. If the
chosen dimension is not compatible with the list depth and structure an
error message is issued. \f{ARRAY\_TO\_LIST}\ttindex{ARRAY\_TO\_LIST}
does the opposite coercion. It takes the array name as its sole argument.
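For example, assuming the behaviour described above (the names {\tt l}
and {\tt ar1} are arbitrary), a list of depth 1 is coerced into a
one-dimensional array and back as follows:
{\small\begin{verbatim}
l:={a,b,c}$
LIST_TO_ARRAY(l,1,ar1);
ARRAY_TO_LIST ar1;  ==> {a,b,c}
\end{verbatim}}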
\section{Handling of n--dimensional Vectors}
Explicit vectors in {\tt EUCLIDEAN} space may be represented by list-like
or bag-like objects of depth 1. The components may be bags but may {\bf
not} be lists. Functions are provided to do the sum, the difference and
the scalar product. When the space dimension is three there are also
functions for the cross and mixed products.
\f{SUMVECT}\ttindex{SUMVECT}, \f{MINVECT}\ttindex{MINVECT},
\f{SCALVECT}\ttindex{SCALVECT}, \f{CROSSVECT}\ttindex{CROSSVECT} have two
arguments. \f{MPVECT}\ttindex{MPVECT} has three arguments.
{\small\begin{verbatim}
l:={1,2,3}$
ll:=list(a,b,c)$
SUMVECT(l,ll);    ==> {A + 1,B + 2,C + 3}
MINVECT(l,ll);    ==> { - A + 1, - B + 2, - C + 3}
SCALVECT(l,ll);   ==> A + 2*B + 3*C
CROSSVECT(l,ll);  ==> { - 3*B + 2*C,3*A - C, - 2*A + B}
MPVECT(l,ll,l);   ==> 0
\end{verbatim}}
\section{Handling of Grassmann Operators}
\index{Grassmann Operators} Grassmann variables are often used in
physics. For them the multiplication operation is associative and
distributive but anticommutative. Basic \REDUCE\ does not provide this.
However, implementing it in full generality would almost certainly
decrease the overall efficiency of the system. This small module together
with the declaration of antisymmetry for operators is enough to deal with
most calculations. The reason is that a product of similar anticommuting
kernels can easily be transformed into an antisymmetric operator with as
many indices as the number of these kernels. Moreover, one may also issue
pattern matching rules to implement the anticommutativity of the product.
The functions in this module represent the minimum functionality required
to identify them and to handle their specific features.
\f{PUTGRASS}\ttindex{PUTGRASS} is a (nary) command which gives
identifiers the property of being the names of Grassmann kernels.
\f{REMGRASS}\ttindex{REMGRASS} removes this property.
\f{GRASSP}\ttindex{GRASSP} is a boolean function which detects Grassmann
kernels. \f{GRASSPARITY}\ttindex{GRASSPARITY} takes a {\bf monom} as
argument and gives its parity. If the monom is a simple Grassmann kernel
it returns 1. \f{GHOSTFACTOR}\ttindex{GHOSTFACTOR} has two arguments.
Each one is a monom. It is equal to
{\small\begin{verbatim}
(-1)**(GRASSPARITY u * GRASSPARITY v)
\end{verbatim}}
Here is an illustration to show how the above functions work:
{\small\begin{verbatim}
PUTGRASS eta;
if GRASSP eta(1) then "Grassmann kernel";
     ==> Grassmann kernel
aa:=eta(1)*eta(2)-eta(2)*eta(1);
     ==> AA := - ETA(2)*ETA(1) + ETA(1)*ETA(2)
GRASSPARITY eta(1);           ==> 1
GRASSPARITY (eta(1)*eta(2));  ==> 0
GHOSTFACTOR(eta(1),eta(2));   ==> -1
grasskernel:=
 {eta(~x)*eta(~y) => -eta y * eta x when nordp(x,y),
  (~x)*(~x) => 0 when grassp x}$
exp:=eta(1)^2$
exp where grasskernel;  ==> 0
aa where grasskernel;   ==> - 2*ETA(2)*ETA(1)
\end{verbatim}}
\section{Handling of Matrices}
There are additional facilities for matrices.
\begin{itemize}
\item[i.] Often one needs to construct a {\tt UNIT} matrix of a given
dimension. This construction is performed by the function
\f{UNITMAT}\ttindex{UNITMAT}. It is a nary function.
The command is {\small\begin{verbatim} UNITMAT M1(n1), M2(n2), .....Mi(ni) ; \end{verbatim}} where \verb+M1,...Mi+ are names of matrices and \verb+ n1, n2, ..., ni+ are integers. \f{MKIDM}\ttindex{MKIDM} is a generalisation of \f{MKID}\ttindex{MKID}. It allows the indexing of matrix names. If \verb+u+ and \verb+u1+ are two matrices, one can go from one to the other: {\small\begin{verbatim} matrix u(2,2);$ unitmat u1(2)$ u1; ==> [1 0] [ ] [0 1] mkidm(u,1); ==> [1 0] [ ] [0 1] \end{verbatim}} Note: MKIDM(V,1) will fail even if the matrix V1 exists, unless V is also a matrix. This function allows to make loops on matrices like the following. If \verb+U, U1, U2,.., U5+ are matrices: {\small\begin{verbatim} FOR I:=1:5 DO U:=U-MKIDM(U,I); \end{verbatim}} \item[ii.] The next functions map matrices onto bag-like or list-like objects and conversely they generate matrices from bags or lists. \f{COERCEMAT}\ttindex{COERCEMAT} transforms the matrix first argument into a list of lists. {\small\begin{verbatim} COERCEMAT(U,id) \end{verbatim}} When \verb+id+ is \verb+list+ the matrix is transformed into a list of lists. Otherwise it transforms it into a bag of bags whose envelope is equal to \verb+id+. \f{BAGLMAT}\ttindex{BAGLMAT} does the inverse. The {\bf first} argument is the bag-like or list-like object while the second argument is the matrix identifier. {\small\begin{verbatim} BAGLMAT(bgl,U) \end{verbatim}} \verb+bgl+ becomes the matrix \verb+U+. The transformation is {\bf not} done if \verb+U+ is {\em already} the name of a previously defined matrix, to avoid accidental redefinition of that matrix. \item[ii.] The functions \f{SUBMAT}\ttindex{SUBMAT}, \f{MATEXTR}\ttindex{MATEXTR}, \f{MATEXTC}\ttindex{MATEXTC} take parts of a given matrix. \f{SUBMAT} has three arguments. {\small\begin{verbatim} SUBMAT(U,nr,nc) \end{verbatim}} The first is the matrix name, and the other two are the row and column numbers. It gives the submatrix obtained from \verb+U+ deleting the row \verb+nr+ and the column \verb+nc+. When one of them is equal to zero only column \verb+nc+ or row \verb+nr+ is deleted. \f{MATEXTR} and \f{MATEXTC} extract a row or a column and place it into a list-like or bag-like object. {\small\begin{verbatim} MATEXTR(U,VN,nr) MATEXTC(U,VN,nc) \end{verbatim}} where \verb+U+ is the matrix, \verb+VN+ is the ``vector name'', \verb+nr+ and \verb+nc+ are integers. If \verb+VN+ is equal to {\tt list} the vector is given as a list otherwise it is given as a bag. \item[iii.] Functions which manipulate matrices: \f{MATSUBR}\ttindex{MATSUBR}, \f{MATSUBC}\ttindex{MATSUBC}, \f{HCONCMAT}\ttindex{HCONCMAT}, \f{VCONCMAT}\ttindex{VCONCMAT}, \f{TPMAT}\ttindex{TPMAT}, \f{HERMAT}\ttindex{HERMAT}. \f{MATSUBR} and \f{MATSUBC} substitute rows and columns. They have three arguments. {\small\begin{verbatim} MATSUBR(U,bgl,nr) MATSUBC(U,bgl,nc) \end{verbatim}} The meaning of the variables \verb+U, nr, nc+ is the same as above while \verb+bgl+ is a list-like or bag-like vector. Its length should be compatible with the dimensions of the matrix. \f{HCONCMAT} and \f{VCONCMAT} concatenate two matrices. {\small\begin{verbatim} HCONCMAT(U,V) VCONCMAT(U,V) \end{verbatim}} The first function concatenates horizontally, the second one concatenates vertically. The dimensions must match. \f{TPMAT} makes the tensor product of two matrices. It is also an {\em infix} function. 
{\small\begin{verbatim}
TPMAT(U,V)   or   U TPMAT V
\end{verbatim}}
\f{HERMAT} takes the hermitian conjugate of a matrix
{\small\begin{verbatim}
HERMAT(U,HU)
\end{verbatim}}
where \verb+HU+ is the identifier for the hermitian conjugate of
\verb+U+. It should be {\bf unassigned} for this function to work
successfully. This is done on purpose to prevent accidental redefinition
of an already used identifier.
\item[iv.] \f{SETELMAT} and \f{GETELMAT} are functions of two integers.
The first one resets the element \verb+(i,j)+ while the second one
extracts an element identified by \verb+(i,j)+. They may be useful when
dealing with matrices {\em inside procedures}.
\end{itemize}
\chapter[ATENSOR: Tensor Simplification]%
{ATENSOR: Package for Tensor Simplification}
\label{ATENSOR}
\typeout{{ATENSOR: Package for Tensor Simplification}}
{\footnotesize
\begin{center}
V.~A.~Ilyin and A.~P.~Kryukov \\
\end{center}
}
\ttindex{ATENSOR}
%\markboth{CHAPTER \ref{ATENSOR}. ATENSOR: TENSOR SIMPLIFICATION}{}
%\thispagestyle{myheadings}
Tensors are classical examples of objects often used in mathematics and
physics. Indexed objects can have very complicated and intricate
properties. For example the Riemann tensor has symmetry properties with
respect to permutation of indices. Moreover it satisfies the cyclic
identity. There are a number of linear identities with many terms in the
case of Riemann-Cartan geometry with torsion.
From the user's point of view, there are three groups of tensor
properties:
\begin{itemize}
\item {\bf S} - symmetry with respect to index permutation;
\item {\bf I} - linear identities;
\item {\bf D} - invariance with respect to renamings of dummy indices;
\end{itemize}
The problem under investigation is whether two tensor expressions are
equal or not when the S-I-D properties are taken into account.
\section{Basic tensors and tensor expressions}
Under basic tensors we understand objects with a finite number of indices
which can have such properties as {\it symmetry} and {\it multiterm
linear identities} (including the {\it symmetry relations}). \\
Under a tensor expression we understand any expression which can be
obtained from basic tensors by summation with integer coefficients and
(commutative) multiplication of basic tensors. \\
It is assumed that all terms in the tensor expression have the same
number of indices. Some pairs of them are marked as dummy ones. The set
of nondummy names has to be the same for each term in the tensor
expression. The names of dummies can be arbitrary.
\section{Operators for tensors}
Use \f{TENSOR}\ttindex{TENSOR} to declare tensors and
\f{TCLEAR}\ttindex{TCLEAR} to remove them. The command
\f{TSYM}\ttindex{TSYM} defines symmetry relations of basic tensors and
\f{KBASIS}\ttindex{KBASIS} determines the {\bf K}-Basis, which is the
general name for a ``triangle'' set of linearly independent vectors for a
basic tensor considered as a separate tensor expression. It is possible
to build the sum, the difference and the product of tensors. It is
assumed that indices with identical names imply summation over their
values.
\par
{\bf Example}:
{\small\begin{verbatim}
1: load atensor;

2: tensor s2,a3;

3: tsym s2(i,j) - s2(j,i),     % Symmetric
3:      a3(i,j,k) + a3(j,i,k), % Antisymm.
3:      a3(i,j,k) - a3(j,k,i);

4: kbasis s2,a3;

s2(j,i) + (-1)*s2(i,j)
1
a3(k,i,j) + a3(j,i,k)
a3(k,j,i) + (-1)*a3(j,i,k)
a3(i,k,j) + (-1)*a3(j,i,k)
a3(i,j,k) + a3(j,i,k)
a3(j,k,i) + a3(j,i,k)
5
\end{verbatim}}
\section{Switches}
There are two switches defined.
The switch \f{DUMMYPRI}\ttindex{DUMMYPRI} prints dummy indices with internal names and numbers. It's default value is {\tt OFF}. The other switch called \f{SHORTEST}\ttindex{SHORTEST} prints tensor expressions in shortest form that was produced during evaluation. The default value is {\tt OFF}. \par \ \\ For further information refer to the documentation which comes with this package. \chapter[AVECTOR: Vector Algebra]% {AVECTOR: A vector algebra and calculus package} \label{AVECTOR} \typeout{{AVECTOR: Vector Algebra}} {\footnotesize \begin{center} David Harper \\ Astronomy Unit, Queen Mary and Westfield College \\ University of London \\ Mile End Road \\ London E1 4NS, England \\[0.05in] e--mail: adh@star.qmw.ac.uk \end{center} } \ttindex{AVECTOR} This package provides \REDUCE\ with the ability to perform vector algebra using the same notation as scalar algebra. The basic algebraic operations are supported, as are differentiation and integration of vectors with respect to scalar variables, cross product and dot product, component manipulation and application of scalar functions ({\em e.g.} cosine) to a vector to yield a vector result. \section{Vector declaration and initialisation} To declare a list of names to be vectors use the VEC command: \index{VEC command} {\small\begin{verbatim} VEC A,B,C; \end{verbatim}} declares the variables {\tt A}, {\tt B} and {\tt C} to be vectors. If they have already been assigned (scalar) values, these will be lost. When a vector is declared using the {\tt VEC} command, it does not have an initial value. If a vector value is assigned to a scalar variable, then that variable will automatically be declared as a vector and the user will be notified that this has happened. \index{AVEC function} A vector may be initialised using the {\tt AVEC} function which takes three scalar arguments and returns a vector made up from those scalars. For example {\small\begin{verbatim} A := AVEC(A1, A2, A3); \end{verbatim}} sets the components of the vector {\tt A} to {\tt A1}, {\tt A2} and {\tt A3}. \section{Vector algebra} (In the examples which follow, {\tt V}, {\tt V1}, {\tt V2} {\em etc} are assumed to be vectors while {\tt S}, {\tt S1}, {\tt S2} etc are scalars.) \index{+ ! vector}\index{- ! vector}\index{* ! vector}\index{/ ! vector} The scalar algebra operators +,-,* and / may be used with vector operands according to the rules of vector algebra. Thus multiplication and division of a vector by a scalar are both allowed, but it is an error to multiply or divide one vector by another. \begin{tabular}{l l} {\tt V := V1 + V2 - V3;} & Addition and subtraction \\ {\tt V := S1*3*V1;} & Scalar multiplication \\ {\tt V := V1/S;} & Scalar division \\ {\tt V := -V1;} & Negation \\ \end{tabular} \index{DOT ! vector}\index{Dot product}\index{CROSS ! vector} \index{cross product} \noindent Vector multiplication is carried out using the infix operators {\tt DOT} and {\tt CROSS}. These are defined to have higher precedence than scalar multiplication and division. \begin{tabular}{l l} {\tt V := V1 CROSS V2;} & Cross product \\ {\tt S := V1 DOT V2;} & Dot product \\ {\tt V := V1 CROSS V2 + V3;} & \\ {\tt V := (V1 CROSS V2) + V3;} & \\ \end{tabular} The last two expressions are equivalent due to the precedence of the {\tt CROSS} operator. \index{VMOD operator} The modulus of a vector may be calculated using the {\tt VMOD} operator. {\small\begin{verbatim} S := VMOD V; \end{verbatim}} A unit vector may be generated from any vector using the {\tt VMOD} operator. 
{\small\begin{verbatim} V1 := V/(VMOD V); \end{verbatim}} Components may be extracted from any vector using index notation in the same way as an array. \begin{tabular}{l l} {\tt V := AVEC(AX, AY, AZ);} & \\ {\tt V(0);} & yields AX \\ {\tt V(1);} & yields AY \\ {\tt V(2);} & yields AZ \\ \end{tabular} It is also possible to set values of individual components. Following from above: {\small\begin{verbatim} V(1) := B; \end{verbatim}} The vector {\tt V} now has components {\tt AX}, {\tt B}, {\tt AZ}. \index{vector ! differentiation} \index{vector ! integration} \index{differentiation ! vector} \index{differentiation ! vector} Vectors may be used as arguments in the differentiation and integration routines in place of the dependent expression. \begin{tabular}{l l} {\tt V := AVEC(X**2, SIN(X), Y);} & \\ {\tt DF(V,X);} & yields (2*X, COS(X), 0) \\ {\tt INT(V,X);} & yields (X**3/3, -COS(X), Y*X) \\ \end{tabular} Vectors may be given as arguments to monomial functions such as {\tt SIN}, {\tt LOG} and {\tt TAN}. The result is a vector obtained by applying the function component-wise to the argument vector. \begin{tabular}{l l} {\tt V := AVEC(A1, A2, A3);} & \\ {\tt SIN(V);} & yields (SIN(A1), SIN(A2), SIN(A3)) \\ \end{tabular} \section{Vector calculus} \index{DIV ! operator}\index{divergence ! vector field} \index{GRAD ! operator}\index{gradient ! vector field} \index{CURL ! operator}\index{curl ! vector field} \index{DELSQ ! operator}\index{Laplacian ! vector field} The vector calculus operators div, grad and curl are recognised. The Laplacian operator is also available and may be applied to scalar and vector arguments. \begin{tabular}{l l} {\tt V := GRAD S;} & Gradient of a scalar field \\ {\tt S := DIV V;} & Divergence of a vector field \\ {\tt V := CURL V1;} & Curl of a vector field \\ {\tt S := DELSQ S1;} & Laplacian of a scalar field \\ {\tt V := DELSQ V1;} & Laplacian of a vector field \\ \end{tabular} These operators may be used in any orthogonal curvilinear coordinate system. The user may alter the names of the coordinates and the values of the scale factors. Initially the coordinates are {\tt X}, {\tt Y} and {\tt Z} and the scale factors are all unity. \index{COORDS vector}\index{HFACTORS scale factors} There are two special vectors : {\tt COORDS} contains the names of the coordinates in the current system and {\tt HFACTORS} contains the values of the scale factors. \index{COORDINATES operator} The coordinate names may be changed using the {\tt COORDINATES} operator. {\small\begin{verbatim} COORDINATES R,THETA,PHI; \end{verbatim}} This command changes the coordinate names to {\tt R}, {\tt THETA} and {\tt PHI}. \index{SCALEFACTORS operator} The scale factors may be altered using the {\tt SCALEFACTORS} operator. {\small\begin{verbatim} SCALEFACTORS(1,R,R*SIN(THETA)); \end{verbatim}} This command changes the scale factors to {\tt 1}, {\tt R} and {\tt R SIN(THETA)}. Note that the arguments of {\tt SCALEFACTORS} must be enclosed in parentheses. This is not necessary with {\tt COORDINATES}. When vector differential operators are applied to an expression, the current set of coordinates are used as the independent variables and the scale factors are employed in the calculation. %%(See, for example, Batchelor G.K. 'An Introduction to Fluid %%Mechanics', Appendix 2.) \index{"!*CSYSTEMS global (AVECTOR)} Several coordinate systems are pre-defined and may be invoked by name. 
To see a list of valid names enter {\small\begin{verbatim} SYMBOLIC !*CSYSTEMS; \end{verbatim}} and \REDUCE\ will respond with something like {\small\begin{verbatim} (CARTESIAN SPHERICAL CYLINDRICAL) \end{verbatim}} \index{GETCSYSTEM command} To choose a coordinate system by name, use the command {\tt GETCSYSTEM}. To choose the Cartesian coordinate system : {\small\begin{verbatim} GETCSYSTEM 'CARTESIAN; \end{verbatim}} \index{PUTCSYSTEM command} Note the quote which prefixes the name of the coordinate system. This is required because {\tt GETCSYSTEM} (and its complement {\tt PUTCSYSTEM}) is a {\tt SYMBOLIC} procedure which requires a literal argument. \REDUCE\ responds by typing a list of the coordinate names in that coordinate system. The example above would produce the response {\small\begin{verbatim} (X Y Z) \end{verbatim}} whilst {\small\begin{verbatim} GETCSYSTEM 'SPHERICAL; \end{verbatim}} would produce {\small\begin{verbatim} (R THETA PHI) \end{verbatim}} Note that any attempt to invoke a coordinate system is subject to the same restrictions as the implied calls to {\tt COORDINATES} and {\tt SCALEFACTORS}. In particular, {\tt GETCSYSTEM} fails if any of the coordinate names has been assigned a value and the previous coordinate system remains in effect. A user-defined coordinate system can be assigned a name using the command {\tt PUTCSYSTEM}. It may then be re-invoked at a later stage using {\tt GETCSYSTEM}. \example\index{AVECTOR package ! example} We define a general coordinate system with coordinate names {\tt X},{\tt Y},{\tt Z} and scale factors {\tt H1},{\tt H2},{\tt H3} : {\small\begin{verbatim} COORDINATES X,Y,Z; SCALEFACTORS(H1,H2,H3); PUTCSYSTEM 'GENERAL; \end{verbatim}} This system may later be invoked by entering {\small\begin{verbatim} GETCSYSTEM 'GENERAL; \end{verbatim}} \section{Volume and Line Integration} Several functions are provided to perform volume and line integrals. These operate in any orthogonal curvilinear coordinate system and make use of the scale factors described in the previous section. Definite integrals of scalar and vector expressions may be calculated using the {\tt DEFINT} function\footnote{Not to be confused with the DEFINT package described in chapter~\ref{DEFINT}}. \example\index{AVECTOR package ! example} \index{DEFINT function}\index{integration ! definite (simple)} \index{definite integration (simple)} \noindent To calculate the definite integral of $\sin(x)^2$ between 0 and 2$\pi$ we enter {\small\begin{verbatim} DEFINT(SIN(X)**2,X,0,2*PI); \end{verbatim}} This function is a simple extension of the {\tt INT} function taking two extra arguments, the lower and upper bounds of integration respectively. \index{VOLINTEGRAL function}\index{integration ! volume} Definite volume integrals may be calculated using the {\tt VOLINTEGRAL} function whose syntax is as follows : \noindent {\tt VOLINTEGRAL}({\tt integrand}, vector {\tt lower-bound}, vector {\tt upper-bound}); \example\index{AVECTOR package ! 
example} \noindent In spherical polar coordinates we may calculate the volume of a sphere by integrating unity over the range $r$=0 to {\tt RR}, $\theta$=0 to {\tt PI}, $\phi$=0 to 2*$\pi$ as follows : \begin{tabular}{l l} {\tt VLB := AVEC(0,0,0);} & Lower bound \\ {\tt VUB := AVEC(RR,PI,2*PI);} & Upper bound in $r, \theta, \phi$ respectively \\ {\tt VOLINTORDER := (0,1,2);} & The order of integration \\ {\tt VOLINTEGRAL(1,VLB,VUB);} & \\ \end{tabular} \index{VOLINTORDER vector} Note the use of the special vector {\tt VOLINTORDER} which controls the order in which the integrations are carried out. This vector should be set to contain the number 0, 1 and 2 in the required order. The first component of {\tt VOLINTORDER} contains the index of the first integration variable, the second component is the index of the second integration variable and the third component is the index of the third integration variable. \example\index{AVECTOR package ! example} Suppose we wish to calculate the volume of a right circular cone. This is equivalent to integrating unity over a conical region with the bounds: \begin{tabular}{l l} z = 0 to H & (H = the height of the cone) \\ r = 0 to pZ & (p = ratio of base diameter to height) \\ phi = 0 to 2*PI & \\ \end{tabular} We evaluate the volume by integrating a series of infinitesimally thin circular disks of constant z-value. The integration is thus performed in the order : d($\phi$) from 0 to $2\pi$, dr from 0 to p*Z, dz from 0 to H. The order of the indices is thus 2, 0, 1. {\small\begin{verbatim} VOLINTORDER := AVEC(2,0,1); VLB := AVEC(0,0,0); VUB := AVEC(P*Z,H,2*PI); VOLINTEGRAL(1,VLB,VUB); \end{verbatim}} \index{LINEINT function}\index{DEFLINEINT function} \index{integration ! line}\index{line integrals} Line integrals may be calculated using the {\tt LINEINT} and {\tt DEFLINEINT} functions. Their general syntax is \noindent {\tt LINEINT}({\tt vector-fnct}, {\tt vector-curve}, {\tt variable}); \noindent{\tt DEFLINENINT}({\tt vector-fnct}, {\tt vector-curve}, {\tt variable},\\ \noindent\verb+ +{\tt lower-bnd}, {\tt upper-bnd}); \noindent where \begin{description} \item[{\tt vector-fnct}] is any vector-valued expression; \item[{\tt vector-curve}] is a vector expression which describes the path of integration in terms of the independent variable; \item[{\tt variable}] is the independent variable; \item[{\tt lower-bnd}] \item[{\tt upper-bnd}] are the bounds of integration in terms of the independent variable. \end{description} \example\index{AVECTOR package ! example} In spherical polar coordinates, we may integrate round a line of constant theta (`latitude') to find the length of such a line. The vector function is thus the tangent to the `line of latitude', (0,0,1) and the path is {\tt (0,LAT,PHI)} where {\tt PHI} is the independent variable. We show how to obtain the definite integral {\em i.e.} from $\phi=0$ to $2 \pi$ : {\small\begin{verbatim} DEFLINEINT(AVEC(0,0,1),AVEC(0,LAT,PHI),PHI,0,2*PI); \end{verbatim}} \chapter{BOOLEAN: A package for boolean algebra} \label{BOOLEAN} \typeout{{BOOLEAN: A package for boolean algebra}} {\footnotesize \begin{center} Herbert Melenk\\ Konrad--Zuse--Zentrum f\"ur Informationstechnik Berlin \\ Takustra\"se 7 \\ D--14195 Berlin--Dahlem, Germany \\[0.05in] e--mail: melenk@zib.de \end{center} } \ttindex{BOOLEAN} The package {\bf Boolean} supports the computation with boolean expressions in the propositional calculus. 
The data objects are composed from algebraic expressions (``atomic
parts'', ``leafs'') connected by the infix boolean operators {\bf and},
{\bf or}, {\bf implies}, {\bf equiv}, and the unary prefix operator {\bf
not}. {\bf Boolean} allows the simplification of expressions built from
these operators, and the testing of properties like equivalence, subset
property etc. Also the reduction of a boolean expression by a partial
evaluation and combination of its atomic parts is supported.
\section{Entering boolean expressions}
In order to distinguish boolean data expressions from boolean expressions
in the \REDUCE\ programming language ({\em e.g.} in an {\bf if}
statement), each expression must be tagged explicitly by an operator {\bf
boolean}. Otherwise the boolean operators are not accepted in the
\REDUCE\ algebraic mode input. The first argument of {\bf boolean} can be
any boolean expression, which may contain references to other boolean
values.
{\small\begin{verbatim}
load_package boolean;
boolean (a and b or c);
q := boolean(a and b implies c);
boolean(q or not c);
\end{verbatim}}
Brackets are used to override the operator precedence as usual. The leafs
or atoms of a boolean expression are those parts which do not contain a
leading boolean operator. These are considered as constants during the
boolean evaluation. There are two pre-defined values:
\begin{itemize}
\item {\bf true}, {\bf t} or {\bf 1}
\item {\bf false}, {\bf nil} or {\bf 0}
\end{itemize}
These represent the boolean constants. In a result form they are used
only as {\bf 1} and {\bf 0}. By default, a {\bf boolean} expression is
converted to a disjunctive normal form. On output, the operators {\bf
and} and {\bf or} are represented as \verb+/\+ and \verb+\/+,
respectively.
{\small\begin{verbatim}
boolean(true and false);     ->  0
boolean(a or not(b and c));
     ->  boolean(not(b) \/ not(c) \/ a)
boolean(a equiv not c);
     ->  boolean(not(a)/\c \/ a/\not(c))
\end{verbatim}}
\section{Normal forms}
The {\bf disjunctive} normal form is used by default. Alternatively a
{\bf conjunctive} normal form can be selected as simplification target,
which is a form with leading operator {\bf and}. To produce that form add
the keyword {\bf and} as an additional argument to a call of {\bf
boolean}.
{\small\begin{verbatim}
boolean (a or b implies c);
     ->  boolean(not(a)/\not(b) \/ c)
boolean (a or b implies c, and);
     ->  boolean((not(a) \/ c)/\(not(b) \/ c))
\end{verbatim}}
Usually the result is a fully reduced disjunctive or conjunctive normal
form, where all redundant elements have been eliminated following the
rules

$(a \wedge b) \vee (\neg a \wedge b) \longleftrightarrow b$

$(a \vee b) \wedge (\neg a \vee b) \longleftrightarrow b$

Internally the full normal forms are computed as intermediate result; in
these forms each term contains all leaf expressions, each one exactly
once. This unreduced form is returned when the additional keyword {\bf
full} is set:
\newpage
{\small\begin{verbatim}
boolean (a or b implies c, full);
     ->  boolean(a/\b/\c \/ a/\not(b)/\c \/ not(a)/\b/\c
              \/ not(a)/\not(b)/\c \/ not(a)/\not(b)/\not(c))
\end{verbatim}}
The keywords {\bf full} and {\bf and} may be combined.
\section{Evaluation of a boolean expression}
If the leafs of the boolean expression are algebraic expressions which
may evaluate to logical values because the environment has changed ({\em
e.g.\ }variables have been bound), one can re--investigate the expression
using the operator \f{TESTBOOL}\ttindex{TESTBOOL} with the boolean
expression as argument.
This operator tries to evaluate all leaf expressions in \REDUCE\ boolean style. As many terms as possible are replaced by their boolean values; the others remain unchanged. The resulting expression is contracted to a minimal form. The result {\bf 1} (= true) or {\bf 0} (=false) signals that the complete expression could be evaluated. In the following example the leafs are built as numeric greater test. For using ${\bf >}$ in the expressions the greater sign must be declared operator first. The error messages are meaningless. {\small\begin{verbatim} operator >; fm:=boolean(x>v or not (u>v)); -> fm := boolean(not(u>v) \/ x>v) v:=10$ testbool fm; ***** u - 10 invalid as number ***** x - 10 invalid as number -> boolean(not(u>10) \/ x>10) x:=3$ testbool fm; ***** u - 10 invalid as number -> boolean(not(u>10)) x:=17$ testbool fm; ***** u - 10 invalid as number -> 1 \end{verbatim}} \chapter[CALI: Commutative Algebra]{CALI: Computational Commutative Algebra} \label{CALI} \typeout{{CALI: Computational Commutative Algebra}} {\footnotesize \begin{center} Hans-Gert Gr\"abe \\ Institut f\"ur Informatik, Universit\"at Leipzig\\ Augustusplatz 10 -- 11\\ 04109 Leipzig, Germany \\[0.05in] e--mail: graebe@informatik.uni-leipzig.de \end{center} } \ttindex{CALI} This package contains algorithms for computations in commutative algebra closely related to the Gr\"obner algorithm for ideals and modules. Its heart is a new implementation of the Gr\"obner algorithm that also allows for the computation of syzygies. This implementation is also applicable to submodules of free modules with generators represented as rows of a matrix. As main topics CALI contains facilities for \begin{itemize} \item defining rings, ideals and modules, \item computing Gr\"obner bases and local standard bases, \item computing syzygies, resolutions and (graded) Betti numbers, \item computing (now also weighted) Hilbert series, multiplicities, independent sets, and dimensions, \item computing normal forms and representations, \item computing sums, products, intersections, quotients, stable quotients, elimination ideals etc., \item primality tests, computation of radicals, unmixed radicals, equidimensional parts, primary decompositions etc. of ideals and modules, \item advanced applications of Gr\"obner bases (blowup, associated graded ring, analytic spread, symmetric algebra, monomial curves etc.), \item applications of linear algebra techniques to zero dimensional ideals, as {\em e.g.\ }the FGLM change of term orders, border bases and affine and projective ideals of sets of points, \item splitting polynomial systems of equations mixing factorisation and the Gr\"obner algorithm, triangular systems, and different versions of the extended Gr\"obner factoriser. \end{itemize} There is more extended documentation on this package elsewhere, which includes facilities for tracing and switches to control its behaviour. \chapter[CAMAL: Celestial Mechanics]{CAMAL: Calculations in Celestial Mechanics} \label{CAMAL} \typeout{{CAMAL: Calculations in Celestial Mechanics}} {\footnotesize \begin{center} J. P. 
Fitch \\
School of Mathematical Sciences, University of Bath\\
BATH BA2 7AY, England \\[0.05in]
e--mail: jpff@cs.bath.ac.uk
\end{center}
}
\ttindex{CAMAL}
The CAMAL package provides facilities for calculations in Fourier series similar to those in the specialist Celestial Mechanics systems of the 1970s, and the Cambridge Algebra system in particular.\index{Fourier Series}\index{CAMAL}\index{Celestial Mechanics}
\section{Operators for Fourier Series}
\subsection*{\f{HARMONIC}}\ttindex{HARMONIC}
The celestial mechanics systems distinguish between polynomial variables and angular variables. All angles must be declared before use with the \f{HARMONIC} function.
{\small\begin{verbatim}
harmonic theta, phi;
\end{verbatim}}
\subsection*{\f{FOURIER}}\ttindex{FOURIER}
The \f{FOURIER} function coerces its argument into the domain of a Fourier Series. The expression may contain {\em sine} and {\em cosine} terms of linear sums of harmonic variables.
{\small\begin{verbatim}
fourier sin(theta)
\end{verbatim}}
Fourier series expressions may be added, subtracted, multiplied and differentiated in the usual \REDUCE\ fashion. Multiplications involve the automatic linearisation of products of angular functions. There are three other functions, which correspond to the usual restricted harmonic differentiation and integration, and to harmonic substitution.
\subsection*{\f{HDIFF} and \f{HINT}}\ttindex{HDIFF}\ttindex{HINT{}}
Differentiate or integrate a Fourier expression with respect to an angular variable. Any secular terms in the integration are disregarded without comment.
{\small\begin{verbatim}
load_package camal;
harmonic u;
bige := fourier (sin(u) + cos(2*u));
aa := fourier 1+hdiff(bige,u);
ff := hint(aa*aa*fourier cc,u);
\end{verbatim}}
\subsection*{\f{HSUB}}\ttindex{HSUB}
The operation of substituting an angle plus a Fourier expression for an angle and expanding to some degree is called harmonic substitution. The function takes five arguments: the basic expression, the angle being replaced, the angular part of the replacement, the Fourier part of the replacement, and the degree to which to expand.
{\small\begin{verbatim}
harmonic u,v,w,x,y,z;
xx:=hsub(fourier((1-d*d)*cos(u)),u,u-v+w-x-y+z,yy,n);
\end{verbatim}}
\section{A Short Example}
The following program solves Kepler's Equation as a Fourier series to the degree $n$.
{\small\begin{verbatim}
bige := fourier 0;
for k:=1:n do <<
   wtlevel k;
   bige:=fourier e * hsub(fourier(sin u), u, u, bige, k);
>>;
write "Kepler Eqn solution:", bige$
\end{verbatim}}
\chapter{CGB: Comprehensive Gr\"obner Bases}
\label{CGB}
\typeout{{CGB: Comprehensive Gr\"obner Bases}}
{\footnotesize
\begin{center}
Andreas Dolzmann \& Thomas Sturm\\
Department of Mathematics and Computer Science\\
University of Passau\\
D-94030 Passau, Germany\\[1ex]
e-mail: dolzmann@uni-passau.de, sturm@uni-passau.de
\end{center}
}
\ttindex{REDLOG}
\section{Introduction}
Consider the ideal basis $F=\{ax,x+y\}$. Treating $a$ as a parameter, the calling sequence
{\small\begin{verbatim}
torder({x,y},lex)$
groebner{a*x,x+y};

{x,y}
\end{verbatim}}
yields $\{x,y\}$ as the reduced Gr\"obner basis. This is, however, not correct under the specialization $a=0$. The reduced Gr\"obner basis would then be $\{x+y\}$. Taking these results together, we obtain $C=\{x+y,ax,ay\}$, which is correct wrt.~{\em all} specializations for $a$ including zero specializations. We call this set $C$ a {\em comprehensive Gr\"obner basis} ({\sc cgb}).
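For this introductory example, the operator \f{CGB} described below can be applied directly to the ideal basis; a session of the following form would return the set $C$ (the exact ordering and formatting of the printed basis may vary between versions):
{\small\begin{verbatim}
torder({x,y},lex)$
cgb{a*x,x+y};

{x + y,a*x,a*y}
\end{verbatim}}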
The notion of a {\sc cgb} and a corresponding algorithm was introduced by Weispfenning \cite{Weispfenning:92}. This algorithm works by performing case distinctions wrt.~parametric coefficient polynomials in order to find out what the head monomials are under all possible specializations. It thus not only determines a {\sc cgb}, but also classifies the contained polynomials wrt.~the specializations for which they are relevant. If we keep the Gr\"obner bases for all cases separate and associate information on the respective specializations with them, we obtain a {\em Gr\"obner system}. For our example, the Gr\"obner system is the following:
$$
\left[
\begin{array}{c|c}
a\neq0 & \{x+y,ax,ay\}\\
a=0 & \{x+y\}
\end{array}
\right].
$$
A {\sc cgb} is obtained as the union of the single Gr\"obner bases in a Gr\"obner system. It has also been shown that, on the other hand, a Gr\"obner system can easily be reconstructed from a given {\sc cgb} \cite{Weispfenning:92}. The CGB package provides functions for computing both {\sc cgb}'s and Gr\"obner systems, and for turning Gr\"obner systems into {\sc cgb}'s.
%
\section{Using the REDLOG Package}
For managing the conditions occurring with the {\sc cgb} computations, the CGB package uses the package REDLOG \cite{DolzmannSturm:97a,DolzmannSturm:99}, which implements first-order formulas and is also part of the \textsc{reduce} distribution.
%
\section{Term Ordering Mode}
The CGB package uses the settings made with the function \f{TORDER} of the GROEBNER package. This includes in particular the choice of the main variables. All variables not mentioned in the variable list argument of \f{TORDER} are parameters. The only term ordering modes recognized by \textsc{cgb} are \f{LEX} and \f{REVGRADLEX}.
%
\section{CGB: Comprehensive Gr\"ob\-ner Basis}
The function \f{CGB}\ttindex{CGB} expects a list $F$ of expressions. It returns a {\sc cgb} of $F$ wrt.~the current \f{TORDER} setting.
%
\subsection*{Example:}
{\small\begin{verbatim}
torder({x,y},lex)$
cgb{a*x+y,x+b*y};

{x + b*y,a*x + y,(a*b - 1)*y}

ws;

{b*y + x, a*x + y, y*(a*b - 1)}
\end{verbatim}}
Note that the basis returned by the \f{CGB} call has not undergone the standard evaluation process: The returned polynomials are ordered wrt.~the chosen term order. Reevaluation changes this, as can be seen from the output of \f{WS}.
%
\section{GSYS: Gr\"obner System}
The function \f{GSYS}\ttindex{GSYS} follows the same calling conventions as \f{CGB}. It returns the complete Gr\"obner system represented as a nested list
\begin{center}
\begin{tt}
$\bigl\{\bigl\{c_1,\{g_{11},\ldots,g_{1n_1}\}\bigr\},\ldots,
\bigl\{c_m,\{g_{m1},\ldots,g_{mn_m}\}\bigr\}\bigr\}$.
\end{tt}
\end{center}
The {\tt $c_i$} are conditions in the parameters represented as quantifier-free REDLOG formulas. Each choice of parameters will obey at least one of the {\tt $c_i$}. Whenever a choice of parameters obeys some {\tt $c_i$}, the corresponding {\tt $\{g_{i1},\ldots,g_{in_i}\}$} is a Gr\"obner basis for this choice.
%
\subsection*{Example:}
{\small\begin{verbatim}
torder({x,y},lex)$
gsys {a*x+y,x+b*y};

{{a*b - 1 <> 0 and a <> 0,
  {a*x + y,x + b*y,(a*b - 1)*y}},
 {a <> 0 and a*b - 1 = 0,
  {a*x + y,x + b*y}},
 {a = 0,{a*x + y,x + b*y}}}
\end{verbatim}}
As with the function \f{CGB}, the contained polynomials remain unevaluated. Computing a Gr\"obner system is not harder than computing a {\sc cgb}. In fact, \f{CGB} also computes a Gr\"obner system and then turns it into a {\sc cgb}.
\subsection{Switch CGBGEN: Only the Generic Case}
If the switch \f{CGBGEN}\ttindex{CGBGEN} is turned on, both \f{GSYS} and \f{CGB} will assume all parametric coefficients to be non-zero, ignoring the other cases. For \f{CGB} this means that the result equals---up to auto-reduction---that of \f{GROEBNER}. A call to \f{GSYS} will return this result as a single case including the assumptions made during the computation:
%
\subsection*{Example:}
{\small\begin{verbatim}
torder({x,y},lex)$
on cgbgen;
gsys{a*x+y,x+b*y};

{{a*b - 1 <> 0 and a <> 0,
  {a*x + y,x + b*y,(a*b - 1)*y}}}

off cgbgen;
\end{verbatim}}
%
\section{GSYS2CGB: Gr\"obner System to CGB}
The call \f{GSYS2CGB}\ttindex{GSYS2CGB} turns a given Gr\"obner system into a {\sc cgb} by constructing the union of the Gr\"obner bases of the single cases.
%
\subsection*{Example:}
{\small\begin{verbatim}
torder({x,y},lex)$
gsys{a*x+y,x+b*y}$
gsys2cgb ws;

{x + b*y,a*x + y,(a*b - 1)*y}
\end{verbatim}}
%
\section{Switch CGBREAL: Computing over the Real Numbers}\label{cgbreal}
All computations considered so far have taken place over the complex numbers, more precisely, over algebraically closed fields. Over the real numbers, certain branches of the {\sc cgb} computation can become inconsistent though they are not inconsistent over the complex numbers. Consider, e.g., a condition $a^2+1=0$. When turning on the switch \f{CGBREAL}\ttindex{CGBREAL}, all simplifications of conditions are performed over the real numbers. The methods used for this are described in \cite{DolzmannSturm:97c}.
%
\subsection*{Example:}
{\small\begin{verbatim}
torder({x,y},lex)$
off cgbreal;
gsys {a*x+y,x-a*y};

   2
{{a  + 1 <> 0 and a <> 0,

                     2
  {a*x + y,x - a*y,(a  + 1)*y}},

             2
 {a <> 0 and a  + 1 = 0,{a*x + y,x - a*y}},

 {a = 0,{a*x + y,x - a*y}}}

on cgbreal;
gsys({a*x+y,x-a*y});

{{a <> 0,

                     2
  {a*x + y,x - a*y,(a  + 1)*y}},

 {a = 0,{a*x + y,x - a*y}}}
\end{verbatim}}
\section{Switches}
\begin{description}
\item[\f{CGBREAL}] Compute over the real numbers. See Section~\ref{cgbreal} for details.
\item[\f{CGBGS}\ttindex{CGBGS}] Gr\"obner simplification of the condition. The switch \f{CGBGS} can be turned on for applying advanced algebraic simplification techniques to the conditions. This will, in general, slow down the computation, but lead to a simpler Gr\"obner system.
\item[\f{CGBSTAT}\ttindex{CGBSTAT}] Statistics of the CGB run. The switch \f{CGBSTAT} toggles the creation and output of statistical information on the CGB run. The statistical information is printed at the end of the run.
\item[\f{CGBFULLRED}\ttindex{CGBFULLRED}] Full reduction. By default, the CGB functions perform full reductions in contrast to pure top reductions. By turning off the switch \f{CGBFULLRED}, reduction can be restricted to top reductions.
\end{description}
\chapter[CHANGEVR: Change of Variables in DEs]%
{CHANGEVR: Change of Independent Variables in DEs}
\label{CHANGEVR}
\typeout{[CHANGEVR: Change of Variables in DEs]}
{\footnotesize
\begin{center}
G. \"{U}\c{c}oluk \\
Department of Physics, Middle East Technical University \\
Ankara, Turkey\\[0.05in]
e--mail: ucoluk@trmetu.bitnet
\end{center}
}
The function {\tt CHANGEVAR} has (at least) four different arguments.\ttindex{CHANGEVAR}
\begin{itemize}
\item {\bf FIRST ARGUMENT} \\
is a list of the dependent variables of the differential equation. If there is only one dependent variable it can be given directly, not as a list.
\item {\bf SECOND ARGUMENT} \\
is a list of the {\bf new} independent variables, or in the case of only one, the variable.
\item {\bf THIRD ARGUMENT, FOURTH {\em etc.}} \\
are equations of the form
\begin{quote}{\tt{\em old variable} = {\em a function in new variables}}\end{quote}
The left hand side must be a kernel (not a more general structure). These give the old variables in terms of the new ones.
\item {\bf LAST ARGUMENT} \\
is a list of algebraic expressions which evaluate to differential equations in the usual list notation. Again it is possible to omit the list form if there is only {\bf one} differential equation.
\end{itemize}
If the last argument is a list then the result of {\tt CHANGEVAR} is a list too. It is possible to display the entries of the inverse Jacobian. To do so, turn {\tt ON} the flag {\tt DISPJACOBIAN}\ttindex{DISPJACOBIAN}.
\section{An example: the 2-D Laplace Equation}
The 2-dimensional Laplace equation in Cartesian coordinates is:
\[ \frac{\partial^{2} u}{\partial x^{2}} +
   \frac{\partial^{2} u}{\partial y^{2}} = 0 \]
Now assume we want to obtain the polar coordinate form of the Laplace equation. The change of variables is:
\[ x = r \cos \theta, {\;\;\;\;\;\;\;\;\;\;} y = r \sin \theta \]
The solution using {\tt CHANGEVAR} is
{\small\begin{verbatim}
CHANGEVAR({u},{r,theta},{x=r*cos theta,y=r*sin theta},
          {df(u(x,y),x,2)+df(u(x,y),y,2)} );
\end{verbatim}}
Here we could omit the curly braces in the first and last arguments (because those lists have only one member) and the curly braces in the third argument (because they are optional), but not in the second. So one could equivalently write
{\small\begin{verbatim}
CHANGEVAR(u,{r,theta},x=r*cos theta,y=r*sin theta,
          df(u(x,y),x,2)+df(u(x,y),y,2) );
\end{verbatim}}
The {\tt u(x,y)} operator will be changed to {\tt u(r,theta)} in the result, as one would do with pencil and paper. {\tt u(r,theta)} represents the transformed dependent variable.
\chapter[COMPACT: Compacting expressions]{COMPACT: Package for compacting expressions}
\label{COMPACT}
\typeout{{COMPACT: Package for compacting expressions}}
{\footnotesize
\begin{center}
Anthony C. Hearn\\
RAND\\
Santa Monica \\
CA 90407-2138, U.S.A. \\[0.05in]
e--mail: hearn@rand.org
\end{center}
}
\ttindex{COMPACT}\index{COMPACT package}\index{side relations}
\index{relations ! side}
{COMPACT} is a package of functions for the reduction of a polynomial in the presence of side relations. The package defines one operator {COMPACT} \index{COMPACT operator} whose syntax is:
\begin{quote}
\k{COMPACT}(\s{expression}, \s{list}):\s{expression}
\end{quote}
\s{expression} can be any well-formed algebraic expression, and \s{list} an expression whose value is a list of either expressions or equations. For example
{\small\begin{verbatim}
compact(x**2+y**3*x-5y,{x+y-z,x-y-z1});
compact(sin(x)**10*cos(x)**3+sin(x)**8*cos(x)**5,
        {cos(x)**2+sin(x)**2=1});
let y = {cos(x)**2+sin(x)**2-1};
compact(sin(x)**10*cos(x)**3+sin(x)**8*cos(x)**5,y);
\end{verbatim}}
{COMPACT} applies the relations to the expression so that an equivalent expression results with as few terms as possible. The method used is briefly as follows:
\begin{enumerate}
\item Side relations are applied separately to numerator and denominator, so that the problem is reduced to the reduction of a polynomial with respect to a set of polynomial side relations.
\item Reduction is performed sequentially, so that the problem is reduced further to the reduction of a polynomial with respect to a single polynomial relation.
\item The polynomial being reduced is reordered so that the variables (kernels) occurring in the side relation have least precedence.
\item Each coefficient of the remaining kernels (which now only contain the kernels in the side relation) is reduced with respect to that side relation. \item A polynomial quotient/remainder calculation is performed on the coefficient. The remainder is used instead of the original if it has fewer terms. \item The remaining expression is reduced with respect to the side relation using a ``nearest neighbour'' approach. \end{enumerate} \chapter[CRACK: Overdetermined systems of DEs]% {CRACK: Solving overdetermined systems of PDEs or ODEs} \label{CRACK} \typeout{[CRACK: Overdetermined systems of DEs]} {\footnotesize \begin{center} Thomas Wolf \\ School of Mathematical Sciences, Queen Mary and Westfield College \\ University of London \\ London E1 4NS, England \\[0.05in] e--mail: T.Wolf@maths.qmw.ac.uk \\ [0.10in] %%WWW: http://www.zib-berlin.de/Symbolik/crack.html \\[0.10in] Andreas Brand \\ Institut f\"{u}r Informatik \\ Friedrich Schiller Universit\"{a}t Jena \\ 07740 Jena, Germany \\[0.05in] e--mail: maa@hpux.rz.uni-jena.de \end{center} } \ttindex{CRACK} The package CRACK aims at solving or at least partially integrating single ordinary differential equations or partial differential equations (ODEs/PDEs), and systems of them, exactly and in full generality. Calculations done with input DEs include the \begin{itemize} \item integration of exact DEs and generalised exact DEs \item determination of monomial integrating factors \item direct and indirect separation of DEs \item systematic application of integrability conditions \item solution of single elementary ODEs by using the REDUCE package ODESOLVE (chapter~\ref{ODESOLVE}). \end{itemize} %More details are given in the manual CRACK.TEX. Input DEs may be polynomially non-linear in the unknown functions and their derivatives and may depend arbitrarily on the independent variables. Suitable applications of CRACK are the solution of \begin{itemize} \item overdetermined ODE/PDE-systems (overdetermined here just means that the number of unknown functions of all independent variables is less than the number of given equations for these functions). \item simple non-overdetermined DE-systems (such as characteristic ODE-systems of first order quasilinear PDEs). \end{itemize} The strategy is to have {\bf one} universal program (CRACK) which is as effective as possible for solving overdetermined PDE-systems and many application programs (such as LIEPDE) which merely generate an overdetermined PDE-system depending on what is to be investigated (for example, symmetries or conservation laws). Examples are: \begin{itemize} \item the investigation of infinitesimal symmetries of DEs (LIEPDE), \item the determination of an equivalent Lagrangian for second order ODEs (LAGRAN) \item the investigation of first integrals of ODEs which are polynomial in their highest derivative (FIRINT) \item the splitting of an $n^{th}$ order ODE into a first order ODE and an $(n-1)^{th}$ order problem (DECOMP) %%\item the search for conservation laws of PDEs (-systems) (CONLAW, not %% yet added to the library (Sep.\ 1995) but obtainable from T.W.) \end{itemize} Other applications where non-overdetermined problems are treated are \begin{itemize} \item the application of infinitesimal symmetries ({\em e.g.\ }calculated by LIEPDE) in the package APPLYSYM (chapter~\ref{APPLYSYM}), \item the program QUASILINPDE (also in the package APPLYSYM) for solving single first order quasilinear PDEs. 
\end{itemize}
The kernel package for solving overdetermined or simple non-overdetermined DE-systems is accessible through a call to the program CRACK in the package CRACK. All the application programs mentioned above except APPLYSYM (LIEPDE, LAGRAN, FIRINT and DECOMP) are contained in the package CRACKAPP. The programs APPLYSYM and QUASILINPDE are contained in the package APPLYSYM (described in chapter~\ref{APPLYSYM}).
%%A short description of all the applications mentioned above including
%%examples are given in an paper to be published in a special issue of
%%"Mathematical and Computer Modelling", ed. B.\ Fuchssteiner, V.\ Gerdt
%%and W.\ Oevel which also is available through ftp from
%%euclid.maths.qmw.ac.uk as preprint file pub/crack/demo.ps. More details are
%%given in the files CRACK.TEX and APPLYSYM.TEX and input examples are available
%%in the test files CRACK.TST and APPLYSYM.TST.
%%The latest versions of the programs, manuals and test files
%%are available through ftp
%%from euclid.maths.qmw.ac.uk and the directory /pub/crack.
Details of the CRACK applications can be found in the example file. {\tt CRACK} is called by
\begin{tabbing}
{\tt CRACK}(\=\{{\it equ}$_1$, {\it equ}$_2$, \ldots , {\it equ}$_m$\}, \\
\>\{{\it ineq}$_1$, {\it ineq}$_2$, \ldots , {\it ineq}$_n$\}, \\
\>\{{\it fun}$_1$, {\it fun}$_2$, \ldots , {\it fun}$_p$\}, \\
\>\{{\it var}$_1$, {\it var}$_2$, \ldots , {\it var}$_q$\});
\end{tabbing}
$m,n,p,q$ are arbitrary.
\begin{itemize}
\item The {\it equ}$_i$ are identically vanishing partial differential expressions, {\em i.e.\ } they represent equations $0 = {\it equ}_i$, which are to be solved for the functions ${\it fun}_j$ as far as possible, thereby drawing only necessary conclusions and not restricting the general solution.
\item The {\it ineq}$_i$ are expressions which must not vanish identically for any solution to be determined, {\em i.e.\ }only such solutions are computed for which none of the {\it ineq}$_i$ vanishes identically in all independent variables.
\item The dependence of the (scalar) functions ${\it fun}_j$ on their variables is assumed to have been defined with DEPEND rather than by declaring these functions as operators. Their arguments may themselves only be independent variables and not expressions.
\item The functions ${\it fun}_j$ and their derivatives may only occur polynomially. Other unknown functions in ${\it equ}_i$ may be represented as operators.
\item The ${\it var}_k$ are further independent variables, which are not already arguments of any of the ${\it fun}_j$. If there are none then the fourth argument is the empty list \{\}.
\item The dependence of the ${\it equ}_i$ on the independent variables and on constants and functions other than ${\it fun}_j$ is arbitrary.
\end{itemize}
The result is a list of solutions
\[ \{{\it sol}_1, \ldots \} \]
where each solution is a list of 3 lists:
\begin{tabbing}
\{\=\{${\it con}_1, \; {\it con}_2, \ldots , \; {\it con}_q$\}, \\
\>\{${\it fun}_a={\it ex}_a, \;\; {\it fun}_b={\it ex}_b, \ldots , \;\; {\it fun}_p={\it ex}_p$\},\= \\
\>\{${\it fun}_c, \;\; {\it fun}_d, \ldots , \;\; {\it fun}_r$\} \>\}
\end{tabbing}
with integers $a, b, c, d, p, q, r$. If {\tt CRACK} finds a contradiction, such as $0=1$, then there exists no solution and it returns the empty list \{\}. The empty list is also returned if no solution exists which does not violate the inequalities {\it ineq}$_i \neq 0.$ For example, in the case of a linear system as input, there is at most one solution ${\it sol}_1$.
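As an illustration only, a call following the conventions above might look as follows for a single equation stating that the mixed derivative of an unknown function of $x$ and $y$ vanishes (the last argument is the empty list because {\tt x} and {\tt y} are already the variables on which {\tt f} has been declared to depend; the names of any new functions or constants generated by {\tt CRACK}, and hence the exact printed form of the result, depend on the version in use):
{\small\begin{verbatim}
load_package crack;
depend f,x,y;                  % f is an unknown function of x and y
crack({df(f,x,y)},{},{f},{});
\end{verbatim}}
One would expect a single solution whose first list of remaining conditions is empty and whose second list expresses {\tt f} as the sum of a newly introduced function of {\tt x} and a newly introduced function of {\tt y}.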
The expressions ${\it con}_i$ (if there are any) are the remaining necessary and sufficient conditions for the functions ${\it fun}_c,\ldots,{\it fun}_r$ in the third list. Those functions can be original functions from the equations to be solved (the third argument of the call of {\tt CRACK}) or new functions or constants which arose from integrations. The dependence of new functions on variables is declared with {\tt DEPEND}, and to visualise this dependence the algebraic mode function ${\tt FARGS({\it fun}_i)}$ can be used. If there are no ${\it con}_i$ then all equations are solved and the functions in the third list are unconstrained. The second list contains equations ${\it fun}_i={\it ex}_i$ where each ${\it fun}_i$ is an original function and ${\it ex}_i$ is the computed expression for ${\it fun}_i$.
The exact behaviour of {\tt CRACK} can be modified by internal variables, and there is a help system particularly associated with {\tt CRACK}. Users are referred to the detailed documentation for more information.
\chapter[CVIT:Dirac gamma matrix traces]%
{CVIT: Fast calculation of Dirac gamma matrix traces}
\label{CVIT}
\typeout{[CVIT:Dirac gamma matrix traces]}
{\footnotesize
\begin{center}
V. Ilyin, A. Kryukov, A. Rodionov and A. Taranov \\
Institute for Nuclear Physics \\
Moscow State University \\
Moscow, 119899 Russia
\end{center}
}
\ttindex{CVIT}
The package consists of 5 sections, and provides an alternative to the \REDUCE\ high-energy physics system. Instead of being based on $\Gamma$-matrices as a basis for a Clifford algebra, it is based on treating $\Gamma$-matrices as 3-j symbols, as described by Cvitanovic. The functions it provides are the same as those of the standard package. It has four switches which control its behaviour.
\noindent{\tt CVIT}\ttindex{CVIT} If it is on, the Kennedy-Cvitanovic algorithm is used; otherwise the standard facilities are used.
\noindent{\tt CVITOP}\ttindex{CVITOP} Switches on Fierz optimisation. Default is off.
\noindent{\tt CVITBTR}\ttindex{CVITBTR} Switches on the bubbles and triangles factorisation. The default is on.
\noindent{\tt CVITRACE}\ttindex{CVITRACE} Controls internal tracing of the CVIT package. Default is off.
{\small\begin{verbatim}
index j1,j2,j3;
vecdim n$

g(l,j1,j2,j2,j1);

 2
n

g(l,j1,j2)*g(l1,j3,j1,j2,j3);

 2
n

g(l,j1,j2)*g(l1,j3,j1,j3,j2);

n*( - n + 2)
\end{verbatim}}
\chapter{DEFINT: Definite Integration for REDUCE}
\label{DEFINT}
\typeout{{DEFINT: Definite Integration for REDUCE}}
{\footnotesize
\begin{center}
Kerry Gaskell and Winfried Neun \\
Konrad--Zuse--Zentrum f\"ur Informationstechnik Berlin \\
Takustra{\ss}e 7 \\
D--14195 Berlin--Dahlem, Germany \\[0.05in]
e--mail: neun@zib.de \\[0.10in]
Stanley L. Kameny \\
Los Angeles, U.S.A.
\end{center}
}
\ttindex{DEFINT}
\REDUCE{}'s definite integration package is able to calculate the definite integrals of many functions, including several special functions. There are a number of parts of this package, including contour integration. The innovative integration process is to represent each function as a Meijer G-function, and then to calculate the integral by using the following Meijer G integration formula.
\begin{displaymath}
\int_{0}^{\infty} x^{\alpha-1} G^{s t}_{u v}
\left( \sigma x \ \Bigg\vert \ {( c_u) \atop (d_v)} \right)
G^{m n}_{p q}
\left( \omega x^{l/k} \ \Bigg\vert \ {(a_p) \atop (b_q)} \right) dx = k
G^{i j}_{k l}
\left( \xi \ \Bigg\vert \ {(g_k) \atop (h_l)} \right)
\hspace{5mm} (1)
\end{displaymath}
The resulting Meijer G-function is then retransformed, either directly or via a hypergeometric function simplification, to give the answer. The user interface is via a four-argument version of the \f{INT}\ttindex{INT} operator, with the lower and upper limits added.
{\small\begin{verbatim}
load_package defint;
int(sin x,x,0,pi/2);

1
\end{verbatim}}
\newpage
{\small\begin{verbatim}
int(log(x),x,1,5);

5*log(5) - 4

int(x*e^(-1/2x),x,0,infinity);

4

int(x^2*cos(x)*e^(-2*x),x,0,infinity);

  4
-----
 125

int(x^(-1)*besselj(2,sqrt(x)),x,0,infinity);

1

int(si(x),x,0,y);

cos(y) + si(y)*y - 1

int(besselj(2,x^(1/4)),x,0,y);

              1/4
 4*besselj(3,y   )*y
---------------------
  1/4
 y
\end{verbatim}}
The DEFINT package also defines a number of additional transforms, such as the Laplace transform\index{Laplace transform}\footnote{See Chapter~\ref{LAPLACE} for an alternative Laplace transform with inverse Laplace transform}, the Hankel transform\index{Hankel transform}, the Y-transform\index{Y-transform}, the K-transform\index{K-transform}, the StruveH transform\index{StruveH transform}, the Fourier sine transform\index{Fourier sine transform}, and the Fourier cosine transform\index{Fourier cosine transform}.
{\small\begin{verbatim}
laplace_transform(cosh(a*x),x);

   - s
---------
  2    2
 a  - s

laplace_transform(Heaviside(x-1),x);

  1
------
 s
e *s

hankel_transform(x,x);

        n + 4
  gamma(-------)
          2
-------------------
        n - 2    2
  gamma(-------)*s
          2

fourier_sin(e^(-x),x);

   s
--------
  2
 s  + 1

fourier_cos(x,e^(-1/2*x^2),x);

                                      2
                   i*s               s /2
 sqrt( - pi)*erf(---------)*s + e       *sqrt(2)
                  sqrt(2)
----------------------------------------------
                   2
                  s /2
                 e    *sqrt(2)
\end{verbatim}}
It is possible for the user to extend the pattern-matching process by which the relevant Meijer G representation for any function is found. Details can be found in the complete documentation.
\noindent{\bf Acknowledgement:} This package depends greatly on the pioneering work of Victor Adamchik, to whom thanks are due.
\chapter[DESIR: Linear Homogeneous DEs]%
{DESIR: Differential linear homogeneous equation solutions in the neighbourhood of irregular and regular singular points}
\label{DESIR}
\typeout{[DESIR: Linear Homogeneous DEs]}
{\footnotesize
\begin{center}
C. Dicrescenzo, F. Richard--Jung, E. Tournier \\
Groupe de Calcul Formel de Grenoble \\
laboratoire TIM3 \\
France \\[0.05in]
e--mail: dicresc@afp.imag.fr
\end{center}
}
\ttindex{DESIR}
This software enables the basis of formal solutions to be computed for an ordinary homogeneous differential equation with polynomial coefficients over Q of any order, in the neighbourhood of zero (regular or irregular singular point, or ordinary point).
This software can be used in two ways, directly via the \f{DELIRE} procedure, or interactively with the \f{DESIR} procedure. The basic procedure is the \f{DELIRE} procedure, which enables the solutions of a linear homogeneous differential equation to be computed in the neighbourhood of zero. The \f{DESIR} procedure is a procedure without arguments whereby \f{DELIRE} can be called without preliminary treatment of the data, that is to say, in an interactive autonomous way. This procedure also proposes some transformations on the initial equation.
This allows one to start comfortably with an equation which has a non-zero singular point, a polynomial right-hand side and parameters.
\noindent{\tt delire(x,k,grille,lcoeff,param)}
This procedure computes formal solutions of a linear homogeneous differential equation with polynomial coefficients over Q and of any order, in the neighbourhood of zero (regular or irregular singular point). {\tt x} is the variable and {\tt k} is the number of desired terms (that is, for each formal series in $x_t$ appearing in polysol, $a_0+a_1 x_t+a_2 x_t^2+\ldots + a_n x_t^n+ \ldots$, we compute the first $k+1$ coefficients $a_0$, $a_1$, \ldots, $a_k$). {\tt grille} is an integer such that the coefficients of the differential operator are polynomials in $x^{grille}$; in general {\tt grille} is 1. The argument {\tt lcoeff} is a list of coefficients of the differential operator (in increasing order of differentiation) and {\tt param} is a list of parameters. The procedure returns the list of general solutions.
{\small\begin{verbatim}
lcoeff:={1,x,x,x**6};

                  6
lcoeff := {1,x,x,x }

param:={};

param := {}

sol:=delire(x,4,1,lcoeff,param);

                 4       3        2
               xt  - 4*xt  + 12*xt  - 24*xt + 24
sol := {{{{0,1,-----------------------------------,1},{
                               12
 }}},

                      4              3
 {{{0,1,(6*log(xt)*xt  - 18*log(xt)*xt

                  2
  + 36*log(xt)*xt  - 36*log(xt)*xt

        4      3
  - 5*xt  + 9*xt  - 36*xt + 36)/36,0},{} }},

     1
 {{{-------,1,
        4
    4*xt

         4      3        2
   361*xt  + 4*xt  + 12*xt  + 24*xt + 24
   ---------------------------------------,10},
                     24
   {}}}}
\end{verbatim}}
\chapter{DFPART: Derivatives of generic functions}
\label{DFPART}
\typeout{{DFPART: Derivatives of generic functions}}
{\footnotesize
\begin{center}
Herbert Melenk \\
Konrad--Zuse--Zentrum f\"ur Informationstechnik Berlin \\
Takustra{\ss}e 7 \\
D--14195 Berlin--Dahlem, Germany \\[0.05in]
e--mail: melenk@zib.de
\end{center}
}
\ttindex{DFPART} \index{derivatives} \index{partial derivatives} \index{generic function}
The package {\tt DFPART} supports computations with total and partial derivatives of formal function objects. Such computations can be useful in the context of differential equations or power series expansions.
\section{Generic Functions}
A generic function is a symbol which represents a mathematical function. The minimal information about a generic function is the number of its arguments. In order to facilitate programming and to give more readable output, this package assumes that the arguments of a generic function have default names such as $f(x,y)$, $q(rho,phi)$. A generic function is declared by a prototype form in a statement\ttindex{GENERIC\_FUNCTION}
\vspace{.1in}
{\tt GENERIC\_FUNCTION} $fname(arg_1,arg_2\cdots arg_n)$;
\vspace{.1in}
\noindent
where $fname$ is the (new) name of a function and $arg_i$ are symbols for its formal arguments. In the following $fname$ is referred to as ``generic function'', $arg_1,arg_2\cdots arg_n$ as ``generic arguments'' and $fname(arg_1,arg_2\cdots arg_n)$ as ``generic form''.
Examples: {\small\begin{verbatim} generic_function f(x,y); generic_function g(z); \end{verbatim}} After this declaration {\REDUCE} knows that \begin{itemize} \item there are formal partial derivatives $\frac{\partial f}{\partial x}$, $\frac{\partial f}{\partial y}$ $\frac{\partial g}{\partial z}$ and higher ones, while partial derivatives of $f$ and $g$ with respect to other variables are assumed as zero, \item expressions of the type $f()$, $g()$ are abbreviations for $f(x,y)$, $g(z)$, \item expressions of the type $f(u,v)$ are abbreviations for\\ $sub(x=u,y=v,f(x,y))$ \item a total derivative $\frac{d f(u,v)}{d w}$ has to be computed as $\frac{\partial f}{\partial x} \frac{d u}{d w} + \frac{\partial f}{\partial y} \frac{d v}{d w}$ \end{itemize} \section{Partial Derivatives} The operator {\tt DFP}\ttindex{DFP} represents a partial derivative: \vspace{.1in} {\tt DFP}($expr,{dfarg_1,dfarg_2\cdots dfarg_n}$); \vspace{.1in} \noindent where $expr$ is a function expression and $dfarg_i$ are the differentiation variables. Examples: {\small\begin{verbatim} dfp(f(),{x,y}); \end{verbatim}} means $\frac{\partial ^2 f}{\partial x \partial y}$ and {\small\begin{verbatim} dfp(f(u,v),{x,y}); \end{verbatim}} stands for $\frac{\partial ^2 f}{\partial x \partial y} (u,v)$. For compatibility with the $DF$ operator the differentiation variables need not be entered in list form; instead the syntax of {\tt DF} can be used, where the function expression is followed by the differentiation variables, eventually with repetition numbers. Such forms are internally converted to the above form with a list as second parameter. The expression $expr$ can be a generic function with or without arguments, or an arithmetic expression built from generic functions and other algebraic parts. In the second case the standard differentiation rules are applied in order to reduce each derivative expressions to a minimal form. When the switch {\tt NAT} is on partial derivatives of generic functions are printed in standard index notation, that is $f_{xy}$ for $\frac{\partial ^2 f}{\partial x \partial y}$ and $f_{xy}(u,v)$ for $\frac{\partial ^2 f}{\partial x \partial y}(u,v)$. Therefore single characters should be used for the arguments whenever possible. Examples: {\small\begin{verbatim} generic_function f(x,y); generic_function g(y); dfp(f(),x,2); F XX dfp(f()*g(),x,2); F *G() XX dfp(f()*g(),x,y); F *G() + F *G XY X Y \end{verbatim}} The difference between partial and total derivatives is illustrated by the following example: {\small\begin{verbatim} generic_function h(x); dfp(f(x,h(x))*g(h(x)),x); F (X,H(X))*G(H(X)) X df(f(x,h(x))*g(h(x)),x); F (X,H(X))*G(H(X)) + F (X,H(X))*H (X)*G(H(X)) X Y X + G (H(X))*H (X)*F(X,H(X)) Y X \end{verbatim}} Normally partial differentials are assumed as non-commutative {\small\begin{verbatim} dfp(f(),x,y)-dfp(f(),y,x); F - F XY YX \end{verbatim}} However, a generic function can be declared to have globally interchangeable partial derivatives using the declaration {\tt DFP\_COMMUTE}\ttindex{DFP\_COMMUTE} which takes the name of a generic function or a generic function form as argument. For such a function differentiation variables are rearranged corresponding to the sequence of the generic variables. {\small\begin{verbatim} generic_function q(x,y); dfp_commute q(x,y); dfp(q(),{x,y,y}) + dfp(q(),{y,x,y}) + dfp(q(),{y,y,x}); 3*Q XYY \end{verbatim}} If only a part of the derivatives commute, this has to be declared using the standard {\REDUCE} rule mechanism. 
Please note that in this case the derivative variables must be written as a list.
\section{Substitutions}
When a generic form or a {\tt DFP} expression takes part in a substitution, the following steps are performed:
\begin{enumerate}
\item The substitutions are performed for the arguments. If the argument list is empty the substitution is applied to the generic arguments of the function; if these change, the resulting forms are used as new actual arguments. If the generic function itself is not affected by the substitution, the process stops here.
\item If the function name or the generic function form occurs as a left hand side in the substitution list, it is replaced by the corresponding right hand side.
\item The new form is partially differentiated according to the list of partial derivative variables.
\item The (possibly modified) actual parameters are substituted into the form for their corresponding generic variables. This substitution is done by name.
\end{enumerate}
Examples:
{\small\begin{verbatim}
generic_function f(x,y);
sub(y=10,f());

F(X,10)

sub(y=10,dfp(f(),x,2));

F  (X,10)
 XX

sub(y=10,dfp(f(y,y),x,2));

F  (10,10)
 XX

sub(f=x**3*y**3,dfp(f(),x,2));

      3
6*X*Y

generic_function ff(y,z);
sub(f=ff,f(a,b));

FF(B,Z)
\end{verbatim}}
\chapter[DUMMY: Expressions with dummy vars]%
{DUMMY: Canonical form of expressions with dummy variables}
\label{DUMMY}
\typeout{[DUMMY: Expressions with dummy variables]}
{\footnotesize
\begin{center}
Alain Dresse \\
Universit\'e Libre de Bruxelles \\
Boulevard du Triomphe, CP 210/01 \\
B--1050 BRUXELLES, Belgium \\[0.05in]
e--mail: adresse@ulb.ac.be
\end{center}
}
\ttindex{DUMMY}
An expression of the type
$$ \sum_{a=1}^{n} f(a) $$
for any $n$ is simply written as
$$ f(a) $$
and $a$ is a {\em dummy} index. If the previous expression is written as
$$ \sum_{b=1}^{n} f(b) $$
$b$ is also a dummy index and, obviously, we should be able to get the equality
$$ f(a)-f(b);\, \rightarrow 0 $$
To declare dummy variables, two declarations are available:\ttindex{DUMMY\_BASE}
\begin{itemize}
\item[i.]
{\small\begin{verbatim}
dummy_base <idp>;
\end{verbatim}}
where {\tt idp} is the name of any unassigned identifier.
\item[ii.]\ttindex{dummy\_names}
{\small\begin{verbatim}
dummy_names <d>,<dp>,<dpp> ....;
\end{verbatim}}
\end{itemize}
The first declares {\tt idp1}, {\tt idp2}, \ldots\ as dummy variables, {\em i.e.\ }all variables of the form ``{\tt idpxxx}'', where {\tt xxx} is a number, will be dummy variables, such as {\tt idp1, idp2, \ldots , idp23}. The second gives special names for dummy variables. All other arguments are assumed to be {\tt free}.\\
An example:
{\small\begin{verbatim}
dummy_base dv;      ==> dv   % dummy indices are dv1, dv2, dv3, ...
dummy_names i,j,k;  ==> t    % dummy names are i,j,k.
\end{verbatim}}
When this is done, an expression like
{\small\begin{verbatim}
op(dv1)*sin(dv2)*abs(x)*op(i)^3*op(dv2)$
\end{verbatim}}
is allowed. Notice that dummy indices need not be repeated (the package is not limited to tensor calculus), and that they may also be repeated many times inside the expression. By default all operators with dummy arguments are assumed to be {\em commutative} and without symmetry properties.
This can be varied: the declarations {\tt NONCOM}, {\tt SYMMETRIC} and {\tt AN\-TI\-SYM\-ME\-TRIC} may be used on the operators.\ttindex{NONCOM}\ttindex{SYMMETRIC}\ttindex{ANTISYMMETRIC}
They can also be declared anticommutative.\ttindex{ANTICOM}
{\small\begin{verbatim}
anticom ao1, ao2;
\end{verbatim}}
More complex symmetries can be handled with {\tt SYMTREE}.\ttindex{SYMTREE} The corresponding declaration for the Riemann tensor is
{\small\begin{verbatim}
symtree (r, {!+, {!-, 1, 2}, {!-, 3, 4}});
\end{verbatim}}
The symbols !*, !+ and !- at the beginning of each list mean that the operator has no symmetry, is symmetric and is antisymmetric with respect to the indices inside the list. Notice that the indices are not designated by their names but merely by their natural order of appearance. 1 means the first written argument of {\tt r}, 2 its second argument {\em etc.} In the example above r is symmetric with respect to interchange of the pairs of indices 1,2 and 3,4 respectively.
\chapter{EDS: Exterior differential systems}
\label{EDS}
\typeout{{EDS: Exterior differential systems}}
{\footnotesize
\begin{center}
David Hartley \\
Physics and Mathematical Physics \\
University of Adelaide SA 5005, Australia \\
e-mail: DHartley@physics.adelaide.edu.au
\end{center}
}
\ttindex{EDS: Exterior differential systems} \ttindex{EDS}
\section{Introduction}
Exterior differential systems give a geometrical framework for partial differential equations and more general differential geometric problems. The geometrical formulation has several advantages stemming from its coordinate-independence, including superior treatment of nonlinear and global problems. {\tt EDS} provides a number of tools for setting up and manipulating exterior differential systems and implements many features of the theory. Its main strengths are the ability to use anholonomic or moving frames and the care taken with nonlinear problems.
The package is loaded
%\footnote{The package {\tt EXCALC}
%(Chap. \ref{EXCALC} p. \pageref{EXCALC}) and the package {\tt XIDEAL}
%(Chap. \ref{XIDEAL} p. \pageref{XIDEAL}) are loaded automatically with
%this package.}
by typing \quad {\tt load eds;} \par
Reading the full documentation, which comes with this package, is strongly recommended. The test file eds.tst, which is also in the package, provides three inspiring examples on the subject. \\
EDS uses E.~Schr{\"u}fer's EXCALC package for the underlying exterior calculus operations.
\section{Data Structures and Concepts}
\subsection{EDS}
A simple \meta{EDS}, or exterior differential system, is a triple {\tt (S,$\Omega$,M)}, where {\it M} is a {\it coframing}, {\it S} is a system on {\it M}, and {\it $\Omega$} is an independence condition. Exterior differential equations without an independence condition are not treated by {\tt EDS}. {\it $\Omega$} should be either a decomposable \meta{p-form} or a \meta{system} of 1-forms on {\it M}. \\
More generally an \meta{EDS} is a list of simple \meta{EDS} objects where the various coframings are all disjoint. \\
The solutions of {\it (S,$\Omega$,M)} are integral manifolds, or immersions on which {\it S} vanishes and the rank of $\Omega$ is preserved. Solutions at a single point are described by integral elements.
\subsection{Coframing}
Within the context of {\tt EDS}, a {\it coframing} means a real finite-dimensional differentiable manifold with a given global cobasis. The information about a coframing required by {\tt EDS} is kept in a \meta{coframing} object. The cobasis is the identifying element of an {\tt EDS}.
In addition to the cobasis, {\it coordinates, structure equations} and {\it restrictions} can be given. The coordinates may be an incomplete or overcomplete set. The structure equations express the exterior derivative of the coordinates and cobasis elements as needed. All coordinate differentials must be expressed in terms of the given cobasis, but not all cobasis derivatives need be known. The restrictions are a set of inequalities describing point sets not in the manifold. \\
Please note that the \meta{coframing} object is by no means a full description of a differentiable manifold. However, the \meta{coframing} object carries sufficient information about the underlying manifold to allow a range of exterior systems calculations to be carried out.
\subsection{Systems and background coframing}
The label \meta{system} refers to a list $\{<${\it p-form expr}$>,\ldots\}$ of differential forms. If an {\tt EDS} operator also accepts a \meta{system} as argument, then any extra information which is required is taken from the background coframing. \\
It is possible to activate the rules and orderings of a \f{COFRAMING} operator globally, by making it the {\it background coframing}. All subsequent \f{EXCALC} \ttindex{EXCALC} operations will be governed by those rules. Operations on \meta{EDS} objects are unaffected, since their coframings are still activated locally.
\subsection{Integral elements}
An \meta{integral element} of an exterior system $(S,\Omega,M)$ is a subspace $P \subset T_pM$ of the tangent space at some point $p \in M$. This integral element can be represented by its annihilator $P^\perp \subset T^*_pM$, comprising those 1-forms at $p$ which annihilate every vector in $P$. This can also be understood as a maximal set of 1-forms at $p$ such that $S \simeq 0 \pmod{P^\perp}$ and the rank of $\Omega$ is preserved modulo $P^\perp$. \\
An \meta{integral element} in EDS is a distribution of 1-forms on $M$, specified as a \meta{system} of 1-forms.
\subsection{Properties and normal form}
For large problems, it can require a great deal of computation to establish whether, for example, a system is closed or not. In order to save recomputing such properties, an \meta{EDS} object carries a list of \meta{properties} of the form
\begin{list}{}{}
\item {\tt \{\meta{keyword} = \meta{value},$\cdots$\}}
\end{list}
where \meta{keyword} is one of \f{closed}, \f{quasilinear}, \f{pfaffian} or \f{involutive}, and \meta{value} is either \f{0} (false) or \f{1} (true). These properties are suppressed when an \meta{EDS} is printed, unless the \f{nat} switch is \f{off}. They can be examined using the \f{PROPERTIES} operator. \\
Parts of the theory of exterior differential systems apply only at points on the underlying manifold where the system is in some sense non-singular. To ensure the theory applies, EDS automatically puts all exterior systems $(S,\Omega,M)$ into a {\em normal form}. This means that the Pfaffian component of $S$ and the independence condition $\Omega$ are in {\it solved} form, that distinguished terms from the 1-forms in $S$ have been eliminated from the rest of $S$ and from $\Omega$, and that any 1-forms in $S$ which vanish modulo the independence condition are removed from the system and their coefficients appended as 0-forms.
\section{The EDS Package}
In the descriptions of the various operators we define the following abbreviations for function parameters:
\vspace{0.25cm}
\begin{tabular}{ll}
$E$, $E'$ & \meta{EDS}\\
$S$ & \meta{system}\\
$M$, $N$ & \meta{coframing}, or a \meta{system} specifying a \meta{coframing}\\
$r$ & \meta{integer}\\
$\Omega$ & \meta{p-form}\\
$f$ & \meta{map}\\
$rsx$ & \meta{list of inequalities}\\
$cob$ & \meta{list of 1-form variables}\\
$crd$, $dep$, $ind$ & \meta{list of 0-form variables}\\
$drv$ & \meta{list of rules for exterior derivatives}\\
$pde$ & \meta{list of expressions or equations}\\
$X$ & \meta{transform}\\
$T$ & \meta{tableau}\\
$P$ & \meta{integral element}\\
\end{tabular}
\subsection{Constructing EDS objects}
An EDS \meta{coframing} is constructed using the \f{COFRAMING} operator. In one form it examines the argument for 0-form and 1-form variables. The more basic syntax takes the \meta{cobasis} as a list of 1-forms, \meta{coordinates} as a list of 0-forms, \meta{restrictions} as a list of inequalities and \meta{structure equations} as a list giving the exterior derivatives of the coordinates and cobasis elements. All arguments except the cobasis are optional. \\
A simple \meta{EDS} is constructed using the \f{EDS} operator where the \meta{indep. condition} can be either a decomposable \meta{p-form} or a \meta{system} of 1-forms. The \meta{coframing} and the \meta{properties} arguments can be omitted. The {\it EDS} is put into normal form before being returned. With \f{SET\_COFRAMING} the background coframing is set. \\
The operator \f{PDE2EDS} encodes a PDE system into an \meta{EDS} object. \\
\begin{tabular}{lll}
\f{COFRAMING}(cob,crd,rsx,drv)\ttindex{COFRAMING} & \f{COFRAMING}(S)\ttindex{COFRAMING} & \f{EDS}(S,$\Omega$,M)\ttindex{EDS} \\
\f{CONTACT}(r,M,N)\ttindex{CONTACT} & \f{PDE2EDS}(pde,dep,ind)\ttindex{PDE2EDS} & \f{SET\_COFRAMING}(M)\ttindex{SET\_COFRAMING} \\
\f{SET\_COFRAMING}(E)\ttindex{SET\_COFRAMING} & \f{SET\_COFRAMING}()\ttindex{SET\_COFRAMING}
\end{tabular}
\vspace{0.5cm}
{\bf Example:}
{\small\begin{verbatim}
1: load eds;

2: pform {x,y,z,p,q}=0,{e(i),w(i,j)}=1;

3: indexrange {i,j,k}={1,2},{a,b,c}={3};

4: eds({d z - p*d x - q*d y, d p^d q},{d x,d y});

EDS({d z - p*d x - q*d y,d p^d q},d x^d y)

5: OMrules:=index_expand {d e(i)=>-w(i,-j)^e(j),w(i,-j)+w(j,-i)=>0}$

6: eds({e(a)},{e(i)}) where OMrules;

      3    1  2
EDS({e },{e ,e })

7: coframing ws;

            3  2  1  2        1       2  2
coframing({e ,w ,e ,e },{},{d e => - e ^w ,
               1                          1

    2     1  2
 d e => e  ^w },{})
              1
\end{verbatim}}
\subsection{Inspecting EDS objects}
Using these operators you can get parts of your \meta{EDS} object. The \f{PROPERTIES}(E) operator, for example, returns a list of properties which are normally not printed out, unless the \f{NAT}\ttindex{NAT} switch is off.
\\ \begin{tabular}{lll} \f{COFRAMING}(E)\ttindex{COFRAMING} & \f{COFRAMING}()\ttindex{COFRAMING} & \f{COBASIS}(M)\ttindex{COBASIS} \\ \f{COBASIS}(E)\ttindex{COBASIS} & \f{COORDINATES}(M)\ttindex{COORDINATES} & \f{COORDINATES}(E)\ttindex{COORDINATES} \\ \f{STRUCTURE\_EQUATIONS}(M)\ttindex{STRUCTURE\_EQUATIONS} & \f{STRUCTURE\_EQUATIONS}(E)\ttindex{STRUCTURE\_EQUATIONS} & \f{RESTRICTIONS}(M)\ttindex{RESTRICTIONS} \\ \f{RESTRICTIONS}(E)\ttindex{RESTRICTIONS} & \f{SYSTEM}(E)\ttindex{SYSTEM} & \f{INDEPENDENCE}(E)\ttindex{INDEPENDENCE} \\ \f{PROPERTIES}(E)\ttindex{PROPERTIES} & \f{ONE\_FORMS}(E)\ttindex{ONE\_FORMS} & \f{ONE\_FORMS}(S)\ttindex{ONE\_FORMS} \\ \f{ZERO\_FORMS}(E)\ttindex{ZERO\_FORMS} & \f{ZERO\_FORMS}(S)\ttindex{ZERO\_FORMS} & \end{tabular} \vspace{0.5cm} {\bf Example:} {\small\begin{verbatim} 8: depend u,x,y; depend v,x,y; 9: pde2eds({df(u,y,y)=df(v,x),df(v,y)=y*df(v,x)}); EDS({d u - u *d x - u *d y, d u - u *d x - u *d y, x y x x x y x d u - u *d x - v *d y, d v - v *d x - v *y*d y},d x^d y) y y x x x x 10: dependencies; {{u,y,x},{v,y,x}} 11: coordinates contact(3,{x},{u}); {x,u,u ,u ,u } x x x x x x 12: fdomain u=u(x); 13: coordinates {d u+d y}; {x,y} \end{verbatim}} \subsection{Manipulating EDS objects} These operators allow you to manipulate your \meta{EDS} objects. The \f{AUGMENT}(E,S) operator, see example below, appends the extra forms in the second argument to the system part of the first. The original \meta{EDS} remains unchanged. As another example by using the \f{TRANSFORM} operator a change of the cobasis is made, where the argument \meta{transform} is a list of substitutions. \\ \begin{tabular}{llll} \f{AUGMENT}(E,S)\ttindex{AUGMENT} & $M$ \f{CROSS} $N$\ttindex{CROSS} & $E$ \f{CROSS} $N$\ttindex{CROSS} & \f{PULLBACK(E,f)}\ttindex{PULLBACK} \\ \f{PULLBACK}(S,f)\ttindex{PULLBACK} & \f{PULLBACK}($\Omega$,f)\ttindex{PULLBACK} & \f{PULLBACK}(M,f)\ttindex{PULLBACK} & \f{RESTRICT}(E,f)\ttindex{RESTRICT} \\ \f{RESTRICT}(S,f)\ttindex{RESTRICT} & \f{RESTRICT}($\Omega$,f)\ttindex{RESTRICT} & \f{RESTRICT}(M,f)\ttindex{RESTRICT} & \f{TRANSFORM}(M,X)\ttindex{TRANSFORM} \\ \f{TRANSFORM}(E,X)\ttindex{TRANSFORM} & \f{TRANSFORM}(S,X)\ttindex{TRANSFORM} & \f{TRANSFORM}($\Omega$,X)\ttindex{TRANSFORM} & \f{LIFT(E)}\ttindex{LIFT} \\ \end{tabular} \vspace{0.5cm} {\bf Example:} {\small\begin{verbatim} % Non-Pfaffian system for a Monge-Ampere equation 14: PFORM {x,y,z}=0$ 15: S := CONTACT(1,{x,y},{z}); s := EDS({d z - z *d x - z *d y},d x^d y) x y 16: S:= AUGMENT(S,{d z(-x)^d z(-y)}); s := EDS({d z - z *d x - z *d y, x y d z ^d z },d x^d y) x y \end{verbatim}} \subsection{Analysing and Testing exterior systems} {\bf Analysing exterior systems} \par This section introduces higher level operators for extracting information about exterior systems. Many of them require a \meta{EDS} in normal form generated in positive degree as input, but some can also analyse a \meta{system} or a single \meta{p-form}. 
\\
\begin{tabular}{lll}
\f{CARTAN\_SYSTEM}(E)\ttindex{CARTAN\_SYSTEM} & \f{CARTAN\_SYSTEM}(S)\ttindex{CARTAN\_SYSTEM} & \f{CARTAN\_SYSTEM}($\Omega$)\ttindex{CARTAN\_SYSTEM} \\
\f{CAUCHY\_SYSTEM}(E)\ttindex{CAUCHY\_SYSTEM} & \f{CAUCHY\_SYSTEM}(S)\ttindex{CAUCHY\_SYSTEM} & \f{CAUCHY\_SYSTEM}($\Omega$)\ttindex{CAUCHY\_SYSTEM} \\
\f{CHARACTERS}(E)\ttindex{CHARACTERS} & \f{CHARACTERS}(T)\ttindex{CHARACTERS} & \f{CHARACTERS}(E,P)\ttindex{CHARACTERS} \\
\f{CLOSURE}(E)\ttindex{CLOSURE} & \f{DERIVED\_SYSTEM}(E)\ttindex{DERIVED\_SYSTEMS} & \f{DERIVED\_SYSTEM}(S)\ttindex{DERIVED\_SYSTEMS} \\
\f{DIM\_GRASSMANN\_VARIETY}(E)\ttindex{DIM\_GRASSMANN\_VARIETY} & \f{DIM\_GRASSMANN\_VARIETY}(E,P)\ttindex{DIM\_GRASSMANN\_VARIETY} & \f{DIM}(M)\ttindex{DIM} \\
\f{DIM}(E)\ttindex{DIM} & \f{INVOLUTION}(E)\ttindex{INVOLUTION} & \f{LINEARISE}(E,P)\ttindex{LINEARISE} \\
\f{INTEGRAL\_ELEMENT}(E)\ttindex{INTEGRAL\_ELEMENT} & \f{PROLONG}(E)\ttindex{PROLONG} & \f{TABLEAU}(E)\ttindex{TABLEAU} \\
\f{TORSION}(E)\ttindex{TORSION} & \f{GRASSMANN\_VARIETY}(E)\ttindex{GRASSMANN\_VARIETY} &
\end{tabular}
\par \ \\
{\bf Testing exterior systems} \par
The following operators allow various properties of an \meta{EDS} to be checked. The result is either a {\bf 1} or a {\bf 0}, so these operators can be used in boolean expressions. Since checking these properties is very time-consuming, the result of the first test is stored on the \meta{properties} record of an \meta{EDS} to avoid re-checking. This memory can be cleared using the \f{CLEANUP}\ttindex{CLEANUP} operator. \\
\begin{tabular}{llll}
\f{CLOSED}(E)\ttindex{CLOSED} & \f{CLOSED}(S)\ttindex{CLOSED} & \f{CLOSED}($\Omega$)\ttindex{CLOSED} & \f{INVOLUTIVE}(E)\ttindex{INVOLUTIVE} \\
\f{PFAFFIAN}(E)\ttindex{PFAFFIAN} & \f{QUASILINEAR}(E)\ttindex{QUASILINEAR} & \f{SEMILINEAR}(E)\ttindex{SEMILINEAR} & $E$ \f{EQUIV} $E'$\ttindex{EQUIV} \\
\end{tabular}
\vspace{0.5cm}
\subsection{Switches}
EDS provides several switches to govern the display of information and enhance the speed or reliability of the calculations. For example, if the switch \f{EDSVERBOSE} is {\tt ON}, additional information will be displayed as the calculation progresses, which might generate too much output for larger problems. \\
All switches are {\tt OFF} by default.
\begin{tabular}{llllll}
\f{EDSVERBOSE}\ttindex{EDSVERBOSE} & \f{EDSDEBUG}\ttindex{EDSDEBUG} & \f{EDSSLOPPY}\ttindex{EDSSLOPPY} & \f{EDSDISJOINT}\ttindex{EDSDISJOINT} & \f{RANPOS}\ttindex{RANPOS} & \f{GENPOS}\ttindex{GENPOS} \\
\end{tabular}
\subsection{Auxiliary functions}
The operators of this section are designed to ease working with exterior forms and exterior systems in {\REDUCE}\ . \\
\begin{tabular}{lll}
\f{COORDINATES}(S)\ttindex{COORDINATES} & \f{INVERT}(X)\ttindex{INVERT} & \f{STRUCTURE\_EQUATIONS}(X)\ttindex{STRUCTURE\_EQUATIONS} \\
\f{STRUCTURE\_EQUATIONS}(X,$X^{-1}$)\ttindex{STRUCTURE\_EQUATIONS} & \f{LINEAR\_DIVISORS}($\Omega$)\ttindex{LINEAR\_DIVISORS} & \f{EXFACTORS}($\Omega$)\ttindex{EXFACTORS} \\
\f{INDEX\_EXPAND}(ANY)\ttindex{INDEX\_EXPAND} & \f{PDE2JET}(pde,dep,ind)\ttindex{PDE2JET} & \f{MKDEPEND}(list)\ttindex{MKDEPEND} \\
\f{DISJOIN}(f,g,...)\ttindex{DISJOIN} & \f{CLEANUP}(E)\ttindex{CLEANUP} & \f{CLEANUP}(M)\ttindex{CLEANUP} \\
\f{REORDER}(E)\ttindex{REORDER} & \f{REORDER}(M)\ttindex{REORDER} &
\end{tabular}
\subsection{Experimental Functions}
The following operators are experimental facilities, since they are either algorithmically not well-founded, their implementation is very unstable, or they have known bugs.
\\ \begin{tabular}{lll} \f{POINCARE}($\Omega$)\ttindex{POINCARE} & \f{INVARIANTS}(E,crd)\ttindex{INVARIANTS} & \f{INVARIANTS}(S,crd)\ttindex{INVARIANTS} \\ \f{SYMBOL\_RELATIONS}(E,$\pi$)\ttindex{SYMBOL\_RELATIONS} & \f{SYMBOL\_MATRIX}(E,$\xi$)\ttindex{SYMBOL\_MATRIX} & \f{CHARACTERISTIC\_VARIETY}(E,$\xi$)\ttindex{CHARACTERISTIC\_VARIETY} \\ \end{tabular} \vspace{0.5cm} {\bf Example:} {\small\begin{verbatim} 17: % Riemann invariants for Euler-Poisson-Darboux equation. 17: % Set up the EDS for the equation, and examine tableau. 17: depend u,x,y; EPD :=PDE2EDS{DF(u,x,y)=-(df(u,x)+df(u,y))/(x+y)}$ 19: tableau EPD; [d u 0 ] [ x x ] [ ] [ 0 d u ] [ y y] 20: % 1-form dx is characteristic: construct characteristic EDS. 20: xvars {}; C := cartan_system select(~f^d x=0,system closure epd)$ 22: S := augment(eds(system EPD,d y),C)$ 23: % Compute derived flag 23: while not equiv(S,S1 := derived_system S) do S := S1; 24: % Stabilised. Find the Riemann invariants. 24: invariants(S,reverse coordinates S); {x, u *x + u *y + u, x x - u *x - u *y - 2*u } x x x x x \end{verbatim}} \chapter[EXCALC: Differential Geometry]% {EXCALC: A differential geometry package} \label{EXCALC} \typeout{{EXCALC: A differential geometry package}} {\footnotesize \begin{center} Eberhard Schr\"{u}fer \\ GMD, Institut I1 \\ Postfach 1316 \\ 53757 St. Augustin, GERMANY \\[0.05in] e--mail: schruefer@gmd.de \end{center} } \ttindex{EXCALC} {\bf EXCALC} is designed for easy use by all who are familiar with the calculus of Modern Differential Geometry. Its syntax is kept as close as possible to standard textbook notations. Therefore, no great experience in writing computer algebra programs is required. It is almost possible to input to the computer the same as what would have been written down for a hand-calculation. For example, the statement {\small\begin{verbatim} f*x^y + u _| (y^z^x) \end{verbatim}} \index{exterior calculus} would be recognized by the program as a formula involving exterior products and an inner product. The program is currently able to handle scalar-valued exterior forms, vectors and operations between them, as well as non-scalar valued forms (indexed forms). With this, it should be an ideal tool for studying differential equations, doing calculations in general relativity and field theories, or doing such simple things as calculating the Laplacian of a tensor field for an arbitrary given frame. With the increasing popularity of this calculus, this program should have an application in almost any field of physics and mathematics. \section{Declarations} Geometrical objects like exterior forms or vectors are introduced to the system by declaration commands. The declarations can appear anywhere in a program, but must, of course, be made prior to the use of the object. Everything that has no declaration is treated as a constant; therefore zero-forms must also be declared. An exterior form is introduced by\label{PFORM}\index{PFORM statement} \index{exterior form ! 
declaration} \hspace*{2em} \k{PFORM} \s{declaration$_1$}, \s{declaration$_2$}, \ldots; where \begin{tabbing} \s{declaration} ::= \s{name} $\mid$ \s{list of names}=\s{number} $\mid$ \s{identifier} $\mid$ \\ \s{expression} \\ \s{name} ::= \s{identifier} $\mid$ \s{identifier}(\s{arguments}) \end{tabbing} For example {\small\begin{verbatim} pform u=k,v=4,f=0,w=dim-1; \end{verbatim}} declares {\tt U} to be an exterior form of degree {\tt K}, {\tt V} to be a form of degree 4, {\tt F} to be a form of degree 0 (a function), and {\tt W} to be a form of degree {\tt DIM}-1. The declaration of vectors is similar. The command {\tt TVECTOR}\label{TVECTOR} takes a list of names.\index{TVECTOR command}\index{exterior form ! vector} \hspace*{2em} \k{TVECTOR} \s{name$_1$}, \s{name$_2$}, \ldots; For example, to declare {\tt X} as a vector and {\tt COMM} as a vector with two indices, one would say {\small\begin{verbatim} tvector x,comm(a,b); \end{verbatim}} The exterior degree of a symbol or a general expression can be obtained with the function \label{EXDEGREE}\index{EXDEGREE command} \hspace*{2em} \k{EXDEGREE} \s{expression}; Example: {\small\begin{verbatim} exdegree(u + 3*chris(k,-k)); 1 \end{verbatim}} \section{Exterior Multiplication} \index{"\^{} ! exterior multiplication}\index{exterior product} Exterior multiplication between exterior forms is carried out with the nary infix operator \^{ } (wedge)\label{wedge}. Factors are ordered according to the usual ordering in {\REDUCE} using the commutation rule for exterior products. {\small\begin{verbatim} pform u=1,v=1,w=k; u^v; U^V v^u; - U^V u^u; 0 w^u^v; K ( - 1) *U^V^W (3*u-a*w)^(w+5*v)^u; A*(5*U^V^W - U^W^W) \end{verbatim}} It is possible to declare the dimension of the underlying space by\label{SPACEDIM}\index{SPACEDIM command}\index{dimension} \hspace*{2em} \k{SPACEDIM} \s{number} $\mid$ \s{identifier}; If an exterior product has a degree higher than the dimension of the space, it is replaced by 0: \section{Partial Differentiation} Partial differentiation is denoted by the operator {\tt @}\label{at}. Its capability is the same as the {\REDUCE} {\tt DF} operator. \index{"@ operator}\index{partial differentiation} \index{differentiation ! partial} \example\index{EXCALC package ! example} {\small\begin{verbatim} @(sin x,x); COS(X) @(f,x); 0 \end{verbatim}} An identifier can be declared to be a function of certain variables. \index{FDOMAIN command} This is done with the command {\tt FDOMAIN}\label{FDOMAIN}. The following would tell the partial differentiation operator that {\tt F} is a function of the variables {\tt X} and {\tt Y} and that {\tt H} is a function of {\tt X}. {\small\begin{verbatim} fdomain f=f(x,y),h=h(x); \end{verbatim}} Applying {\tt @} to {\tt F} and {\tt H} would result in {\small\begin{verbatim} @(x*f,x); F + X*@ F X @(h,y); 0 \end{verbatim}} \index{tangent vector} The partial derivative symbol can also be an operator with a single argument. It then represents a natural base element of a tangent vector\label{at1}. \section{Exterior Differentiation} \index{exterior differentiation} Exterior differentiation of exterior forms is carried out by the operator {\tt d}\label{d}. Products are normally differentiated out, {\small\begin{verbatim} pform x=0,y=k,z=m; d(x * y); X*d Y + d X^Y \end{verbatim}} This expansion can be suppressed by the command {\tt NOXPND D}\label{NOXPNDD}.\index{NOXPND ! D} Expansion is performed again when the command {\tt XPND D}\label{XPNDD} is executed.\index{XPND ! 
D} If an argument of an implicitly defined function has further dependencies the chain rule will be applied {\em e.g.}\index{chain rule} {\small\begin{verbatim} fdomain y=y(z); d f; @ F*d X + @ F*@ Y*d Z X Y Z \end{verbatim}} Expansion into partial derivatives can be inhibited by {\tt NOXPND @}\label{NOXPNDA} and enabled again by {\tt XPND @}\label{XPNDA}. \index{NOXPND ! "@}\index{XPND ! "@} \section{Inner Product} \index{inner product ! exterior form} The inner product between a vector and an exterior form is represented by the diphthong \_$|$ \label{innerp} (underscore or-bar), which is the notation of many textbooks. If the exterior form is an exterior product, the inner product is carried through any factor. \index{\_$\mid$ operator} \example\index{EXCALC package ! example} {\small\begin{verbatim} pform x=0,y=k,z=m; tvector u,v; u _| (x*y^z); K X*(( - 1) *Y^U _| Z + U _| Y^Z) \end{verbatim}} \section{Lie Derivative} \index{Lie Derivative} The Lie derivative can be taken between a vector and an exterior form or between two vectors. It is represented by the infix operator $|$\_ \label{lie}. In the case of Lie differentiating, an exterior form by a vector, the Lie derivative is expressed through inner products and exterior differentiations, {\em i.e.}\index{$\mid$\_ operator} {\small\begin{verbatim} pform z=k; tvector u; u |_ z; U _| d Z + d(U _| Z) \end{verbatim}} \section{Hodge-* Duality Operator} \index{Hodge-* duality operator}\index{"\# ! Hodge-* operator} The Hodge-*\label{hodge} duality operator maps an exterior form of degree {\tt K} to an exterior form of degree {\tt N-K}, where {\tt N} is the dimension of the space. The double application of the operator must lead back to the original exterior form up to a factor. The following example shows how the factor is chosen here {\small\begin{verbatim} spacedim n; pform x=k; # # x; 2 (K + K*N) ( - 1) *X*SGN \end{verbatim}} \index{SGN ! indeterminate sign}\index{coframe} The indeterminate SGN in the above example denotes the sign of the determinant of the metric. It can be assigned a value or will be automatically set if more of the metric structure is specified (via COFRAME), {\em i.e.} it is then set to $g/|g|$, where $g$ is the determinant of the metric. If the Hodge-* operator appears in an exterior product of maximal degree as the leftmost factor, the Hodge-* is shifted to the right according to {\small\begin{verbatim} pform {x,y}=k; # x ^ y; 2 (K + K*N) ( - 1) *X^# Y \end{verbatim}} \section{Variational Derivative} \index{derivative ! variational}\index{variational derivative} \ttindex{VARDF} The function {\tt VARDF}\label{VARDF} returns as its value the variation of a given Lagrangian n-form with respect to a specified exterior form (a field of the Lagrangian). In the shared variable \ttindex{BNDEQ"!*} {\tt BNDEQ!*}, the expression is stored that has to yield zero if integrated over the boundary. Syntax: \hspace*{2em} \k{VARDF}(\s{Lagrangian n-form},\s{exterior form}) \example\index{EXCALC package ! example} {\small\begin{verbatim} spacedim 4; pform l=4,a=1,j=3; l:=-1/2*d a ^ # d a - a^# j$ %Lagrangian of the e.m. field vardf(l,a); - (# J + d # d A) %Maxwell's equations bndeq!*; - 'A^# d A %Equation at the boundary \end{verbatim}} For the calculation of the conserved currents induced by symmetry operators (vector fields), the function {\tt NOETHER}\label{NOETHER} \index{NOETHER function} is provided. 
It has the syntax: \hspace*{2em} \k{NOETHER}(\s{Lagrangian n-form},\s{field},\s{symmetry generator}) \example\index{EXCALC package ! example} {\small\begin{verbatim} pform l=4,a=1,f=2; spacedim 4; l:= -1/2*d a^#d a; %Free Maxwell field; tvector x(k); %An unspecified generator; noether(l,a,x(-k)); ( - 2*d(X _|A)^# d A - (X _|d A)^# d A + d A^(X _|# d A))/2 K K K \end{verbatim}} \section{Handling of Indices} \index{exterior form ! with indices} Exterior forms and vectors may have indices. On input, the indices are given as arguments of the object. A positive argument denotes a superscript and a negative argument a subscript. On output, the indexed quantity is displayed two dimensionally if {\tt NAT} is on. \index{NAT flag} Indices may be identifiers or numbers. \example\index{EXCALC package ! example} {\small\begin{verbatim} pform om(k,l)=m,e(k)=1; e(k)^e(-l); K E ^E L om(4,-2); 4 OM 2 \end{verbatim}} In certain cases, one would like to inhibit the summation over specified index names, or at all. For this the command \index{NOSUM command} \hspace*{2em} \k{NOSUM} \s{indexname$_1$}, \ldots;\label{NOSUM} and the switch {\tt NOSUM} are\index{NOSUM switch} available. The command {\tt NOSUM} has the effect that summation is not performed over those indices which had been listed. The command {\tt RENOSUM}\label{RENOSUM} enables summation again. The switch {\tt NOSUM}, if on, inhibits any summation.\index{RENOSUM command} \label{INDEXSYMMETRIES}\index{INDEXSYMMETRIES command} It is possible to declare symmetry properties for an indexed quantity by the command {\tt INDEX\_SYMMETRIES}. A prototypical example is as follows {\small\begin{verbatim} index_symmetries u(k,l,m,n): symmetric in {k,l},{m,n} antisymmetric in {{k,l},{m,n}}, g(k,l),h(k,l): symmetric; \end{verbatim}} It declares the object {\tt u} symmetric in the first two and last two indices and antisymmetric with respect to commutation of the given index pairs. If an object is completely symmetric or antisymmetric, the indices need not to be given after the corresponding keyword as shown above for {\tt g} and {\tt h}. \section{Metric Structures} \index{metric structure}\index{coframe} A metric structure is defined in {\bf EXCALC} by specifying a set of basis one-forms (the coframe) together with the metric. Syntax:\label{COFRAME} \begin{tabbing} \hspace*{2em} \k{COFRAME} \= \s{identifier}\s{(index$_1$)}=\s{expression$_1$}, \\ \> \s{identifier}\s{(index$_2$)}=\s{expression$_2$}, \\ \> . \\ \> . \\ \> . \\ \> \s{identifier}\s{(index$_n$)}=\s{expression$_n$} \\ \> \hspace{1em} \k{WITH} \k{METRIC} \s{name}=\s{expression}; \\ \end{tabbing} \index{Euclidean metric}\index{COFRAME ! WITH METRIC} This statement automatically sets the dimension of the space and the index range. The clause {\tt WITH METRIC} can be omitted if the metric \index{COFRAME ! WITH SIGNATURE} is Euclidean and the shorthand {\tt WITH SIGNATURE \s{diagonal elements}} \label{SIGNATURE} can be used in the case of a pseudo-Euclidean metric. The splitting of a metric structure in its metric tensor coefficients and basis one-forms is completely arbitrary including the extremes of an orthonormal frame and a coordinate frame. \newpage \example\index{EXCALC package ! 
example}
{\small\begin{verbatim}
coframe e r=d r, e(ph)=r*d ph
        with metric g=e(r)*e(r)+e(ph)*e(ph);  %Polar coframe
\end{verbatim}}
The frame dual to the frame defined by the {\tt COFRAME} command can be
introduced by the \k{FRAME} command.\index{FRAME command}

\hspace*{2em} \k{FRAME} \s{identifier};\label{FRAME}

This command causes the dual property to be recognised, and the tangent
vectors of the coordinate functions are replaced by the frame basis
vectors.

\example\index{EXCALC package ! example}
{\small\begin{verbatim}
coframe b r=d r,b ph=r*d ph,e z=d z;  %Cylindrical coframe;
frame x;

on nero;

x(-k) _| b(l);

  R
NS  := 1
  R

  PH
NS   := 1
  PH

  Z
NS  := 1
  Z

x(-k) |_ x(-l);        %The commutator of the dual frame;

NS      := X  /R
  PH R      PH

NS      := ( - X  )/R  %i.e. it is not a coordinate base;
  R PH          PH
\end{verbatim}}
\index{DISPLAYFRAME command}\index{tracing ! EXCALC}
As a convenience, the frames can be displayed at any point in a program
by the command {\tt DISPLAYFRAME;}\label{DISPLAYFRAME}.
\index{Hodge-* duality operator}
The Hodge-* duality operator returns the explicitly constructed dual
element if applied to coframe base elements. The metric is properly
taken into account.
\index{Levi-Civita tensor}\ttindex{EPS}
The totally antisymmetric Levi-Civita tensor {\tt EPS}\label{EPS} is also
available. The value of {\tt EPS} with an even permutation of the
indices in a covariant position is taken to be +1.

\section{Riemannian Connections}
\index{Riemannian Connections}
The command {\tt RIEMANNCONX} is provided for calculating the
\index{RIEMANNCONX command} \label{RIEMANNCONX} connection 1-forms.
The values are stored under the name given to {\tt RIEMANNCONX}.
This command is far more efficient than calculating the connection from
the differential of the basis one-forms and using inner products.

\section{Ordering and Structuring}
\index{ordering ! exterior form}\index{FORDER command}
The ordering of an exterior form or vector can be changed by the command
{\tt FORDER}.\label{FORDER} In an expression, the first identifier or
kernel in the arguments of {\tt FORDER} is ordered ahead of the second,
and so on, and all of them are ordered ahead of those not appearing as
arguments. This ordering is done on the internal level and not only on
output. The execution of this statement can therefore have tremendous
effects on computation time and memory requirements.
{\tt REMFORDER}\label{REMFORDER} brings back standard ordering for those
elements that are listed as arguments.\index{REMFORDER command}

An expression can be put in a more structured form by renaming a
subexpression. This is done with the command {\tt KEEP} which has the
syntax\index{KEEP command}\label{KEEP}

\hspace*{2em} \k{KEEP} \s{name$_1$}=\s{expression$_1$},\s{name$_2$}=\s{expression$_2$}, \ldots

\index{exterior product}
The capabilities of {\tt KEEP} are currently very limited. Only exterior
products should occur as right-hand sides in {\tt KEEP}.

\noindent{\bf Note:} This is just an introduction to the full power of
{\tt EXCALC}. The reader is referred to the full documentation.
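As a final illustration, here is a small input-only sketch of the
ordering commands described above (no output is shown, since the printed
ordering depends on the other declarations made in a session):
{\small\begin{verbatim}
pform x=1,y=1,z=1;

forder z,y;      % Z is now ordered ahead of Y, and both ahead of X

remforder z,y;   % restore the standard ordering for Z and Y
\end{verbatim}}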
\chapter[FIDE: Finite differences for PDEs]%
        {FIDE: Finite difference method for partial differential equations}
\label{FIDE}
\typeout{[FIDE: Finite differences for PDEs]}

{\footnotesize
\begin{center}
Richard Liska \\
Faculty of Nuclear Science and Physical Engineering \\
Technical University of Prague \\
Brehova 7, 115 19 Prague 1, Czech Republic \\[0.05in]
e--mail: tjerl@aci.cvut.cz
\end{center}
}
\ttindex{FIDE}

The FIDE package automates the process of numerically solving systems of
partial differential equations (PDES) by generating finite difference
methods. In this process there are several stages in which computer
algebra can be used to perform routine analytical calculations, namely:
transforming differential equations into different coordinate systems,
discretisation of differential equations, analysis of difference schemes
and generation of numerical programs. The FIDE package consists of the
following modules:

\begin{description}
\item[EXPRES] for transforming PDES into any orthogonal coordinate
system.
\item[IIMET] for discretisation of PDES by the integro-interpolation
method.
\item[APPROX] for determining the order of approximation of a difference
scheme.
\item[CHARPOL] for calculation of the amplification matrix and
characteristic polynomial of a difference scheme, which are needed in
Fourier stability analysis.
\item[HURWP] for locating polynomial roots, as needed to verify the von
Neumann stability condition.
\item[LINBAND] for generating the block of FORTRAN code which solves a
system of linear algebraic equations with a band matrix, as appears
quite often in difference schemes.
\end{description}

More details on this package are given in the FIDE documentation and in
the examples. A flavour of its capabilities can be seen from the
following simple example.
{\small\begin{verbatim} off exp; factor diff; on rat,eqfu; % Declare which indexes will be given to coordinates coordinates x,t into j,m; % Declares uniform grid in x coordinate grid uniform,x; % Declares dependencies of functions on coordinates dependence eta(t,x),v(t,x),eps(t,x),p(t,x); % Declares p as known function given p; same eta,v,p; iim a, eta,diff(eta,t)-eta*diff(v,x)=0, v,diff(v,t)+eta/ro*diff(p,x)=0, eps,diff(eps,t)+eta*p/ro*diff(v,x)=0; ***************************** ***** Program ***** IIMET Ver 1.1.2 ***************************** Partial Differential Equations ============================== diff(eta,t) - diff(v,x)*eta = 0 diff(p,x)*eta --------------- + diff(v,t) = 0 ro diff(v,x)*eta*p diff(eps,t) + ----------------- = 0 ro Backtracking needed in grid optimalization 0 interpolations are needed in x coordinate Equation for eta variable is integrated in half grid point Equation for v variable is integrated in half grid point Equation for eps variable is integrated in half grid point 0 interpolations are needed in t coordinate Equation for eta variable is integrated in half grid point Equation for v variable is integrated in half grid point Equation for eps variable is integrated in half grid point Equations after Discretization Using IIM : ========================================== (4*(eta(j,m + 1) - eta(j,m) - eta(j + 1,m) + eta(j + 1,m + 1))*hx - ( (eta(j + 1,m + 1) + eta(j,m + 1)) *(v(j + 1,m + 1) - v(j,m + 1)) + (eta(j + 1,m) + eta(j,m))*(v(j + 1,m) - v(j,m))) *(ht(m + 1) + ht(m)))/(4*(ht(m + 1) + ht(m))*hx) = 0 (4*(v(j,m + 1) - v(j,m) - v(j + 1,m) + v(j + 1,m + 1))*hx*ro + ((eta(j + 1,m + 1) + eta(j,m + 1)) *(p(j + 1,m + 1) - p(j,m + 1)) + (eta(j + 1,m) + eta(j,m))*(p(j + 1,m) - p(j,m))) *(ht(m + 1) + ht(m)))/(4*(ht(m + 1) + ht(m))*hx*ro) = 0 (4*(eps(j,m + 1) - eps(j,m) - eps(j + 1,m) + eps(j + 1,m + 1))*hx*ro + (( eta(j + 1,m + 1)*p(j + 1,m + 1) + eta(j,m + 1)*p(j,m + 1)) *(v(j + 1,m + 1) - v(j,m + 1)) + (eta(j + 1,m)*p(j + 1,m) + eta(j,m)*p(j,m)) *(v(j + 1,m) - v(j,m)))*(ht(m + 1) + ht(m)))/(4 *(ht(m + 1) + ht(m))*hx*ro) = 0 clear a; clearsame; cleargiven; \end{verbatim}} \chapter[FPS: Formal power series]% {FPS: Automatic calculation of formal power series} \label{FPS} \typeout{[FPS: Formal power series]} {\footnotesize \begin{center} Wolfram Koepf and Winfried Neun\\ Konrad--Zuse--Zentrum f\"ur Informationstechnik Berlin \\ Takustra\"se 7 \\ D--14195 Berlin--Dahlem, Germany \\[0.05in] e--mail: Koepf@zib.de and Neun@zib.de \end{center} } \ttindex{FPS} This package can expand functions of certain type into their corresponding Laurent-Puiseux series as a sum of terms of the form \begin{displaymath} \sum_{k=0}^{\infty} a_{k} (x-x_{0})^{k/n + s} \end{displaymath} where $s$ is the `shift number', $n$ is the `Puiseux number', and $x_0$ is the `point of development'. The following types are supported: \begin{itemize} \item {\bf functions of `rational type'}, which are either rational or have a rational derivative of some order; \item {\bf functions of `hypergeometric type'} where $a_{k+m}/a_k$ is a rational function for some integer $m$, the `symmetry number'; \item {\bf functions of `exp-like type'} which satisfy a linear homogeneous differential equation with constant coefficients. \end{itemize} {\tt FPS(f,x,x0)}\ttindex{FPS} tries to find a formal power series expansion for {\tt f} with respect to the variable {\tt x} at the point of development {\tt x0}. It also works for formal Laurent (negative exponents) and Puiseux series (fractional exponents). 
If the third argument is omitted, then {\tt x0:=0} is assumed. Example: {\tt FPS(asin(x)\verb+^+2,x)} results in {\small\begin{verbatim} 2*k 2*k 2 2 x *2 *factorial(k) *x infsum(----------------------------,k,0,infinity) factorial(2*k + 1)*(k + 1) \end{verbatim}} If possible, the output is given using factorials. In some cases, the use of the Pochhammer symbol {\tt pochhammer(a,k)}$:=a(a+1)\cdots(a+k-1)$ is necessary. {\tt SimpleDE(f,x)} tries to find a homogeneous linear differential equation with polynomial coefficients for $f$ with respect to $x$. Make sure that $y$ is not a used variable. The setting {\tt factor df;} is recommended to receive a nicer output form. Examples: {\tt SimpleDE(asin(x)\verb+^+2,x)} then results in {\small\begin{verbatim} 2 df(y,x,3)*(x - 1) + 3*df(y,x,2)*x + df(y,x) \end{verbatim}} The depth for the search of a differential equation for {\tt f} is controlled by the variable {\tt fps\verb+_+search\verb+_+depth};\ttindex{fps\_search\_depth} higher values for {\tt fps\verb+_+search\verb+_+depth} will increase the chance to find the solution, but increases the complexity as well. The default value for {\tt fps\verb+_+search\verb+_+depth} is 5. For {\tt FPS(sin(x\verb+^+(1/3)),x)}, or {\tt SimpleDE(sin(x\verb+^+(1/3)),x)} {\em e.g.}, a setting {\tt fps\verb+_+search\verb+_+depth:=6} is necessary. The output of the FPS package can be influenced by the\ttindex{TRACEFPS} switch {\tt tracefps}. Setting {\tt on tracefps} causes various prints of intermediate results. \chapter{GENTRAN: A code generation package} \label{GENTRAN} \typeout{{GENTRAN: A code generation package}} {\footnotesize \begin{center} Barbara L. Gates \\ RAND \\ Santa Monica CA 90407-2138 \\ U.S.A. \\[0.1in] Michael C. Dewar \\ School of Mathematical Sciences, The University of Bath \\ Bath BA2 7AY, England \\[0.05in] e--mail: mcd@maths.bath.ac.uk \end{center} } \ttindex{GENTRAN} GENTRAN is an automatic code GENerator and TRANslator which runs under \REDUCE. It constructs complete numerical programs based on sets of algorithmic specifications and symbolic expressions. Formatted FORTRAN, RATFOR, PASCAL or C code can be generated through a series of interactive commands or under the control of a template processing routine. Large expressions can be automatically segmented into subexpressions of manageable size, and a special file-handling mechanism maintains stacks of open I/O channels to allow output to be sent to any number of files simultaneously and to facilitate recursive invocation of the whole code generation process. GENTRAN provides the flexibility necessary to handle most code generation applications. It is designed to work with the SCOPE code optimiser. GENTRAN is a large system with a great many options. This section will only describe the FORTRAN generation facilities, and in broad outline only. The full manual is available as part of the \REDUCE\ documentation. \section{Simple Use} A substantial subset of all expressions and statements in the \REDUCE{} programming language can be translated directly into numerical code. The {\bf GENTRAN} command takes a \REDUCE\ expression, statement, or procedure definition, and translates it into code in the target language. \begin{describe}{Syntax:} {\bf GENTRAN} {\it stmt} [ {\bf OUT} {\it f1,f2,\dots\ ,fn} ]{\it ;} \end{describe} {\it stmt} is any \REDUCE\ expression, statement (simple, compound, or group), or procedure definition that can be translated by GENTRAN into the target language. 
{\it stmt} may contain any number of calls to the special functions {\bf EVAL}, {\bf DECLARE}, and {\bf LITERAL}. {\it f1,f2,\dots\ ,fn } is an optional argument list containing one or more {\it f}'s, where each {\it f} is one of: \par \begin{tabular}{lll} {\it an atom} &= &an output file\\ {\bf T} &= &the terminal\\ {\bf NIL} &= &the current output file(s)\\ \ttindex{ALL"!*} {\bf ALL!*} &= &all files currently open for output \\ & & by GENTRAN (see section~\ref{GENTRAN:output})\\ \end{tabular} If the optional part of the command is not given, generated code is simply written to the current output file. However, if it is given, then the current output file is temporarily overridden. Generated code is written to each file represented by {\it f1,f2,\dots\ ,fn} for this command only. Files which were open prior to the call to {\bf GENTRAN} will remain open after the call, and files which did not exist prior to the call will be created, opened, written to, and closed. The output stack will be exactly the same both before and after the call. {\bf GENTRAN} returns the name(s) of the file(s) to which code was written. \index{GENTRAN package ! example} {\small\begin{verbatim} 1: GENTRANLANG!* := 'FORTRAN$ 2: GENTRAN 2: FOR I:=1:N DO 2: V(I) := 0$ DO 25001 I=1,N V(I)=0.0 25001 CONTINUE \end{verbatim}} \section{Precision} \label{precision} \index{precision}\index{DOUBLE switch} By default {\bf GENTRAN} generates constants and type declarations in single precision form. If the user requires double precision output then the switch {\bf DOUBLE} must be set {\bf ON}. \index{PRECISION command}\index{PRINT"!-PRECISION command} To ensure the correct number of floating point digits are generated it may be necessary to use either the {\bf PRECISION} or {\bf PRINT!-PRECISION} commands. The former alters the number of digits \REDUCE\ calculates, the latter only the number of digits \REDUCE\ prints. Each takes an integer argument. It is not possible to set the printed precision higher than the actual precision. Calling {\bf PRINT!-PRECISION} with a negative argument causes the printed precision to revert to the actual precision. \subsection{The EVAL Function} \label{eval} \begin{describe}{Syntax:} {\bf EVAL} {\it exp} \end{describe}\ttindex{EVAL} \begin{describe}{Argument:} {\it exp} is any \REDUCE\ expression or statement which, after evaluation by \REDUCE, results in an expression that can be translated by GENTRAN into the target language. \end{describe} When {\bf EVAL} is called on an expression which is to be translated, it tells {\bf GENTRAN} to give the expression to \REDUCE\ for evaluation first, and then to translate the result of that evaluation. {\small\begin{verbatim} f; 2 2*X - 5*X + 6 \end{verbatim}} We wish to generate an assignment statement for the quotient of F and its derivative. {\small\begin{verbatim} 1: GENTRAN 1: Q := EVAL(F)/EVAL(DF(F,X))$ Q=(2.0*X**2-(5.0*X)+6.0)/(4.0*X-5.0) \end{verbatim}} \subsection{The :=: Operator} \index{:=:} \label{rsetq}\index{GENTRAN ! preevaluation}\index{rsetq operator} In many applications, assignments must be generated in which the left-hand side is some known variable name, but the right-hand side is an expression that must be evaluated. For this reason, a special operator is provided to indicate that the expression on the right-hand side is to be evaluated prior to translation. This special operator is {\bf :=:} ({\em i.e.} the usual \REDUCE\ assignment operator with an extra ``:'' on the right). 
\begin{describe}{\example}
{\small\begin{verbatim}
1: GENTRAN
1: DERIV :=: DF(X^4-X^3+2*x^2+1,X)$

DERIV=4.0*X**3-(3.0*X**2)+4.0*X
\end{verbatim}}
\end{describe}

\subsection{The ::= Operator}
\label{lsetq}
\index{matrices ! in GENTRAN}
When assignments to matrix or array elements must be generated, the
indices of the element must often be evaluated first. The special
operator\index{::=}\index{lsetq operator} {\bf ::=} can be used within a
call to {\bf GENTRAN} to indicate that the indices of the matrix or
array element on the left-hand side of the assignment are to be
evaluated prior to translation. (This is the usual \REDUCE{} assignment
operator with an extra ``:'' on the left.)

\begin{describe}{\example}
We wish to generate assignments which assign zeros to all elements on
the main diagonal of M, an n x n matrix.
{\small\begin{verbatim}
10: FOR j := 1 : 8 DO
10:    GENTRAN
10:       M(j,j) ::= 0$

M(1,1)=0.0
M(2,2)=0.0
   :
   :
M(8,8)=0.0
\end{verbatim}}
\end{describe}

{\bf LSETQ} may be used interchangeably with {\bf ::=} on
input.\ttindex{LSETQ}

\subsection{The ::=: Operator}
\label{lrsetq}
\index{::=:} \index{lrsetq operator}
In applications in which evaluated expressions are to be assigned to
array elements with evaluated subscripts, the {\bf ::=:} operator can be
used. It is a combination of the {\bf ::=} and {\bf :=:} operators
described in sections~\ref{rsetq} and~\ref{lsetq}.
\index{matrices ! in GENTRAN}

\begin{describe}{\example}
The following matrix, M, has been derived symbolically:
\newpage
{\small\begin{verbatim}
(  A    0   -1    1)
(                  )
(  0    B    0    0)
(                  )
( -1    0    C   -1)
(                  )
(  1    0   -1    D)
\end{verbatim}}
We wish to generate assignment statements for those elements on the main
diagonal of the matrix.
{\small\begin{verbatim}
10: FOR j := 1 : 4 DO
10:    GENTRAN
10:       M(j,j) ::=: M(j,j)$

M(1,1)=A
M(2,2)=B
M(3,3)=C
M(4,4)=D
\end{verbatim}}
\end{describe}

The alternative alphanumeric identifier associated with {\bf ::=:} is
{\bf LRSETQ}.\ttindex{LRSETQ}

\section{Explicit Type Declarations}
\label{explicit:type}
Type declarations are automatically generated each time a subprogram
heading is generated. Type declarations are constructed from information
stored in the GENTRAN symbol table. The user can place entries into the
symbol table explicitly through calls to the special GENTRAN function
{\bf DECLARE}.\index{DECLARE function}

\begin{describe}{Syntax:}
{\bf \ \ DECLARE} {\it v1,v2,\dots\ ,vn} {\bf :} {\it type;}

or

\begin{tabular}{ll}
{\bf DECLARE}\\
{\bf $<$$<$}\\
&{\it v11,v12,\dots\ ,v1n} {\bf :} {\it type1;}\\
&{\it v21,v22,\dots\ ,v2n} {\bf :} {\it type2;}\\
& :\\
& :\\
&{\it vn1,vn2,\dots\ ,vnn} {\bf :} {\it typen;}\\
{\bf $>$$>$}{\it ;}
\end{tabular}
\end{describe}

\begin{describe}{Arguments:}
Each {\it v1,v2,\dots\ ,vn} is a list of one or more variables
(optionally subscripted to indicate array dimensions), or variable
ranges (two letters separated by a ``-''). {\it v}'s are not evaluated
unless given as arguments to {\bf EVAL}. Each {\it type} is a variable
type in the target language. Each must be an atom, optionally preceded
by the atom {\bf IMPLICIT}. \index{IMPLICIT option} {\it type}'s are not
evaluated unless given as arguments to {\bf EVAL}.
\end{describe}

The {\bf DECLARE} statement can also be used to declare subprogram types
({\em i.e.\ } {\bf SUBROUTINE} or {\bf FUNCTION}) for
\index{SUBROUTINE}\index{FUNCTION} FORTRAN and RATFOR code, and function
types for all four languages.
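As an illustration, the following sketch shows one possible form of a
{\bf DECLARE} call (the names are purely illustrative); it would enter
{\tt X} and {\tt Y} as scalars, {\tt V} as a 10-element array and the
variable range {\tt I-N} as integers into the symbol table:
{\small\begin{verbatim}
GENTRAN
DECLARE
<<
    X, Y    : REAL;
    V(10)   : REAL;
    I-N     : INTEGER;
>>$
\end{verbatim}}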
\section{Expression Segmentation} \label{segmentation}\index{segmenting expressions} Symbolic derivations can easily produce formulas that can be anywhere from a few lines to several pages in length. Such formulas can be translated into numerical assignment statements, but unless they are broken into smaller pieces they may be too long for a compiler to handle. (The maximum number of continuation lines for one statement allowed by most FORTRAN compilers is only 19.) Therefore GENTRAN \index{continuation lines} contains a segmentation facility which automatically {\it segments}, or breaks down unreasonably large expressions. The segmentation facility generates a sequence of assignment statements, each of which assigns a subexpression to an automatically generated temporary variable. This sequence is generated in such a way that temporary variables are re-used as soon as possible, thereby keeping the number of automatically generated variables to a minimum. The facility can be turned on or off by setting the mode \index{GENTRANSEG switch} switch {\bf GENTRANSEG} accordingly ({\em i.e.\ }by calling the \REDUCE\ function {\bf ON} or {\bf OFF} on it). The user can control the maximum allowable expression size by setting the \ttindex{MAXEXPPRINTLEN"!*} variable {\bf MAXEXPPRINTLEN!*} to the maximum number of characters allowed in an expression printed in the target language (excluding spaces automatically printed by the formatter). The {\bf GENTRANSEG} switch is on initially, and {\bf MAXEXPPRINTLEN!*} is initialised to 800. \section{Template Processing}\label{GENTRAN:template} \index{GENTRAN ! templates}\index{templates}\index{code templates} In some code generation applications pieces of the target numerical program are known in advance. A {\it template} file containing a program outline is supplied by the user, and formulas are derived in \REDUCE, converted to numerical code, and inserted in the corresponding places in the program outline to form a complete numerical program. A template processor is provided by GENTRAN for use in these applications. \label{templates}\index{GENTRANIN command} \begin{describe}{Syntax:} {\bf GENTRANIN} {\it f1,f2,\dots\ ,fm} [{\bf OUT} {\it f1,f2,\dots\ ,fn\/}]{\it ;} \end{describe} \begin{describe}{Arguments:} {\it f1,f2,\dots\ ,fm\/} is an argument list containing one or more {\it f\/}'s, where each {\it f\/} is one of: \begin{center} \begin{tabular}{lll} {\it an atom}& = &a template (input) file\\ {\bf T}& = &the terminal\\ \end{tabular} \end{center} {\it f1,f2,\dots\ ,fn\/} is an optional argument list containing one or more {\it f\/}'s, where each {\it f\/} is one of: \begin{center} \begin{tabular}{lll} {\it an atom}& = &an output file\\ {\bf T}& = &the terminal\\ {\bf NIL}& = &the current output file(s)\\ {\bf ALL!*}& = &all files currently open for output \\ & & by GENTRAN (see section~\ref{GENTRAN:output}) \\ \end{tabular} \end{center} \end{describe} {\bf GENTRANIN} processes each template file {\it f1,f2,\dots\ ,fm} sequentially. A template file may contain any number of parts, each of which is either an active or an inactive part. All active parts start with the character sequence {\bf ;BEGIN;} and end with {\bf ;END;}. The end of the template file is indicated by an extra {\bf ;END;} character sequence.\index{;BEGIN; marker} \index{;END; marker} Inactive parts of template files are assumed to contain code in the target language. All inactive parts are copied to the output. 
Active parts may contain any number of \REDUCE\ expressions, statements, and commands. They are not copied directly to the output. Instead, they are given to \REDUCE\ for evaluation in algebraic mode. All output generated by each evaluation is sent to the output file(s). Returned values are only printed on the terminal.\index{GENTRAN ! preevaluation} Active parts will most likely contain calls to {\bf GENTRAN} to generate code. This means that the result of processing a template file will be the original template file with all active parts replaced by generated code. If {\bf OUT} {\it f1,f2,\dots\ ,fn} is not given, generated code is simply written to the current-output file. However, if {\bf OUT} {\it f1,f2,\dots\ ,fn} is given, then the current-output file is temporarily overridden. Generated code is written to each file represented by {\it f1,f2,\dots\ ,fn} for this command only. Files which were open prior to the call to {\bf GENTRANIN} will remain open after the call, and files which did not exist prior to the call will be created, opened, written to, and closed. The output-stack will be exactly the same both before and after the call. {\bf GENTRANIN} returns the names of all files written to by this command. \newpage \begin{describe}{\example} Suppose we wish to generate a FORTRAN subprogram to compute the determinant of a 3 x 3 matrix. We can construct a template file with an outline of the FORTRAN subprogram and \REDUCE\ and GENTRAN commands to fill it in: \index{matrices ! in GENTRAN} Contents of file {\tt det.tem}: \end{describe} {\small\begin{verbatim} REAL FUNCTION DET(M) REAL M(3,3) ;BEGIN; OPERATOR M$ MATRIX MM(3,3)$ MM := MAT( (M(1,1),M(1,2),M(1,3)), (M(2,1),M(2,2),M(2,3)), (M(3,1),M(3,2),M(3,3)) )$ GENTRAN DET :=: DET(MM)$ ;END; RETURN END ;END; \end{verbatim}} \begin{describe}{} Now we can generate a FORTRAN subprogram with the following \REDUCE\ session: {\small\begin{verbatim} 1: GENTRANLANG!* := 'FORTRAN$ 2: GENTRANIN 2: "det.tem" 2: OUT "det.f"$ \end{verbatim}} Contents of file det.f: \end{describe} {\small\begin{verbatim} REAL FUNCTION DET(M) REAL M(3,3) DET=M(3,3)*M(2,2)*M(1,1)-(M(3,3)*M(2,1)*M(1,2))-(M(3,2) . *M(2,3)*M(1,1))+M(3,2)*M(2,1)*M(1,3)+M(3,1)*M(2,3)*M(1 . ,2)-(M(3,1)*M(2,2)*M(1,3)) RETURN END \end{verbatim}} \section{Output Redirection}\label{GENTRAN:output} \index{GENTRAN ! file output} \index{GENTRANOUT command}\index{GENTRANSHUT command} The {\bf GENTRANOUT} and {\bf GENTRANSHUT} commands are identical to the \REDUCE\ {\bf OUT} and {\bf SHUT} commands with the following exceptions: \begin{itemize} \item {\bf GENTRANOUT} and {\bf GENTRANSHUT} redirect {\it only\/} code which is printed as a side effect of GENTRAN commands. \item {\bf GENTRANOUT} allows more than one file name to be given to indicate that generated code is to be sent to two or more files. (It is particularly convenient to be able to have generated code sent to the terminal screen and one or more file simultaneously.) \item {\bf GENTRANOUT} does not automatically erase existing files; it prints a warning message on the terminal and asks the user whether the existing file should be erased or the whole command be aborted. 
\end{itemize}

\chapter[GEOMETRY: Plane geometry]%
        {GEOMETRY: Mechanized (Plane) Geometry Manipulations}
\label{GEOMETRY}
\typeout{{GEOMETRY: Mechanized (Plane) Geometry Manipulations}}

\newcommand{\xxyy}[2]
{\noindent{\f{#1}} \\\hspace*{1cm} \parbox[t]{9cm}{#2} \\[6pt]}
\newcommand{\geo}{{\sc Geometry}}
\newenvironment{code}{\tt \begin{tabbing}
\hspace*{1cm}\=\hspace*{1cm}\=\hspace*{1cm}\=
\hspace*{1cm}\=\hspace*{1cm}\=\kill}{\end{tabbing}}

{\footnotesize
\begin{center}
Hans-Gert Gr\"abe \\
Universit\"at Leipzig, Germany \\
e-mail: graebe@informatik.uni-leipzig.de \\
\end{center}
}
\ttindex{GEOMETRY}

%\markboth{CHAPTER \ref{GEOMETRY}. GEOMETRY: (PLANE) GEOMETRY MANIPULATIONS}{}
%\thispagestyle{myheadings}

\section{Introduction}

This package provides tools for the formulation and mechanized proof of
geometry statements in the spirit of the ``Chinese Prover'' of
W.-T. Wu \cite{Wu:94} and the fundamental book \cite{Chou:88} of
S.-C. Chou, who proved 512 geometry theorems with this mechanized
method; see also \cite{Chou:84}, \cite{Chou:90}, \cite{Wu:84a},
\cite{Wu:84b}.

The general idea behind this approach is an algebraic reformulation of
geometric conditions using generic coordinates. A (mathematically
strong) proof of the geometry statement may then be obtained from
appropriate manipulations of these algebraic expressions. A CAS such as
Reduce is well suited to mechanizing these manipulations.

For a more detailed introduction to the topic see the accompanying file
{\tt geometry.tex} in \$REDUCEPATH/packages/geometry/.

\section{Basic Data Types and Constructors}

The basic data types in this package are {\tt Scalar, Point, Line,
Circle1 and Circle}. \\
The function \f{POINT($a,b$)} creates a {\tt Point} in the plane with
the $(x,y)$-coordinates $(a,b)$. A {\tt Line} is created with the
function \f{LINE($a,b,c$)} and fulfills the equation $ax + by + c = 0$.
For circles there are two constructors. You can use
\f{CIRCLE($c_1,c_2,c_3,c_4$)} to create a {\tt Circle} where the scalar
variables solve the equation $c_1(x^2+y^2) + c_2x + c_3y + c_4 = 0$.
Note that lines are a subset of the circles with $c_1=0$. The other way
to create a {\tt Circle} is the function \f{CIRCLE1($M,s$)}. The
variable $M$ here denotes a {\tt Point} and $s$ the squared radius.
Please note that this package mostly uses squared distances and radii.

There are various functions whose return type is {\tt Scalar}. Booleans
are represented as extended booleans, i.e.\ the procedure returns a
{\tt Scalar} that is zero iff the condition is fulfilled. For example,
the function call \f{POINT\_ON\_CIRCLE(P,c)} returns zero if the
{\tt Point} $P$ is on the circle $c$; a non-zero result means that $P$
is not on the circle. In some cases a non-zero result also has a
geometric meaning. For example, \f{COLLINEAR(A,B,C)} returns the signed
area of the corresponding parallelogram.

\section{Procedures}

This section contains a short description of all procedures available
in \geo. By convention, distances and radii of circles are squared.
\bigskip

\xxyy{ANGLE\_SUM(a,b:Scalar):Scalar \ttindex{ANGLE\_SUM}}
{Returns $\tan(\alpha+\beta)$, if $a=\tan(\alpha), b=\tan(\beta)$.}
\xxyy{ALTITUDE(A,B,C:Point):Line \ttindex{ALTITUDE}}
{The altitude from $A$ onto $g(BC)$.
} \xxyy{C1\_CIRCLE(M:Point,sqr:Scalar):Circle \ttindex{C1\_CIRCLE}} {The circle with given center and sqradius.} \xxyy{CC\_TANGENT(c1,c2:Circle):Scalar \ttindex{CC\_TANGENT}} {Zero iff $c_1$ and $c_2$ are tangent.} \xxyy{CHOOSE\_PC(M:Point,r,u):Point \ttindex{CHOOSE\_PC}} {Chooses a point on the circle around $M$ with radius $r$ using its rational parametrization with parameter $u$.} \xxyy{CHOOSE\_PL(a:Line,u):Point \ttindex{CHOOSE\_PL}} {Chooses a point on $a$ using parameter $u$.} \xxyy{CIRCLE(c1,c2,c3,c4:Scalar):Circle \ttindex{CIRCLE}} {The {\tt Circle} constructor.} \xxyy{CIRCLE1(M:Point,sqr:Scalar):Circle1 \ttindex{CIRCLE1}} {The {\tt Circle1} constructor. } \xxyy{CIRCLE\_CENTER(c:Circle):Point \ttindex{CIRCLE\_CENTER}} {The center of $c$.} \xxyy{CIRCLE\_SQRADIUS(c:Circle):Scalar \ttindex{CIRCLE\_SQRADIUS}} {The sqradius of $c$.} \xxyy{CL\_TANGENT(c:Circle,l:Line):Scalar \ttindex{CL\_TANGENT}} {Zero iff $l$ is tangent to $c$.} \xxyy{COLLINEAR(A,B,C:Point):Scalar \ttindex{COLLINEAR}} {Zero iff $A,B,C$ are on a common line. In general the signed area of the parallelogram spanned by $\vec{AB}$ and $\vec{AC}$. } \xxyy{CONCURRENT(a,b,c:Line):Scalar \ttindex{CONCURRENT}} {Zero iff $a,b,c$ have a common point.} \xxyy{INTERSECTION\_POINT(a,b:Line):Point \ttindex{INTERSECTION\_POINT}} {The intersection point of the lines $a,b$. } \xxyy{L2\_ANGLE(a,b:Line):Scalar \ttindex{L2\_ANGLE}} {Tangens of the angle between $a$ and $b$. } \xxyy{LINE(a,b,c:Scalar):Line \ttindex{LINE}} {The {\tt Line} constructor.} \xxyy{LOT(P:Point,a:Line):Line \ttindex{LOT}} {The perpendicular from $P$ onto $a$.} \xxyy{MEDIAN(A,B,C:Point):Line \ttindex{MEDIAN}} {The median line from $A$ to $BC$.} \xxyy{MIDPOINT(A,B:Point):Point \ttindex{MIDPOINT}} {The midpoint of $AB$. } \xxyy{MP(B,C:Point):Line \ttindex{MP}} {The midpoint perpendicular of $BC$.} \xxyy{ORTHOGONAL(a,b:Line):Scalar \ttindex{ORTHOGONAL}} {zero iff the lines $a,b$ are orthogonal. } \xxyy{OTHER\_CC\_POINT(P:Point,c1,c2:Circle):Point \ttindex{OTHER\_CC\_POINT}} { $c_1$ and $c_2$ intersect at $P$. The procedure returns the second intersection point. } \xxyy{OTHER\_CL\_POINT(P:Point,c:Circle,l:Line):Point \ttindex{OTHER\_CL\_POINT}} {$c$ and $l$ intersect at $P$. The procedure returns the second intersection point.} \xxyy{P3\_ANGLE(A,B,C:Point):Scalar \ttindex{P3\_ANGLE}} {Tangens of the angle between $\vec{BA}$ and $\vec{BC}$. } \xxyy{P3\_CIRCLE(A,B,C:Point):Circle\ \ttindex{P3\_CIRCLE} {\rm or\ }\\ P3\_CIRCLE1(A,B,C:Point):Circle1\ttindex{P3\_CIRCLE1} } {The circle through 3 given points. } \xxyy{P4\_CIRCLE(A,B,C,D:Point):Scalar \ttindex{P4\_CIRCLE}} {Zero iff four given points are on a common circle. } \xxyy{PAR(P:Point,a:Line):Line \ttindex{PAR}} {The line through $P$ parallel to $a$. } \xxyy{PARALLEL(a,b:Line):Scalar \ttindex{PARALLEL}} {Zero iff the lines $a,b$ are parallel. } \xxyy{PEDALPOINT(P:Point,a:Line):Point \ttindex{PEDALPOINT}} {The pedal point of the perpendicular from $P$ onto $a$.} \xxyy{POINT(a,b:Scalar):Point \ttindex{POINT}} {The {\tt Point} constructor.} \xxyy{POINT\_ON\_BISECTOR(P,A,B,C:Point):Scalar \ttindex{POINT\_ON\_BISECTOR}} {Zero iff $P$ is a point on the (inner or outer) bisector of the angle $\angle\,ABC$.} \xxyy{POINT\_ON\_CIRCLE(P:Point,c:Circle):Scalar\ \ttindex{POINT\_ON\_CIRCLE} {\rm or\ }\\ POINT\_ON\_CIRCLE1(P:Point,c:Circle1):Scalar \ttindex{POINT\_ON\_CIRCLE1}} {Zero iff $P$ is on the circle $c$.} \xxyy{POINT\_ON\_LINE(P:Point,a:Line):Scalar \ttindex{POINT\_ON\_LINE}} {Zero iff $P$ is on the line $a$. 
}
\xxyy{PP\_LINE(A,B:Point):Line \ttindex{PP\_LINE}}
{The line through $A$ and $B$.}
\xxyy{SQRDIST(A,B:Point):Scalar \ttindex{SQRDIST}}
{Square of the distance between $A$ and $B$.}
\xxyy{SYMPOINT(P:Point,l:Line):Point \ttindex{SYMPOINT}}
{The point symmetric to $P$ wrt.\ the line $l$.}
\xxyy{SYMLINE(a:Line,l:Line):Line \ttindex{SYMLINE}}
{The line symmetric to $a$ wrt.\ the line $l$.}
\xxyy{VARPOINT(A,B:Point,u):Point \ttindex{VARPOINT}}
{The point $D=u\cdot A+(1-u)\cdot B$. }

\noindent
As additional tools, \geo\ supplies the functions
\bigskip

\xxyy{EXTRACTMAT(polys,vars) \ttindex{EXTRACTMAT}}
{Returns the coefficient matrix of the list of equations $polys$ that
are linear in the variables $vars$. }
\xxyy{RED\_HOM\_COORDS(u:\{Line,Circle\}) \ttindex{RED\_HOM\_COORDS}}
{Returns the reduced homogeneous coordinates of $u$, i.e., divides out
the content. }
\newpage

\section{Examples}

\example Create three points as the vertices of a generic triangle. \\
{\tt A:=Point(a1,a2); B:=Point(b1,b2); C:=Point(c1,c2);} \\

\noindent The midpoint perpendiculars of $\Delta\,ABC$ pass through a
common point since
\begin{code}\+\> concurrent(mp(A,B),mp(B,C),mp(C,A));
\end{code}
simplifies to zero.
\medskip

\example
\noindent The intersection point of the midpoint perpendiculars
\begin{code}\+\> M:=intersection\_point(mp(A,B),mp(B,C));
\end{code}
is the center of the circumscribed circle since
\begin{code}\+\> sqrdist(M,A) - sqrdist(M,B);
\end{code}
simplifies to zero.
\medskip

\example
\noindent {\em Euler's line}:
\begin{quote}
The center $M$ of the circumscribed circle, the orthocenter $H$ and the
barycenter $S$ are collinear, and $S$ divides $MH$ in the ratio 1:2.
\end{quote}
Compute the coordinates of the corresponding points
\begin{code}\+\> M:=intersection\_point(mp(a,b),mp(b,c));\\
H:=intersection\_point(altitude(a,b,c),altitude(b,c,a));\\
S:=intersection\_point(median(a,b,c),median(b,c,a));
\end{code}
and then prove that
\begin{code}\+\> collinear(M,H,S);\\
sqrdist(S,varpoint(M,H,2/3));
\end{code}
both simplify to zero.
\medskip

\chapter[GNUPLOT: Plotting Functions]%
        {GNUPLOT: Display of functions and surfaces}
\label{GNUPLOT}
\typeout{{GNUPLOT: Display of functions and surfaces}}

{\footnotesize
\begin{center}
Herbert Melenk \\
Konrad--Zuse--Zentrum f\"ur Informationstechnik Berlin \\
Takustra{\ss}e 7 \\
D--14195 Berlin--Dahlem, Germany \\[0.05in]
e--mail: melenk@zib.de
\end{center}
}
\ttindex{GNUPLOT}

The {\bf gnuplot} system provides easy-to-use graphics output for curves
or surfaces which are defined by formulas and/or data sets. The \REDUCE\
GNUPLOT package lets one use the GNUPLOT graphical output directly from
inside \REDUCE, either for the interactive display of curves/surfaces or
for the production of pictures on paper. For a full understanding of the
use of the \REDUCE\ GNUPLOT package it is best to be familiar with
{\bf gnuplot}.

The main command is {\tt PLOT}\ttindex{PLOT}. It accepts an arbitrary
list of arguments which are either an expression to be plotted, a range
expression or an option.
{\small\begin{verbatim}
load_package gnuplot;

plot(w=sin(a),a=(0 .. 10),xlabel="angle",ylabel="sine");
\end{verbatim}}
The expression can be in one or two unknowns, or a list of two functions
for the x and y values. It can also be an implicit equation in
2-dimensional space.
{\small\begin{verbatim}
plot(x**3+x*y**3-9*x=0);
\end{verbatim}}
The dependent and independent variables can be limited to a range with
the syntax shown in the first example.
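For example, a surface over an explicitly bounded region might be
requested as follows (a sketch only; the picture produced depends on the
local {\bf gnuplot} installation):
{\small\begin{verbatim}
plot(sin(x)*cos(y), x=(-3 .. 3), y=(-3 .. 3),
     hidden3d, title="example surface");
\end{verbatim}}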
If omitted, the independent variables range from -10 to 10 and the
dependent variable is limited only by the precision of the IEEE floating
point arithmetic.

There are many options, given either as keywords or as
{\tt variable=string}. Options include:

{\tt title}\ttindex{title}: assign a heading (default: empty)

{\tt xlabel}\ttindex{xlabel}: set label for the x axis

{\tt ylabel}\ttindex{ylabel}: set label for the y axis

{\tt zlabel}\ttindex{zlabel}: set label for the z axis

{\tt terminal}\ttindex{terminal}: select an output device

{\tt size}\ttindex{size}: rescale the picture

{\tt view}\ttindex{view}: set a viewpoint

{\tt (no)}{\tt contour}\ttindex{contour}: 3d: add contour lines

{\tt (no)}{\tt surface}\ttindex{surface}: 3d: draw surface (default: yes)

{\tt (no)}{\tt hidden3d}\ttindex{hidden3d}: 3d: remove hidden lines (default: no)

The command {\tt PLOTRESET}\ttindex{PLOTRESET} closes the current
GNUPLOT window. The next call to {\tt PLOT} will create a new one.

GNUPLOT is controlled by a number of switches. Normally all intermediate
data sets are deleted after terminating a plot session. If the switch
{\tt PLOTKEEP}\ttindex{PLOTKEEP} is set on, the data sets are kept for
possible post-processing independent of \REDUCE.

In general {\tt PLOT} tries to generate smooth pictures by evaluating
the functions at interior points until the distances are fine enough.
This can require a lot of computing time if a single function evaluation
is expensive. The refinement is controlled by the switch
{\tt PLOTREFINE}\ttindex{PLOTREFINE} which is on by default. When you
turn it off the functions will be evaluated only at the basic points.

The integer value of the global variable
{\tt PLOT\_XMESH}\ttindex{PLOT\_XMESH} defines the number of initial
function evaluations in the x direction for \f{PLOT}. For 2d graphs
additional points will be used as long as
{\tt plotrefine}\ttindex{plotrefine} is on. For 3d graphs this number
also defines the number of mesh lines orthogonal to the x axis.
{\tt PLOT\_YMESH}\ttindex{PLOT\_YMESH} defines for 3d plots the number
of function evaluations in the y direction and the number of mesh lines
orthogonal to the y axis.

The grid for localising an implicitly defined curve in \f{PLOT} consists
of triangles. These are initially distributed equally over the x--y
plane, controlled by {\tt PLOT\_XMESH}. The grid is refined adaptively
in several levels. The final grid can be visualised by turning on the
switch {\tt SHOW\_GRID}\ttindex{SHOW\_GRID}.

\chapter{GROEBNER: A Gr\"obner basis package}
\label{GROEBNER}
\typeout{{GROEBNER: A Gr\"obner basis package}}

{\footnotesize
\begin{center}
Herbert Melenk \& Winfried Neun \\
Konrad--Zuse--Zentrum f\"ur Informationstechnik Berlin \\
Takustra{\ss}e 7 \\
D--14195 Berlin--Dahlem, Germany \\[0.05in]
e--mail: melenk@zib.de \\[0.05in]
and \\[0.05in]
H.M. M\"oller \\
Fernuniversit\"at Hagen FB Math und Informatik\\
Postfach 940 \\
D--58084 Hagen, Germany\\[0.05in]
e--mail: Michael.Moeller@fernuni-hagen.de
\end{center}
}
\ttindex{GROEBNER}

Gr\"obner bases are a valuable tool for solving problems in connection
with multivariate polynomials, such as solving systems of algebraic
equations and analysing polynomial ideals.
\index{GROEBNER package}\index{Buchberger's Algorithm}
The GROEBNER package calculates Gr\"obner bases using the Buchberger
algorithm. It can be used over a variety of different coefficient
domains, and for different variable and term orderings.
\section{Term Ordering}
\par
In the theory of Gr\"obner bases, the terms of polynomials are
considered to be ordered. Several order modes are available in the
current package, including the basic modes:
\index{LEX ! term order}\index{GRADLEX ! term order}
\index{REVGRADLEX ! term order}
\begin{center}
LEX, GRADLEX, REVGRADLEX
\end{center}
All orderings are based on an ordering among the variables. For each
pair of variables $(a,b)$ an order relation must be defined,
{\em e.g.\ } ``$ a\gg b $''. The sign $\gg$ does not represent a
numerical relation among the variables; it can be interpreted only in
terms of formula representation: ``$a$'' will be placed in front of
``$b$'', or ``$a$'' is more complicated than ``$b$''.

The sequence of variables constitutes this order base. So the notion of
\[ \{x1,x2,x3\} \]
as a list of variables at the same time means
\[ x1 \gg x2 \gg x3 \]
with respect to the term order.

When two terms (products of powers of variables) are compared with LEX,
the term containing the greater variable is chosen, or, if the greatest
variable is the same in both, the term with the higher degree in that
variable. With GRADLEX the sum of all exponents (the total degree) is
compared first, and if that does not lead to a decision, the LEX method
is taken for the final decision. The REVGRADLEX method also compares the
total degree first, but afterward it uses the LEX method in the reverse
direction; this is the method originally used by Buchberger.

Note that the LEX ordering is identical to the standard \REDUCE{} kernel
ordering, when KORDER is set explicitly to the sequence of variables.
\index{default ! term order}
LEX is the default term order mode in the GROEBNER package.

\section{The Basic Operators}
\subsection{Term Ordering Mode}
\begin{description}
\ttindex{TORDER}
\item [{\it TORDER}]($vl$,$m$,$[p_1,p_2,\ldots]$);
where $vl$ is a variable list (or the empty list if no variables are
declared explicitly), $m$ is the name of a term ordering mode LEX,
GRADLEX, REV\-GRAD\-LEX (or another implemented mode) and
$[p_1,p_2,\ldots]$ are additional parameters for the term ordering mode
(not needed for the basic modes).

TORDER sets the variable set and the term ordering mode. The default
mode is LEX. The previous setting is returned as a list with
corresponding elements. Such a list can alternatively be passed as the
sole argument to TORDER.

If the variable list is empty or if the TORDER declaration is omitted,
the automatic variable extraction is activated.
\ttindex{GVARS}
\item[{\it GVARS}] ({\it\{exp$1$, exp$2$, $ \ldots$, exp$n$\}});
where $\{exp1, exp2, \ldots , expn\}$ is a list of expressions or
equations.

GVARS extracts from the expressions $\{exp1, exp2, \ldots , expn\}$ the
kernels, which can play the role of variables for a Gr\"obner
calculation. This can be used {\em e.g.\ } in a TORDER declaration.
\end{description}

\subsection{GROEBNER: Calculation of a Gr\"obner Basis}
\begin{description}
\ttindex{GROEBNER}
\item[{\it GROEBNER}] $\{exp1, exp2, \ldots , expm\}; $
where $\{exp1, exp2, \ldots , expm\}$ is a list of expressions or
equations.

GROEBNER calculates the Gr\"obner basis of the given set of expressions
with respect to the current TORDER setting. The Gr\"obner basis $\{1\}$
means that the ideal generated by the input polynomials is the whole
polynomial ring, or equivalently, that the input polynomials have no
zeros in common.

As a side effect, the sequence of variables is stored as a \REDUCE\ list
in the shared variable \ttindex{gvarslast}{\tt gvarslast}.
\end{description} \example \index{GROEBNER package ! example} {\small\begin{verbatim} torder({},lex)$ groebner{3*x**2*y + 2*x*y + y + 9*x**2 + 5*x - 3, 2*x**3*y - x*y - y + 6*x**3 - 2*x**2 - 3*x + 3, x**3*y + x**2*y + 3*x**3 + 2*x**2 }; 2 {8*X - 2*Y + 5*Y + 3, 3 2 2*Y - 3*Y - 16*Y + 21} \end{verbatim}} The operation of GROEBNER can be controlled by the following switches: \begin{description} \ttindex{GROEBOPT} \item[GROEBOPT] -- If set ON, the sequence of variables is optimized with respect to execution speed; note that the final list of variables is available in\ttindex{GVARSLAST} GVARSLAST. An explicitly declared dependency supersedes the variable optimization. By default GROEBOPT is off, conserving the original variable sequence. \ttindex{GROEBFULLREDUCTION} \item[GROEBFULLREDUCTION] -- If set off, the reduction steps during the \linebreak[4] GROEBNER operation are limited to the pure head term reduction; subsequent terms are reduced otherwise. By default GROEBFULLREDUCTION is on. \ttindex{GLTBASIS} \item[GLTBASIS] -- If set on, the leading terms of the result basis are extracted. They are collected in a basis of monomials, which is available as value of the global variable with the name GLTB. \end{description} \subsection{GZERODIM?: Test of $\dim = 0$} \begin{description} \ttindex{GZERODIM?} \item[{\it GZERODIM}!?] $bas$ \\ where {\it bas} is a Gr\"obner basis in the current setting. The result is {\it NIL}, if {\it bas} is the basis of an ideal of polynomials with more than finitely many common zeros. If the ideal is zero dimensional, {\em i.e.\ } the polynomials of the ideal have only finitely many zeros in common, the result is an integer $k$ which is the number of these common zeros (counted with multiplicities). \end{description} \subsection{GDIMENSION, GINDEPENDENT\_SETS} The following operators can be used to compute the dimension and the independent variable sets of an ideal which has the Gr\"obner basis {\it bas} with arbitrary term order: \begin{description} \ttindex{GDIMENSION}\ttindex{GINDEPENDENT\_SETS} \ttindex{ideal dimension}\ttindex{independent sets} \item[Gdimension]$bas$ \item[Gindependent\_sets]$bas$ {\it Gindependent\_sets} computes the maximal left independent variable sets of the ideal, that are the variable sets which play the role of free parameters in the current ideal basis. Each set is a list which is a subset of the variable list. The result is a list of these sets. For an ideal with dimension zero the list is empty. {\it GDimension} computes the dimension of the ideal, which is the maximum length of the independent sets. \end{description} \subsection{GLEXCONVERT: Conversion to a Lexical Base} \begin{description} \ttindex{GLEXCONVERT} \item[{\it GLEXCONVERT}] $ \left(\{exp,\ldots , expm\} \left[,\{var1 \ldots , varn\}\right]\right.$ \\ $\left. \left[,MAXDEG=mx\right] \left[,NEWVARS=\{nv1, \ldots , nvk\}\right]\right) $ \\ where $\{exp1, \ldots , expm\}$ is a Gr\"obner basis with $\{var1, \ldots , varn\}$ as variables in the current term order mode, $mx$ is an integer, and $\{nv1, \ldots , nvk\}$ is a subset of the basis variables. For this operator the source and target variable sets must be specified explicitly. \end{description} GLEXCONVERT converts a basis of a zero-dimensional ideal (finite number of isolated solutions) from arbitrary ordering into a basis under {\it lex} ordering. During the call of GLEXCONVERT the original ordering of the input basis must be still active. NEWVARS defines the new variable sequence. 
If omitted, the original variable sequence is used. If only a subset of
variables is specified here, the partial ideal basis is evaluated. For
the calculation of a univariate polynomial, NEW\-VARS should be a list
with one element.

MAXDEG is an upper limit for the degrees. The algorithm stops with an
error message if this limit is reached.

A warning occurs if the ideal is not zero dimensional.

GLEXCONVERT is an implementation of the FGLM algorithm. Often, the
calculation of a Gr\"obner basis with a graded ordering and subsequent
conversion to {\it lex} is faster than a direct {\it lex} calculation.
Additionally, GLEXCONVERT can be used to transform a {\it lex} basis
into one with a different variable sequence, and it supports the
calculation of a univariate polynomial. The algorithm is even applicable
in the non zero-dimensional case if such a polynomial exists.
{\small\begin{verbatim}
torder({w,p,z,t,s,b},gradlex)$

g := groebner { f1 := 45*p + 35*s -165*b -36,
        35*p + 40*z + 25*t - 27*s,
        15*w + 25*p*s +30*z -18*t -165*b**2,
        -9*w + 15*p*t + 20*z*s,
        w*p + 2*z*t - 11*b**3,
        99*w - 11*s*b +3*b**2,
        b**2 + 33/50*b + 2673/10000};

G := {60000*W + 9500*B + 3969,
      1800*P - 3100*B - 1377,
      18000*Z + 24500*B + 10287,
      750*T - 1850*B + 81,
      200*S - 500*B - 9,

             2
      10000*B  + 6600*B + 2673}

glexconvert(g,{w,p,z,t,s,b},maxdeg=5,newvars={w});

            2
 100000000*W  + 2780000*W + 416421

glexconvert(g,{w,p,z,t,s,b},maxdeg=5,newvars={p});

       2
 6000*P  - 2360*P + 3051
\end{verbatim}}

\subsection{GROEBNERF: Factorizing Gr\"obner Bases}

If Gr\"obner bases are computed in order to solve systems of equations
or to find the common roots of systems of polynomials, the factorizing
version of the Buchberger algorithm can be used. The theoretical
background is simple: if a polynomial $p$ can be represented as a
product of two (or more) polynomials, {\em e.g.\ } $h= f*g$, then $h$
vanishes if and only if one of the factors vanishes. So if during the
calculation of a Gr\"obner basis $h$ of the above form is detected, the
whole problem can be split into two (or more) disjoint branches. Each of
the branches is simpler than the complete problem; this saves computing
time and space. The result of this type of computation is a list of
(partial) Gr\"obner bases; the solution set of the original problem is
the union of the solutions of the partial problems, ignoring the
multiplicity of an individual solution. If a branch results in a basis
$\{1\}$, then there is no common zero, {\em i.e.\ } no additional
solution for the original problem, contributed by this branch.

\subsubsection{GROEBNERF Call}
\ttindex{GROEBNERF}
The syntax of GROEBNERF is the same as for GROEBNER.
\[ \mbox{\it GROEBNERF}(\{exp1, exp2, \ldots , expm\}
      [,\{\},\{nz1, \ldots , nzk\}]); \]
where $\{exp1, exp2, \ldots , expm\}$ is a given list of expressions or
equations, and $\{nz1, \ldots , nzk\}$ is an optional list of
polynomials known to be non-zero.

GROEBNERF tries to separate polynomials into individual factors and to
branch the computation in a recursive manner (factorisation tree). The
result is a list of partial Gr\"obner bases. If no factorisation can be
found or if all branches but one lead to the trivial basis $\{1\}$, the
result has only one basis; nevertheless it is a list of lists of
polynomials. If no solution is found, the result will be $\{\{1\}\}$.
Multiplicities (one factor with a higher power, the same partial basis
twice) are deleted as early as possible in order to speed up the
calculation. The factorising is controlled by some switches.
As a side effect, the sequence of variables is stored as a \REDUCE\ list
in the shared variable
\begin{center}
gvarslast.
\end{center}
If GLTBASIS is on, a corresponding list of leading term bases is also
produced and is available in the variable GLTB.

The third parameter of GROEBNERF allows one to declare some polynomials
nonzero. If any of these is found in a branch of the calculation, the
branch is cancelled. This can be used to save a substantial amount of
computing time. The second parameter must be included as an empty list
if the third parameter is to be used.
{\small\begin{verbatim}
torder({x,y},lex)$

groebnerf { 3*x**2*y + 2*x*y + y + 9*x**2 + 5*x = 3,
            2*x**3*y - x*y - y + 6*x**3 - 2*x**2 - 3*x = -3,
            x**3*y + x**2*y + 3*x**3 + 2*x**2 };

{{Y - 3,X},

                   2
 {2*Y + 2*X - 1,2*X  - 5*X - 5}}
\end{verbatim}}
It is obvious here that the solutions of the equations can be read off
immediately.

All switches from GROEBNER are valid for GROEBNERF as well:
\ttindex{GROEBOPT} \ttindex{GLTBASIS}
\ttindex{GROEBFULLREDUCTION}\ttindex{GROEBSTAT}\ttindex{TRGROEB}
\ttindex{TRGROEBS}\ttindex{TRGROEB1}
\begin{center}
\begin{tabular}{l}
GROEBOPT \\
GLTBASIS \\
GROEBFULLREDUCTION \\
GROEBSTAT \\
TRGROEB \\
TRGROEBS \\
TRGROEB1
\end{tabular}
\end{center}

\subsubsection{Restriction of the Solution Space}

In some applications only a subset of the complete solution set of a
given set of equations is relevant, {\em e.g.\ } only nonnegative values
or positive definite values for the variables. A significant amount of
computing time can be saved if nonrelevant computation branches can be
terminated early.

Positivity: If a polynomial has no (strictly) positive zero, then every
system containing it has no nonnegative or strictly positive solution.
Therefore, the Buchberger algorithm tests the coefficients of the
polynomials for equal signs if requested. For example, $13*x + 15*y*z$
can be zero with real nonnegative values for $x, y$ and $z$ only if
$x=0$ and either $y=0$ or $z=0$; this is a sort of ``factorization by
restriction''. A polynomial $13*x + 15*y*z + 20$ can never vanish with
nonnegative real variable values.

Zero point: If any polynomial in an ideal has an absolute term, the
ideal cannot have the origin point as a common solution.

By setting the shared variable
\ttindex{GROEBRESTRICTION}
\begin{center}
GROEBRESTRICTION
\end{center}
GROEBNERF is informed of the type of restriction the user wants to
impose on the solutions:
\begin{center}
\begin{tabular}{l}
{\it GROEBRESTRICTION:=NONNEGATIVE;} \\
\hspace*{+.5cm} only nonnegative real solutions are of
interest\vspace*{4mm} \\
{\it GROEBRESTRICTION:=POSITIVE;} \\
\hspace*{+.5cm}only nonnegative and nonzero solutions are of
interest\vspace*{4mm} \\
{\it GROEBRESTRICTION:=ZEROPOINT;} \\
\hspace*{+.5cm}only solution sets which contain the point
$\{0,0,\ldots,0\}$ are of interest.
\end{tabular}
\end{center}
If GROEBNERF detects a polynomial which formally conflicts with the
restriction, it either splits the calculation into separate branches,
or, if a violation of the restriction is determined, it cancels the
current calculation branch.

\subsection{GREDUCE, PREDUCE: Reduction of Polynomials}

\subsubsection{Background}
\label{GROEBNER:background}
Reduction of a polynomial ``p'' modulo a given set of polynomials ``B''
is done by the reduction algorithm incorporated in the Buchberger
algorithm.
% Subsection 3.5.2
\subsubsection{Reduction via Gr\"obner Basis Calculation}
\ttindex{GREDUCE}
\[
\mbox{\it GREDUCE}(exp, \{exp1, exp2, \ldots , expm\});
\]
where {\it exp} is an expression, and $\{exp1, exp2,\ldots , expm\}$ is
a list of any number of expressions or equations.

GREDUCE first converts the list of expressions $\{exp1, \ldots , expm\}$
to a Gr\"obner basis, and then reduces the given expression modulo that
basis. An error results if the list of expressions is inconsistent. The
returned value is an expression representing the reduced polynomial. As
a side effect, GREDUCE sets the variable {\it gvarslast} in the same
manner as GROEBNER does.

\subsubsection{Reduction with Respect to Arbitrary Polynomials}
\ttindex{PREDUCE}
\[
PREDUCE(exp, \{exp1, exp2,\ldots , expm\});
\]
where $exp$ is an expression, and $\{exp1, exp2, \ldots , expm\}$ is a
list of any number of expressions or equations.

PREDUCE reduces the given expression modulo the set
$\{exp1, \ldots , expm\}$. If this set is a Gr\"obner basis, the
obtained reduced expression is uniquely determined. If not, then it
depends on the sequence of the single reduction steps
(see~\ref{GROEBNER:background}). PREDUCE does not check whether
$\{exp1, exp2, \ldots , expm\}$ is a Gr\"obner basis under the current
term order. Therefore, if the expressions are a Gr\"obner basis
calculated earlier with a variable sequence given explicitly or
modified by optimisation, the proper variable sequence and term order
must be activated first.

\example (PREDUCE called with a Gr\"obner basis):

{\small\begin{verbatim}
torder({x,y},lex);

gb:=groebner{3*x**2*y + 2*x*y + y + 9*x**2 + 5*x - 3,
             2*x**3*y - x*y - y + 6*x**3 - 2*x**2 - 3*x + 3,
             x**3*y + x**2*y + 3*x**3 + 2*x**2}$

preduce (5*y**2 + 2*x**2*y + 5/2*x*y + 3/2*y + 8*x**2
         + 3/2*x - 9/2, gb);

  2
 Y
\end{verbatim}}

\section{Ideal Decomposition \& Equation System Solving}

Based on the elementary Gr\"obner operations, the GROEBNER package
offers additional operators, which allow the decomposition of an ideal
or of a system of equations down to the individual solutions. Details
of the operators\ttindex{GROESOLVE}\ttindex{GROEBNERF}
\ttindex{IDEALQUOTIENT}GROESOLVE, GROEBNERF and IDEALQUOTIENT, together
with associated functions, can be found in the full documentation.

\chapter{IDEALS: Arithmetic for polynomial ideals}
\label{IDEALS}
\typeout{{IDEALS: Arithmetic for polynomial ideals}}

{\footnotesize
\begin{center}
Herbert Melenk \\
Konrad--Zuse--Zentrum f\"ur Informationstechnik Berlin \\
Takustra\"se 7 \\
D--14195 Berlin--Dahlem, Germany \\[0.05in]
e--mail: melenk@zib.de
\end{center}
}
\ttindex{IDEALS}

This package implements the basic arithmetic for polynomial ideals by
exploiting the Gr\"obner bases package of \REDUCE. In order to save
computing time all intermediate Gr\"obner bases are stored internally
so that time-consuming recomputations are avoided. A uniform variable
setting facilitates access to the package.

\section{Initialization}

Prior to any computation the set of variables has to be declared by
calling the operator $I\_setting$. For example, in order to initiate
computations in the polynomial ring $Q[x,y,z]$, call
{\small\begin{verbatim}
   I_setting(x,y,z);
\end{verbatim}}
A subsequent call to $I\_setting$ allows one to select another set of
variables; at the same time the internal data structures are cleared in
order to free memory resources.
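For instance (a minimal sketch; the variable names are arbitrary):
{\small\begin{verbatim}
   I_setting(x,y,z);   % initiate computations in Q[x,y,z]
   I_setting(u,v);     % later: switch to Q[u,v]; the internally
                       % stored bases are discarded
\end{verbatim}}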
\section{Bases}

An ideal is represented by a basis (set of polynomials) tagged with the
symbol $I$, {\em e.g.\ }
{\small\begin{verbatim}
   u := I(x*z-y**2, x**3-y*z);
\end{verbatim}}
Alternatively a list of polynomials can be used as input basis;
however, all arithmetic results will be presented in the above form.
The operator $ideal2list$ allows one to convert an ideal basis into a
conventional \REDUCE\ list.

\subsection{Operators}

Because of syntactical restrictions in \REDUCE, special operators have
to be used for ideal arithmetic:
{\small\begin{verbatim}
   .+           ideal sum (infix)
   .*           ideal product (infix)
   .:           ideal quotient (infix)
   ./           ideal quotient (infix)
   .=           ideal equality test (infix)
   subset       ideal inclusion test (infix)
   intersection ideal intersection (prefix,binary)
   member       test for membership in an ideal
                (infix: polynomial and ideal)
   gb           Groebner basis of an ideal (prefix, unary)
   ideal2list   convert ideal basis to polynomial list
                (prefix,unary)
\end{verbatim}}
Example:
{\small\begin{verbatim}
   I(x+y,x^2) .* I(x-z);

      2                      2    2
   I(X  + X*Y - X*Z - Y*Z,X*Y  - Y *Z)
\end{verbatim}}
Note that ideal equality cannot be tested with the \REDUCE\ equal sign:
{\small\begin{verbatim}
   I(x,y) = I(y,x)     is false
   I(x,y) .= I(y,x)    is true
\end{verbatim}}

\chapter{INEQ: Support for solving inequalities}
\label{INEQ}
\typeout{{INEQ: Support for solving inequalities}}

{\footnotesize
\begin{center}
Herbert Melenk \\
Konrad--Zuse--Zentrum f\"ur Informationstechnik Berlin \\
Takustra\"se 7 \\
D--14195 Berlin--Dahlem, Germany \\[0.05in]
e--mail: melenk@zib.de
\end{center}
}
\ttindex{INEQ}

This package supports the operator {\bf ineq\_solve} that tries to
solve single inequalities and sets of coupled inequalities.

The following types of systems are supported\footnote{For linear
optimization problems please use the operator {\bf simplex} of the
{\bf linalg} package (section~\ref{simplex}).}:
\begin{itemize}
\item only numeric coefficients (no parametric system),
\item a linear system of mixed equations and $<=$ -- $>=$ inequalities,
applying the method of Fourier and Motzkin,
\item a univariate inequality with $<=$, $>=$, $>$ or $<$ operator and
polynomial or rational left--hand and right--hand sides, or a system of
such inequalities with only one variable.
\end{itemize}

Syntax:
\begin{center}
{\tt INEQ\_SOLVE($<$expr$>$ [,$<$vl$>$])}
\end{center}
where $<$expr$>$ is an inequality or a list of coupled inequalities and
equations, and the optional argument $<$vl$>$ is a single variable
(kernel) or a list of variables (kernels). If not specified, they are
extracted automatically from $<$expr$>$. For multivariate input an
explicit variable list specifies the elimination sequence: the last
member is the most specific one.

An error message occurs if the input cannot be processed by the current
algorithms.

The result is a list. It is empty if the system has no feasible
solution. Otherwise the result presents the admissible ranges as a set
of equations where each variable is equated to one expression or to an
interval. The most specific variable is the first one in the result
list and each form contains only preceding variables (resolved form).
The interval limits can be formal {\bf max} or {\bf min} expressions.
Algebraic numbers are encoded as rounded number approximations.

\noindent
{\bf Examples}:
{\small\begin{verbatim}
ineq_solve({(2*x^2+x-1)/(x-1) >= (x+1/2)^2, x>0});

 {x=(0 .. 0.326583),x=(1 ..
  2.56777)}

reg:= {a + b - c>=0, a - b + c>=0, - a + b + c>=0, 0>=0,
       2>=0, 2*c - 2>=0, a - b + c>=0, a + b - c>=0,
       - a + b + c - 2>=0, 2>=0, 0>=0, 2*b - 2>=0,
       k + 1>=0, - a - b - c + k>=0, - a - b - c + k + 2>=0,
       - 2*b + k>=0, - 2*c + k>=0, a + b + c - k>=0,
       2*b + 2*c - k - 2>=0, a + b + c - k>=0}$

ineq_solve (reg,{k,a,b,c});

 {c=(1 .. infinity),
  b=(1 .. infinity),
  a=(max( - b + c,b - c) .. b + c - 2),
  k=a + b + c}
\end{verbatim}}

\chapter[INVBASE: Involutive Bases]%
{INVBASE: A package for computing involutive bases}
\label{INVBASE}
\typeout{{INVBASE: A package for computing involutive bases}}

{\footnotesize
\begin{center}
A.Yu.Zharkov, Yu.A.Blinkov\\
Saratov University\\
Astrakhanskaya 83\\
410071 Saratov, Russia\\[0.05in]
e--mail: postmaster@scnit.saratov.su
\end{center}
}
\ttindex{INVBASE}

Involutive bases are a new tool for solving problems in connection with
multivariate polynomials, such as solving systems of polynomial
equations and analysing polynomial ideals. An involutive basis of a
polynomial ideal is a special form of a redundant Gr\"obner basis. The
construction of involutive bases reduces the problem of solving
polynomial systems to simple linear algebra. The INVBASE package can be
seen as an alternative to Buchberger's algorithm.

\section{The Basic Operators}

\subsection{Term Ordering}

The term order modes available
are\ttindex{REVGRADLEX}\ttindex{GRADLEX}\ttindex{LEX}
{\tt REVGRADLEX}, {\tt GRADLEX} and {\tt LEX}. These modes have the same
meaning as for the GROEBNER package.

All orderings are based on an ordering among the variables. For each
pair of variables an order relation $\gg$ must be defined. The term
ordering mode as well as the order of variables are set by the
operator\ttindex{INVTORDER} {\tt INVTORDER} {\it mode},$\{x_1,...,x_n\}$
where {\it mode} is one of the term order modes listed above. Giving
the variables as the list $\{x_1,...,x_n\}$ at the same time fixes the
order $x_1\gg \ldots \gg x_n$.

\subsection{Computing Involutive Bases}

To compute the involutive basis of the ideal generated by the set of
polynomials $\{p_1,...,p_m\}$ one should type the command
\ttindex{INVBASE}

\noindent{\tt INVBASE} $\{p_1,...,p_m\}$

where $p_i$ are polynomials in the variables listed in the
{\tt INVTORDER} operator. If some kernels in $p_i$ were not listed
previously in the {\tt INVTORDER} operator they are treated as
parameters, {\em i.e.\ } as part of the coefficients of the
polynomials. If {\tt INVTORDER} was omitted, all the kernels in $p_i$
are considered as variables with the default \REDUCE{} kernel order.

The coefficients of the polynomials $p_i$ may be integers as well as
rational numbers (or, accordingly, polynomials and rational functions
in the parametric case). Computations modulo a prime number are also
available. For this purpose one should type the \REDUCE\ commands
{\small\begin{verbatim}
ON MODULAR;
SETMOD p;
\end{verbatim}}
where $p$ is a prime number.

The value of the \f{INVBASE} function is a list of integer polynomials
$\{g_1,...,g_n\}$ representing an involutive basis of the given ideal.
{\small\begin{verbatim}
INVTORDER REVGRADLEX, {x,y,z};

g:= INVBASE {4*x**2 + x*y**2 - z + 1/4,
             2*x + y**2*z + 1/2,
             x**2*z - 1/2*x - y**2};

             3          2      3        2
g := {8*x*y*z  - 2*x*y*z  + 4*y  - 4*y*z  + 16*x*y + 17*y*z - 4*y,

         4        2        2               2
      8*y  - 8*x*z  - 256*y  + 2*x*z + 64*z  - 96*x + 20*z - 9,

         3
      2*y *z + 4*x*y + y,

           3        2      2      2
      8*x*z  - 2*x*z  + 4*y  - 4*z  + 16*x + 17*z - 4,

             3      3                2
       - 4*y*z  - 8*y  + 6*x*y*z + y*z  - 36*x*y - 8*y,

           2       2      2
      4*x*y  + 32*y  - 8*z  + 12*x - 2*z + 1,

         2
      2*y *z + 4*x + 1,

           3      2            2
       - 4*z  - 8*y  + 6*x*z + z  - 36*x - 8,

         2       2      2
      8*x  - 16*y  + 4*z  - 6*x - z}
\end{verbatim}}
To convert it into a lexicographical Gr\"obner basis one should type
{\small\begin{verbatim}
h := INVLEX g;

                      6        5         4           3
h := {3976*x + 37104*z  - 600*z  + 2111*z  + 122062*z

                   2
         + 232833*z  - 680336*z + 288814,

            2          6         5         4           3
      1988*y  - 76752*z  + 1272*z  - 4197*z  - 251555*z

                   2
         - 481837*z  + 1407741*z - 595666,

          7      6    5       4       3        2
      16*z  - 8*z  + z  + 52*z  + 75*z  - 342*z  + 266*z - 60}
\end{verbatim}}

\chapter[LAPLACE: Laplace transforms etc.]%
{LAPLACE: Laplace and inverse Laplace transforms}
\label{LAPLACE}
\typeout{{LAPLACE: Laplace and inverse Laplace transforms}}

{\footnotesize
\begin{center}
C. Kazasov, M. Spiridonova, V. Tomov \\
Sofia, Bulgaria
%%\\[0.05in]
%%e--mail:
\end{center}
}
\ttindex{LAPLACE}

The LAPLACE package provides both Laplace Transforms and Inverse
Laplace Transforms, with the two operators

\noindent{\tt LAPLACE(exp, s\_var, t\_var)}\ttindex{LAPLACE} \\
{\tt INVLAP(exp, s\_var, t\_var)}\ttindex{INVLAP}

The action is to transform the expression from the {\tt s\_var} or
source variable into the {\tt t\_var} or target variable. If
{\tt t\_var} is omitted, the package uses an internal variable
{\tt lp!\&} or {\tt il!\&} respectively.

Three switches control the transformations. If {\tt lmon}\ttindex{lmon}
is on then sine, cosine, hyperbolic sine and hyperbolic cosine are
converted by LAPLACE into exponentials. If {\tt lhyp} is on then
exponential functions are converted into hyperbolic form. The last
switch {\tt ltrig}\ttindex{ltrig} has the same effect except it uses
trigonometric functions.

The system can be extended by adding Laplace transformation rules for
single functions, using rules or rule sets. In such a rule the source
variable {\bf must} be free, the target variable {\bf must} be
{\tt il!\&} for LAPLACE and {\tt lp!\&} for INVLAP, with the third
parameter omitted. Rules for transforming derivatives are also entered
in such a form. For example
{\small\begin{verbatim}
let {laplace(log(~x),x) => -log(gam * il!&)/il!&,
     invlap(log(gam * ~x)/x,x) => -log(lp!&)};

operator f;
let {
  laplace(df(f(~x),x),x) => il!&*laplace(f(x),x) - sub(x=0,f(x)),
  laplace(df(f(~x),x,~n),x) => il!&**n*laplace(f(x),x) -
      for i:=n-1 step -1 until 0 sum
          sub(x=0, df(f(x),x,n-1-i)) * il!&**i
              when fixp n,
  laplace(f(~x),x) = f(il!&) };
\end{verbatim}}

The LAPLACE system knows about the functions {\tt DELTA} and
{\tt GAMMA}, and uses the operator {\tt ONE} for the unit step
function; {\tt INTL} stands for the parameterised integral function:
for instance, {\tt intl(2*y**2,y,0,x)} stands for $\int^x_0 2 y^2\,dy$.
{\small\begin{verbatim} load_package laplace; laplace(sin(17*x),x,p); 17 ---------- 2 p + 289 on lmon; laplace(-1/4*e**(a*x)*(x-k)**(-1/2), x, p); 1 a*k - ---*sqrt(pi)*e 4 ---------------------- k*p e *sqrt( - a + p) invlap(c/((p-a)*(p-b)), p, t); a*t b*t c*(e - e ) ----------------- a - b invlap(p**(-7/3), p, t); 1/3 t *t ------------ 7 gamma(---) 3 \end{verbatim}} \chapter[LIE: Classification of Lie algebras]% {LIE: Functions for the classification of real n-dimensional Lie algebras} \label{LIE} \typeout{{LIE: Functions for the classification of real n-dimensional Lie algebras}} {\footnotesize \begin{center} Carsten and Franziska Sch\"obel\\ The Leipzig University, Computer Science Department \\ Augustusplatz 10/11, \\ O-7010 Leipzig, Germany \\[0.05in] e--mail: cschoeb@aix550.informatik.uni-leipzig.de \end{center} } \ttindex{LIE} {\bf LIE} is a package of functions for the classification of real n-dimensional Lie algebras. It consists of two modules: {\bf liendmc1} and {\bf lie1234}. \section{liendmc1} With the help of the functions in this module real n-dimensional Lie algebras $L$ with a derived algebra $L^{(1)}$ of dimension 1 can be classified. $L$ has to be defined by its structure constants $c_{ij}^k$ in the basis $\{X_1,\ldots,X_n\}$ with $[X_i,X_j]=c_{ij}^k X_k$. The user must define an ARRAY LIENSTRUCIN($n,n,n$) with n being the dimension of the Lie algebra $L$. The structure constants LIENSTRUCIN($i,j,k$):=$c_{ij}^k$ for $i<j$ should be given. Then the procedure LIENDIMCOM1 can be called. Its syntax is:\ttindex{LIENDIMCOM1} {\small\begin{verbatim} LIENDIMCOM1(<number>). \end{verbatim}} {\tt <number>} corresponds to the dimension $n$. The procedure simplifies the structure of $L$ performing real linear transformations. The returned value is a list of the form {\small\begin{verbatim} (i) {LIE_ALGEBRA(2),COMMUTATIVE(n-2)} or (ii) {HEISENBERG(k),COMMUTATIVE(n-k)} \end{verbatim}} with $3\leq k\leq n$, $k$ odd. The returned list is also stored as\ttindex{LIE\_LIST}{\tt LIE\_LIST}. The matrix LIENTRANS gives the transformation from the given basis $\{X_1,\ldots ,X_n\}$ into the standard basis $\{Y_1,\ldots ,Y_n\}$: $Y_j=($LIENTRANS$)_j^k X_k$. \section{lie1234} This part of the package classifies real low-dimensional Lie algebras $L$ of the dimension $n:={\rm dim}\,L=1,2,3,4$. $L$ is also given by its structure constants $c_{ij}^k$ in the basis $\{X_1,\ldots,X_n\}$ with $[X_i,X_j]=c_{ij}^k X_k$. An ARRAY LIESTRIN($n,n,n$) has to be defined and LIESTRIN($i,j,k$):=$c_{ij}^k$ for $i<j$ should be given. Then the procedure LIECLASS can be performed whose syntax is:\ttindex{LIECLASS} {\small\begin{verbatim} LIECLASS(<number>). \end{verbatim}} {\tt <number>} should be the dimension of the Lie algebra $L$. The procedure stepwise simplifies the commutator relations of $L$ using properties of invariance like the dimension of the centre, of the derived algebra, unimodularity {\em etc.} The returned value has the form: {\small\begin{verbatim} {LIEALG(n),COMTAB(m)}, \end{verbatim}} where the value $m$ corresponds to the number of the standard form (basis: $\{Y_1, \ldots ,Y_n\}$) in an enumeration scheme. This returned value is also stored as LIE\_CLASS. The linear transformation from the basis $\{X_1,\ldots,X_n\}$ into the basis of the standard form $\{Y_1,\ldots,Y_n\}$ is given by the matrix LIEMAT: $Y_j=($LIEMAT$)_j^k X_k$. \chapter{LIMITS: A package for finding limits} \label{LIMITS} \typeout{{LIMITS: A package for finding limits}} {\footnotesize \begin{center} Stanley L. 
Kameny \\
Los Angeles, U.S.A.
\end{center}
}
\ttindex{LIMITS}

LIMITS is a fast limit package for \REDUCE\ for functions which are
continuous except for computable poles and singularities, based on some
earlier work by Ian Cohen and John P. Fitch. The Truncated Power Series
package is used for non-critical points, at which the value of the
function is the constant term in the expansion around that point.
\index{l'H\^opital's rule}
l'H\^opital's rule is used in critical cases, with preprocessing of
$\infty - \infty$ forms and reformatting of product forms in order to
apply it. A limited amount of bounded arithmetic is also employed where
applicable.

\section{Normal entry points}
\ttindex{LIMIT}

\vspace{.1in}
\noindent
{\tt LIMIT}(EXPRN:{\em algebraic}, VAR:{\em kernel},
LIMPOINT:{\em algebraic}):{\em algebraic}
\vspace{.1in}

This is the standard way of calling limit, applying all of the methods.
The result is the limit of EXPRN as VAR approaches LIMPOINT.

\section{Direction-dependent limits}
\ttindex{LIMIT+}\ttindex{LIMIT-}

\vspace{.1in}
\noindent
{\tt LIMIT!+}(EXPRN:{\em algebraic}, VAR:{\em kernel},
LIMPOINT:{\em algebraic}):{\em algebraic} \\
\noindent
{\tt LIMIT!-}(EXPRN:{\em algebraic}, VAR:{\em kernel},
LIMPOINT:{\em algebraic}):{\em algebraic}
\vspace{.1in}

If the limit depends upon the direction of approach to the
{\tt LIMPOINT}, the functions {\tt LIMIT!+} and {\tt LIMIT!-} may be
used. They are defined by:

\vspace{.1in}
\noindent{\tt LIMIT!+} (EXP,VAR,LIMPOINT) $\rightarrow$
\hspace*{2em}{\tt LIMIT}(EXP*,$\epsilon$,0) \\
where EXP* = sub(VAR=LIMPOINT+$\epsilon^2$,EXP)

and

\noindent{\tt LIMIT!-} (EXP,VAR,LIMPOINT) $\rightarrow$
\hspace*{2em}{\tt LIMIT}(EXP*,$\epsilon$,0) \\
where EXP* = sub(VAR=LIMPOINT-$\epsilon^2$,EXP)

Examples:
{\small\begin{verbatim}
load_package misc;

limit(sin(x)/x,x,0);

 1

limit((a^x-b^x)/x,x,0);

 log(a) - log(b)

limit(x/(e**x-1), x, 0);

 1

limit!-(sin x/cos x,x,pi/2);

 infinity

limit!+(sin x/cos x,x,pi/2);

 - infinity

limit(x^log(1/x),x,infinity);

 0

limit((x^(1/5) + 3*x^(1/4))^2/(7*(sqrt(x + 9) - 3 - x/6))^(1/5),x,0);

       3/5
    - 6
  ----------
      1/5
     7
\end{verbatim}}

\chapter{LINALG: Linear algebra package}
\label{LINALG}
\typeout{{LINALG: Linear algebra package}}

{\footnotesize
\begin{center}
Matt Rebbeck \\
Konrad--Zuse--Zentrum f\"ur Informationstechnik Berlin \\
Takustra\"se 7 \\
D--14195 Berlin--Dahlem, Germany \\[0.05in]
\end{center}
}
\ttindex{LINALG}

\section{Introduction}

This package provides a selection of functions that are useful in the
world of linear algebra.
They can be classified into four sections: \subsection{Basic matrix handling} \begin{center} \begin{tabular}{l l l l} add\_columns\ttindex{ADD\_COLUMNS} & add\_rows\ttindex{ADD\_ROWS} & add\_to\_columns\ttindex{ADD\_TO\_COLUMNS} & add\_to\_rows\ttindex{ADD\_TO\_ROWS} \\ augment\_columns\ttindex{AUGMENT\_COLUMNS} & char\_poly\ttindex{CHAR\_POLY} & column\_dim\ttindex{COLUMN\_DIM} & copy\_into\ttindex{COPY\_INTO} \\ diagonal\ttindex{DIAGONAL} & extend\ttindex{EXTEND} & find\_companion\ttindex{FIND\_COMPANION} & get\_columns\ttindex{GET\_COLUMNS} \\ get\_rows\ttindex{GET\_ROWS} & hermitian\_tp\ttindex{HERMITIAN\_TP} & matrix\_augment\ttindex{MATRIX\_AUGMENT} & matrix\_stack\ttindex{MATRIX\_STACK} \\ minor\ttindex{MINOR} & mult\_columns\ttindex{MULT\_COLUMNS} & mult\_rows\ttindex{MULT\_ROWS} & pivot\ttindex{PIVOT} \\ remove\_columns\ttindex{REMOVE\_COLUMNS} & remove\_rows\ttindex{REMOVE\_ROWS} & row\_dim\ttindex{ROW\_DIM} & rows\_pivot\ttindex{ROWS\_PIVOT} \\ stack\_rows\ttindex{STACK\_ROWS} & sub\_matrix\ttindex{SUB\_MATRIX} & swap\_columns\ttindex{SWAP\_COLUMNS} & swap\_entries\ttindex{SWAP\_ENTRIES} \\ swap\_rows\ttindex{SWAP\_ROWS} & & & \end{tabular} \end{center} \subsection{Constructors} Functions that create matrices. \begin{center} \begin{tabular}{l l l l} band\_matrix\ttindex{BAND\_MATRIX} & block\_matrix\ttindex{BLOCK\_MATRIX} & char\_matrix\ttindex{CHAR\_MATRIX} & coeff\_matrix\ttindex{COEFF\_MATRIX} \\ companion\ttindex{COMPANION} & hessian\ttindex{HESSIAN} & hilbert\ttindex{HILBERT} & jacobian\ttindex{JACOBIAN} \\ jordan\_block\ttindex{JORDAN\_BLOCK} & make\_identity\ttindex{MAKE\_IDENTITY} & random\_matrix\ttindex{RANDOM\_MATRIX} & toeplitz\ttindex{TOEPLITZ} \\ vandermonde\ttindex{VANDERMONDE} & Kronecker\_Product\ttindex{KRONECKER\_PRODUCT} & \end{tabular} \end{center} \subsection{High level algorithms} \begin{center} \begin{tabular}{l l l l} char\_poly\ttindex{CHAR\_POLY} & cholesky\ttindex{CHOLESKY} & gram\_schmidt\ttindex{GRAM\_SCHMIDT} & lu\_decom\ttindex{LU\_DECOM} \\ pseudo\_inverse\ttindex{PSEUDO\_INVERSE} & simplex\ttindex{SIMPLEX} & svd\ttindex{SVD} & triang\_adjoint\ttindex{TRIANG\_ADJOINT} \\ \end{tabular} \end{center} \vspace*{5mm} There is a separate {\small NORMFORM} package (chapter~\ref{NORMFORM}) for computing the matrix normal forms smithex, smithex\_int, frobenius, ratjordan, jordansymbolic and jordan in \REDUCE. \subsection{Predicates} \begin{center} \begin{tabular}{l l l} matrixp\ttindex{MATRIXP} & squarep\ttindex{SQUAREP} & symmetricp\ttindex{SYMMETRICP} \end{tabular} \end{center} \section{Explanations} In the examples the matrix ${\cal A}$ will be \begin{flushleft} \begin{math} {\cal A} = \left( \begin{array}{ccc} 1 & 2 & 3 \\ 4 & 5 & 6 \\ 7 & 8 & 9 \end{array} \right) \end{math} \end{flushleft} Throughout ${\cal I}$ is used to indicate the identity matrix and ${\cal A}^T$ to indicate the transpose of the matrix ${\cal A}$. Many of the functions have a fairly obvious meaning. Others need a little explanation. \section{Basic matrix handling} The functions \f{ADD\_COLUMNS}\ttindex{ADD\_COLUMNS} and \f{ADD\_ROWS} provide basic operations between rows and columns. The form is \noindent {\tt add\_columns(${\cal A}$,c1,c2,expr);} and it replaces column c2 of the matix by expr $*$ column(${\cal A}$,c1) $+$ column(${\cal A}$,c2). \f{ADD\_TO\_COLUMNS}\ttindex{ADD\_TO\_COLUMNS} and \f{ADD\_TO\_ROWS}\ttindex{ADD\_TO\_ROWS} do a similar task, adding an expression to each of a number of columns (or rows) specified by a list. 
\begin{math}
\begin{array}{ccc}
{\tt add\_to\_columns}({\cal A},\{1,2\},10) & = &
\left( \begin{array}{ccc} 11 & 12 & 3 \\ 14 & 15 & 6 \\ 17 & 18 & 9
\end{array} \right)
\end{array}
\end{math}

The functions \f{MULT\_COLUMNS}\ttindex{MULT\_COLUMNS} and
\f{MULT\_ROWS}\ttindex{MULT\_ROWS} are the corresponding operations
that multiply the specified columns and rows by an expression.

\f{COLUMN\_DIM}\ttindex{COLUMN\_DIM} and \f{ROW\_DIM}\ttindex{ROW\_DIM}
find the column dimension and row dimension of their argument.

Parts of one matrix can be copied into another by using
\f{COPY\_INTO}\ttindex{COPY\_INTO}; the last two arguments are the row
and column position at which the copied matrix is placed.

\begin{flushleft}
\hspace*{0.175in}
\begin{math}
{\cal G} =
\left( \begin{array}{cccc} 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 \\
0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 \end{array} \right)
\end{math}
\end{flushleft}

\begin{flushleft}
\hspace*{0.1in}
\begin{math}
\begin{array}{ccc}
{\tt copy\_into}({\cal A,G},1,2) & = &
\left( \begin{array}{cccc} 0 & 1 & 2 & 3 \\ 0 & 4 & 5 & 6 \\
0 & 7 & 8 & 9 \\ 0 & 0 & 0 & 0 \end{array} \right)
\end{array}
\end{math}
\end{flushleft}

A diagonal matrix can be created with \f{DIAGONAL}\ttindex{DIAGONAL}.
The argument is a list of expressions or matrices which form the
diagonal.

An existing matrix can be extended; the call
\f{EXTEND}(A,r,c,exp)\ttindex{EXTEND} returns the matrix A extended by
r rows and c columns, with the new entries all exp.

The function \f{GET\_COLUMNS}\ttindex{GET\_COLUMNS} extracts from a
matrix a list of the specified columns as matrices.
\f{GET\_ROWS}\ttindex{GET\_ROWS} does the equivalent for rows.

\begin{flushleft}
\hspace*{0.1in}
\begin{math}
\begin{array}{ccc}
{\tt get\_columns}({\cal A},\{1,3\}) & = &
\left\{
\left( \begin{array}{c} 1 \\ 4 \\ 7 \end{array} \right),
\left( \begin{array}{c} 3 \\ 6 \\ 9 \end{array} \right)
\right\}
\end{array}
\end{math}
\end{flushleft}

The Hermitian transpose, that is a matrix in which the (i,$\,$j) entry
is the conjugate of the (j,$\,$i) entry of the input, is returned by
\f{HERMITIAN\_TP}\ttindex{HERMITIAN\_TP}.

\f{MATRIX\_AUGMENT}(\{mat$_{1}$,mat$_{2}$, \ldots
,mat$_{n}$\})\ttindex{MATRIX\_AUGMENT} produces a new matrix from the
list, joined as new columns. \ttindex{MATRIX\_STACK}\f{MATRIX\_STACK}
joins a list of matrices by stacking them.

\begin{flushleft}
\hspace*{0.1in}
\begin{math}
\begin{array}{ccc}
{\tt matrix\_stack}(\{{\cal A,A}\}) & = &
\left( \begin{array}{ccc} 1 & 2 & 3 \\ 4 & 5 & 6 \\ 7 & 8 & 9 \\
1 & 2 & 3 \\ 4 & 5 & 6 \\ 7 & 8 & 9 \end{array} \right)
\end{array}
\end{math}
\end{flushleft}

\f{MINOR}(A,r,c)\ttindex{MINOR} calculates the (r,c) minor of A.

\f{PIVOT}\ttindex{PIVOT} pivots a matrix about its (r,c) entry. To do
this, multiples of the $r^{th}$ row are added to every other row in the
matrix. This means that the $c^{th}$ column will be 0 except for the
(r,c) entry. A variant on this operation is provided by
\f{ROWS\_PIVOT}\ttindex{ROWS\_PIVOT}. It applies the pivot only to the
rows specified as the last argument.

A sub-matrix can be extracted with \f{SUB\_MATRIX}\ttindex{SUB\_MATRIX},
giving lists of the rows and columns to keep.

\begin{flushleft}
\hspace*{0.1in}
\begin{math}
\begin{array}{ccc}
{\tt sub\_matrix}({\cal A},\{1,3\},\{2,3\}) & = &
\left( \begin{array}{cc} 2 & 3 \\ 8 & 9 \end{array} \right)
\end{array}
\end{math}
\end{flushleft}

The basic operation of swapping rows or columns is provided by
\f{SWAP\_ROWS}\ttindex{SWAP\_ROWS} and
\f{SWAP\_COLUMNS}\ttindex{SWAP\_COLUMNS}. Individual entries can be
swapped with \f{SWAP\_ENTRIES}\ttindex{SWAP\_ENTRIES}.
\begin{flushleft} \hspace*{0.1in} \begin{math} \begin{array}{ccc} {\tt swap\_columns}({\cal A},2,3) & = & \left( \begin{array}{ccc} 1 & 3 & 2 \\ 4 & 6 & 5 \\ 7 & 9 & 8 \end{array} \right) \end{array} \end{math} \end{flushleft} \begin{flushleft} \hspace*{0.1in} \begin{math} \begin{array}{ccc} {\tt swap\_entries}({\cal A},\{1,1\},\{3,3\}) & = & \left( \begin{array}{ccc} 9 & 2 & 3 \\ 4 & 5 & 6 \\ 7 & 8 & 1 \end{array} \right) \end{array} \end{math} \end{flushleft} \section{Constructors} \f{AUGMENT\_COLUMNS}\ttindex{AUGMENT\_COLUMNS} allows just specified columns to be selected; \f{STACK\_ROWS}\ttindex{STACK\_ROWS} does a similar job for rows. \begin{math} \begin{array}{ccc} {\tt stack\_rows}({\cal A},\{1,3\}) & = & \left( \begin{array}{ccc} 1 & 2 & 3 \\ 7 & 8 & 9 \end{array} \right) \end{array} \end{math} Rows or columns can be removed with \f{REMOVE\_COLUMNS}\ttindex{REMOVE\_COLUMNS} and \f{REMOVE\_ROWS}\ttindex{REMOVE\_ROWS}. \begin{flushleft} \hspace*{0.1in} \begin{math} \begin{array}{ccc} {\tt remove\_columns}({\cal A},2) & = & \left( \begin{array}{cc} 1 & 3 \\ 4 & 6 \\ 7 & 9 \end{array} \right) \end{array} \end{math} \end{flushleft} {\tt BAND\_MATRIX}\ttindex{BAND\_MATRIX} creates a square matrix of dimension its second argument. The diagonal consists of the middle expressions of the first argument, which is an expression list. The expressions to the left of this fill the required number of sub\_diagonals and the expressions to the right the super\_diagonals. \begin{math} \begin{array}{ccc} {\tt band\_matrix}(\{x,y,z\},6) & = & \left( \begin{array}{cccccc} y & z & 0 & 0 & 0 & 0 \\ x & y & z & 0 & 0 & 0 \\ 0 & x & y & z & 0 & 0 \\ 0 & 0 & x & y & z & 0 \\ 0 & 0 & 0 & x & y & z \\ 0 & 0 & 0 & 0 & x & y \end{array} \right) \end{array} \end{math} Related to the band matrix is a block matrix, which can be created by \noindent {\tt BLOCK\_MATRIX(r,c,matrix\_list)}.\ttindex{BLOCK\_MATRIX} The resulting matrix consists of r by c matrices filled from the matrix\_list row wise. \begin{flushleft} \hspace*{0.1in} \begin{math} \begin{array}{ccc} {\cal B} = \left( \begin{array}{cc} 1 & 0 \\ 0 & 1 \end{array} \right), & {\cal C} = \left( \begin{array}{c} 5 \\ 5 \end{array} \right), & {\cal D} = \left( \begin{array}{cc} 22 & 33 \\ 44 & 55 \end{array} \right) \end{array} \end{math} \end{flushleft} \vspace*{0.175in} \begin{flushleft} \hspace*{0.1in} \begin{math} \begin{array}{ccc} {\tt block\_matrix}(2,3,\{{\cal B,C,D,D,C,B}\}) & = & \left( \begin{array}{ccccc} 1 & 0 & 5 & 22 & 33 \\ 0 & 1 & 5 & 44 & 55 \\ 22 & 33 & 5 & 1 & 0 \\ 44 & 55 & 5 & 0 & 1 \end{array} \right) \end{array} \end{math} \end{flushleft} Characteristic polynomials and characteristic matrices are created by the functions {\tt CHAR\_POLY}\ttindex{CHAR\_POLY} and \f{CHAR\_MATRIX}\ttindex{CHAR\_MATRIX}. A set of linear equations can be turned into the associated coefficient matrix and vector of unknowns and the righthandside. \f{COEFF\_MATRIX} returns a list \{${\cal C,X,B}$\} such that ${\cal CX} = {\cal B}$. \begin{math} \hspace*{0.175in} {\tt coeff\_matrix}(\{x+y+4*z=10,y+x-z=20,x+y+4\}) = \end{math} \vspace*{0.1in} \begin{flushleft} \hspace*{0.175in} \begin{math} \left\{ \left( \begin{array}{ccc} 4 & 1 & 1 \\ -1 & 1 & 1 \\ 0 & 1 & 1 \end{array} \right), \left( \begin{array}{c} z \\ y \\ x \end{array} \right), \left( \begin{array}{c} 10 \\ 20 \\ -4 \end{array} \right) \right\} \end{math} \end{flushleft} \f{COMPANION}(poly,x) creates the companion matrix ${\cal C}$ of a polynomial. 
That is the square matrix of dimension n, where n is the degree of polynomial with respect to x, and the entries of ${\cal C}$ are: ${\cal C}$(i,n) = -coeffn(poly,x,i-1) for i = 1 \ldots n, ${\cal C}$(i,i-1) = 1 for i = 2 \ldots n and the rest are 0. \begin{flushleft} \hspace*{0.1in} \begin{math} \begin{array}{ccc} {\tt companion}(x^4+17*x^3-9*x^2+11,x) & = & \left( \begin{array}{cccc} 0 & 0 & 0 & -11 \\ 1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 9 \\ 0 & 0 & 1 & -17 \end{array} \right) \end{array} \end{math} \end{flushleft} The polynomial associated with a companion matrix can be recovered by calling \f{FIND\_COMPANION}\ttindex{FIND\_COMPANION}. \f{HESSIAN}(expr, var\_list)\ttindex{HESSIAN} calculates the Hessian matrix of the expressions with respect to the variables in the list, or the single variable. That is the matrix with the (i,$\,$j) element the $j^{th}$ derivative of the expressions with respect to the $i^{th}$ variable. \begin{flushleft} \hspace*{0.1in} \begin{math} \begin{array}{ccc} {\tt hessian}(x*y*z+x^2,\{w,x,y,z\}) & = & \left( \begin{array}{cccc} 0 & 0 & 0 & 0 \\ 0 & 2 & z & y \\ 0 & z & 0 & x \\ 0 & y & x & 0 \end{array} \right) \end{array} \end{math} \end{flushleft} Hilbert's matrix, that is where the (i,$\,$j) element is $1/(i+j-x)$ is constructed by \f{HILBERT}(n,x)\ttindex{HILBERT}. The Jacobian of an expression list with respect to a variable list is calculated by \f{JACOBIAN}(expr\_list,variable\_list)\ttindex{JACOBIAN}. This is a matrix whose (i,$\,$j) entry is df(expr\_list(i),variable\_list(j)). The square Jordan block matrix of dimension $n$ is calculated by the function \f{JORDAN\_BLOCK}(exp,n).\ttindex{JORDAN\_BLOCK} The entries of the Jordan\_block matrix are ${\cal J}$(i,i) = expr for i=1 \ldots n, ${\cal J}$(i,i+1) = 1 for i=1 \ldots n-1, and all other entries are 0. \begin{flushleft} \hspace*{0.1in} \begin{math} \begin{array}{ccc} {\tt jordan\_block(x,5)} & = & \left( \begin{array}{ccccc} x & 1 & 0 & 0 & 0 \\ 0 & x & 1 & 0 & 0 \\ 0 & 0 & x & 1 & 0 \\ 0 & 0 & 0 & x & 1 \\ 0 & 0 & 0 & 0 & x \end{array} \right) \end{array} \end{math} \end{flushleft} \f{MAKE\_IDENTITY}(n)\ttindex{MAKE\_IDENTITY} generates the $n \times n$ identity matrix. \f{RANDOM\_MATRIX}(r,c,limit)\ttindex{RANDOM\_MATRIX} generates and $r \times c$ matrix with random values limited by {\tt limit}. The type of entries is controlled by a number of switches. \begin{description} \item[{\tt IMAGINARY}]\ttindex{IMAGINARY} If on then matrix entries are $x+i*y$ where $-limit < x,y < limit$. \item[{\tt NOT\_NEGATIVE}]\ttindex{NOT\_NEGATIVE} If on then $0 < entry < limit$. In the imaginary case we have $0 < x,y < limit$. \item[{\tt ONLY\_INTEGER}]\ttindex{ONLY\_INTEGER} If on then each entry is an integer. In the imaginary case $x$ and $y$ are integers. If off the values are rounded. \item[{\tt SYMMETRIC}]\ttindex{SYMMETRIC} If on then the matrix is symmetric. \item[{\tt UPPER\_MATRIX}]\ttindex{UPPER\_MATRIX} If on then the matrix is upper triangular. \item[{\tt LOWER\_MATRIX}]\ttindex{LOWER\_MATRIX} If on then the matrix is lower triangular. 
\end{description} \begin{flushleft} \hspace*{0.1in} \begin{math} \begin{array}{ccc} {\tt random\_matrix}(3,3,10) & = & \left( \begin{array}{ccc} -4.729721 & 6.987047 & 7.521383 \\ - 5.224177 & 5.797709 & - 4.321952 \\ - 9.418455 & - 9.94318 & - 0.730980 \end{array} \right) \end{array} \end{math} \end{flushleft} \vspace*{0.2in} \hspace*{0.165in} {\tt on only\_integer, not\_negative, upper\_matrix, imaginary;} \begin{flushleft} %\hspace*{0.12in} \begin{math} \begin{array}{ccc} {\tt random\_matrix}(4,4,10) & = & \left( \begin{array}{cccc} 2*i+5 & 3*i+7 & 7*i+3 & 6 \\ 0 & 2*i+5 & 5*i+1 & 2*i+1 \\ 0 & 0 & 8 & i \\ 0 & 0 & 0& 5*i+9 \end{array} \right) \end{array} \end{math} \end{flushleft} {\tt TOEPLITZ}\ttindex{TOEPLITZ} creates the Toeplitz matrix from the given expression list. This is a square symmetric matrix in which the first expression is placed on the diagonal and the $i^{th}$ expression is placed on the $(i-1)^{th}$ sub- and super-diagonals. It has dimension equal to the number of expressions. \begin{flushleft} \begin{math} \begin{array}{ccc} {\tt toeplitz}(\{w,x,y,z\}) & = & \left( \begin{array}{cccc} w & x & y & z \\ x & w & x & y \\ y & x & w & x \\ z & y & x & w \end{array} \right) \end{array} \end{math} \end{flushleft} \f{VANDERMONDE}\ttindex{VANDERMONDE} creates the Vandermonde matrix from the expression list; the square matrix in which the (i,$\,$j) entry is expr\_list(i) $^{(j-1)}$. \begin{flushleft} \hspace*{0.1in} \begin{math} \begin{array}{ccc} {\tt vandermonde}(\{x,2*y,3*z\}) & = & \left( \begin{array}{ccc} 1 & x & x^2 \\ 1 & 2*y & 4*y^2 \\ 1 & 3*z & 9*z^2 \end{array} \right) \end{array} \end{math} \end{flushleft} The direct product\index{direct product} (or tensor product\index{tensor product}) is created by the \f{KRONECKER\_PRODUCT}\ttindex{KRONECKER\_PRODUCT} function. {\small\begin{verbatim} a1 := mat((1,2),(3,4),(5,6))$ a2 := mat((1,1,1),(2,z,2),(3,3,3))$ kronecker_product(a1,a2); \end{verbatim}} \begin{flushleft} \hspace*{0.1in} \begin{math} \begin{array}{ccc} \left( \begin{array}{cccccc} 1 & 1 & 1 & 2 & 2 & 2 \\ 2 & z & 2 & 4 &2*z &4 \\ 3 & 3 & 3 & 6 & 6 &6 \\ 3 & 3 & 3 & 4 & 4 &4 \\ 6 & 3*z& 6 & 8 &4*z &8 \\ 9 & 9 & 9 & 12 &12 &12\\ 5 & 5 & 5 & 6 & 6 &6 \\ 10 &5*z& 10& 12 &6*z &12 \\ 15 &15 & 15& 18 &18 &18 \end{array} \right) \end{array} \end{math} \end{flushleft} \section{Higher Algorithms} The Cholesky decomposition of a matrix can be calculated with the function \f{CHOLESKY}. It returns \{${\cal L,U}$\} where ${\cal L}$ is a lower matrix, ${\cal U}$ is an upper matrix, and ${\cal A} = {\cal LU}$, and ${\cal U} = {\cal L}^T$. Gram--Schmidt orthonormalisation can be calculated by \f{GRAM\_SCHMIDT}\ttindex{GRAM\_SCHMIDT}. It accepts a list of linearly independent vectors, written as lists, and returns a list of orthogonal normalised vectors. {\small\begin{verbatim} gram_schmidt({{1,0,0},{1,1,0},{1,1,1}}); {{1,0,0},{0,1,0},{0,0,1}} gram_schmidt({{1,2},{3,4}}); 1 2 2*sqrt(5) - sqrt(5) {{---------,---------},{-----------,------------}} sqrt(5) sqrt(5) 5 5 \end{verbatim}} The LU decomposition of a real or imaginary matrix with numeric entries is performed by {\tt LU\_DECOM(${\cal A}$)}.\ttindex{LU\_DECOM} It returns \{${\cal L,U}$\} where ${\cal L}$ is a lower diagonal matrix, ${\cal U}$ an upper diagonal matrix and ${\cal A} = {\cal LU}$. Note: the algorithm used can swap the rows of ${\cal A}$ during the calculation. This means that ${\cal LU}$ does not equal ${\cal A}$ but a row equivalent of it. Due to this, {\tt lu\_decom} returns \{${\cal L,U}$,vec\}. 
The call {\tt CONVERT(${\cal A}$,vec)}\ttindex{CONVERT} will return the
matrix that has been decomposed, {\em i.e.\ } ${\cal LU} = $
{\tt convert(${\cal A}$,vec)}.

\begin{flushleft}
\hspace*{0.175in}
\begin{math}
{\cal K} =
\left( \begin{array}{ccc} 1 & 3 & 5 \\ -4 & 3 & 7 \\ 8 & 6 & 4
\end{array} \right)
\end{math}
\end{flushleft}

\begin{flushleft}
%\hspace*{0.1in}
\begin{math}
\begin{array}{cccc}
{\tt lu} := {\tt lu\_decom}({\cal K}) & = &
\left\{
\left( \begin{array}{ccc} 8 & 0 & 0 \\ -4 & 6 & 0 \\ 1 & 2.25 & 1.125
\end{array} \right),
\left( \begin{array}{ccc} 1 & 0.75 & 0.5 \\ 0 & 1 & 1.5 \\ 0 & 0 & 1
\end{array} \right),
[\; 3 \; 2 \; 3 \; ]
\right\}
\end{array}
\end{math}
\end{flushleft}

{\tt PSEUDO\_INVERSE}\ttindex{PSEUDO\_INVERSE}, also known as the
Moore--Penrose inverse\index{Moore--Penrose inverse}, computes the
pseudo inverse of ${\cal A}$. Given the singular value decomposition of
${\cal A}$, {\em i.e.\ } ${\cal A} = {\cal U} \sum {\cal V}^T$, the
pseudo inverse ${\cal A}^{-1}$ is defined by
${\cal A}^{-1} = {\cal V} \sum^{-1} {\cal U}^T$. Thus
${\cal A}\,*\,${\tt pseudo\_inverse}$({\cal A}) = {\cal I}$ (provided
the rows of ${\cal A}$ are linearly independent).

\begin{flushleft}
\hspace*{0.1in}
\begin{math}
\begin{array}{ccc}
{\tt pseudo\_inverse}({\cal A}) & = &
\left( \begin{array}{cc} -0.2 & 0.1 \\ -0.05 & 0.05 \\ 0.1 & 0 \\
0.25 & -0.05 \end{array} \right)
\end{array}
\end{math}
\end{flushleft}

\label{simplex}
The simplex linear programming algorithm\index{Simplex Algorithm} for
maximising or minimising a function subject to linear inequalities can
be used with the function \f{SIMPLEX}\ttindex{SIMPLEX}. It requires
three arguments: the first indicates whether the action is to maximise
or minimise, the second is the objective function to be optimised, and
the last is a list of linear inequalities. It returns \{optimal value,
\{values of the variables at this optimum\}\}. The algorithm assumes
that all the variables are non-negative.

\begin{addtolength}{\leftskip}{0.22in}
%\begin{math}
{\tt simplex($max,x+y,\{x>=10,y>=20,x+y<=25\}$);}
%\end{math}

{\tt ***** Error in simplex: Problem has no feasible solution.}

\vspace*{0.2in}

\parbox[t]{0.96\linewidth}{\tt simplex($max,10x+5y+5.5z,\{5x+3z<=200,
x+0.1y+0.5z<=12$,\\
\hspace*{0.55in} $0.1x+0.2y+0.3z<=9, 30x+10y+50z<=1500\}$);}

\vspace*{0.1in}

{\tt $\{525.0,\{x=40.0,y=25.0,z=0\}$\}}
\end{addtolength}

{\tt SVD}\ttindex{SVD} computes the singular value decomposition of
${\cal A}$ with numeric entries. It returns
\{${\cal U},\sum,{\cal V}$\} where
${\cal A} = {\cal U} \sum {\cal V}^T$ and
$\sum = diag(\sigma_{1}, \ldots ,\sigma_{n}). \; \sigma_{i}$ for
$i= (1 \ldots n)$ are the singular values of ${\cal A}$. The singular
values of ${\cal A}$ are the non-negative square roots of the
eigenvalues of ${\cal A}^T {\cal A}$. ${\cal U}$ and ${\cal V}$ are such
that ${\cal UU}^T = {\cal VV}^T = {\cal V}^T {\cal V} = {\cal I}_n$.

\begin{flushleft}
\hspace*{0.175in}
\begin{math}
{\cal Q} =
\left( \begin{array}{cc} 1 & 3 \\ -4 & 3 \end{array} \right)
\end{math}
\end{flushleft}

\begin{eqnarray}
\hspace*{0.1in}
{\tt svd({\cal Q})} & = &
\left\{
\left( \begin{array}{cc} 0.289784 & 0.957092 \\ -0.957092 & 0.289784
\end{array} \right),
\left( \begin{array}{cc} 5.149162 & 0 \\ 0 & 2.913094
\end{array} \right), \right. \nonumber \\
& & \left. \: \; \,
\left( \begin{array}{cc} -0.687215 & 0.726453 \\ -0.726453 & -0.687215
\end{array} \right)
\right\} \nonumber
\end{eqnarray}

{\tt TRIANG\_ADJOINT}\ttindex{TRIANG\_ADJOINT} computes the
triangularizing adjoint of the given matrix.
The triangularizing adjoint is a lower triangular matrix. The
multiplication of the triangularizing adjoint with the given matrix
results in an upper triangular matrix. The $i$-th diagonal entry of
this matrix is the $i$-th leading principal minor of the given matrix,
{\em i.e.\ } the determinant of its leading $i \times i$ submatrix.

\begin{flushleft}
\hspace*{0.1in}
\begin{math}
\begin{array}{ccc}
{\tt triang\_adjoint}({\cal A}) & = &
\left( \begin{array}{ccc} 1 & 0 & 0 \\ -4 & 1 & 0 \\ -3 & 6 & -3
\end{array} \right)
\end{array}
\end{math}
\end{flushleft}

The multiplication of this matrix with ${\cal A}$ results in an upper
triangular matrix.

\begin{flushleft}
\hspace*{0.1in}
\begin{math}
\begin{array}{cccc}
\left( \begin{array}{ccc} 1 & 0 & 0 \\ -4 & 1 & 0 \\ -3 & 6 & -3
\end{array} \right) &
\left( \begin{array}{ccc} 1 & 2 & 3 \\ 4 & 5 & 6 \\ 7 & 8 & 9
\end{array} \right) & = &
\left( \begin{array}{ccc} 1 & 2 & 3 \\ 0 & -3 & -6 \\ 0 & 0 & 0
\end{array} \right)
\end{array}
\end{math}
\end{flushleft}

\section{Fast Linear Algebra}

By turning the {\tt FAST\_LA}\ttindex{FAST\_LA} switch on, the speed of
the following functions will be increased:

\begin{tabular}{l l l l}
add\_columns & add\_rows & augment\_columns & column\_dim \\
copy\_into & make\_identity & matrix\_augment & matrix\_stack\\
minor & mult\_column & mult\_row & pivot \\
remove\_columns & remove\_rows & rows\_pivot & squarep \\
stack\_rows & sub\_matrix & swap\_columns & swap\_entries\\
swap\_rows & symmetricp
\end{tabular}

The increase in speed will be insignificant unless you are making
thousands of calls. When using this switch, error checking is
minimised, and thus illegal input may give strange error messages.

\chapter{MATHML: MathML Interface for REDUCE}
\label{MATHML}
\typeout{{MATHML: MathML Interface for REDUCE}}

{\footnotesize
\begin{center}
Luis Alvarez-Sobreviela \\
Konrad-Zuse-Zentrum f\"ur Informationstechnik Berlin \\
Takustra\"se 7 \\
D-14195 Berlin-Dahlem, Germany \\
\end{center}
}
\ttindex{MATHML}

MathML is intended to facilitate the use and re-use of mathematical and
scientific content on the Web, and for other applications such as
computer algebra systems. \\
This package contains the MathML-{\REDUCE}\ interface. This interface
provides an easy-to-use set of commands that allow the user to evaluate
and output MathML. The principal features of this package can be
summarised as:
\begin{itemize}
\item Evaluation of MathML code. Allows {\REDUCE}\ to parse MathML
expressions and evaluate them.
\item Generation of MathML compliant code. Provides the printing of
REDUCE expressions in MathML source code, to be used directly in web
page production.
\end{itemize}

We assume that the reader is familiar with MathML. If not, the
specification\footnote{This specification is subject to change, since
it is not yet a final draft. During the two-month period in which this
package was developed, the specification changed, forcing a review of
the code. This package is based on the Nov 98 version.} is available
at:

\qquad {\tt http://www.w3.org/TR/WD-math/}

The MathML-{\REDUCE} interface package is loaded by supplying
{\tt load mathml;}.

\subsubsection{Switches}

There are two switches which can be used either separately or in
combination. These are {\tt MATHML} and {\tt BOTH}. Their use can be
described as follows:

\begin{description}
\item[{\tt mathml}:]\ttindex{MATHML} All output will be printed in
MathML.
\item[{\tt both}:]\ttindex{BOTH} All output will be printed in both
MathML and normal REDUCE.
\item[{\tt web}:]\ttindex{WEB} All output will be printed within an HTML $<$embed$>$ tag. This is for direct use in an HTML web page. Only works when {\tt mathml} is on. \end{description} MathML has often been said to be too verbose. If {\tt BOTH} is on, an easy interpretation of the results is possible, improving MathML readability. \subsubsection{Operators of Package MathML} \begin{description} \item[\f{mml}(filename):]\ttindex{MML} This function opens and reads the file filename containing the MathML. \item[\f{parseml}():]\ttindex{PARSEML} To introduce a series of valid mathml tokens you can use this function. It takes no arguments and will prompt you to enter mathml tags stating with $<$mathml$>$ and ending with $<$/mathml$>$. It returns an expression resulting from evaluating the input. \end{description} {\bf Example} {\small\begin{verbatim} 1: load mathml; 3: on both; 3: int(2*x+1,x);; x*(x + 1) <mathml> <apply><plus/> <apply><power/> <ci>x</ci> <cn type="integer">2</cn> </apply> <ci>x</ci> </apply> </mathml> 4: \end{verbatim}} \chapter{MODSR: Modular solve and roots} \label{MODSR} \typeout{{MODSR: Modular solve and roots}} {\footnotesize \begin{center} Herbert Melenk \\ Konrad--Zuse--Zentrum f\"ur Informationstechnik Berlin \\ Takustra\"se 7 \\ D--14195 Berlin--Dahlem, Germany \\[0.05in] e--mail: melenk@zib.de \end{center} } \ttindex{MODSR} This package supports solve (\f{M\_SOLVE}\ttindex{M\_SOLVE}) and roots (\f{M\_ROOTS}\ttindex{M\_ROOTS}) operators for modular polynomials and modular polynomial systems. The moduli need not be primes. {\tt M\_SOLVE} requires a modulus to be set. {\tt M\_ROOTS} takes the modulus as a second argument. For example: {\small\begin{verbatim} on modular; setmod 8; m_solve(2x=4); -> {{X=2},{X=6}} m_solve({x^2-y^3=3}); -> {{X=0,Y=5}, {X=2,Y=1}, {X=4,Y=5}, {X=6,Y=1}} m_solve({x=2,x^2-y^3=3}); -> {{X=2,Y=1}} off modular; m_roots(x^2-1,8); -> {1,3,5,7} m_roots(x^3-x,7); -> {0,1,6} \end{verbatim}} \chapter[MRVLIMIT: Limits of ``exp-log'' functions]% {MRVLIMIT: Package for Computing Limits of "Exp-Log" Functions} \label{MRVLIMIT} \typeout{{MRVLIMIT: Package for Computing Limits of "Exp-Log" Functions}} {\footnotesize \begin{center} Neil Langmead \\ Konrad-Zuse-Zentrum f\"ur Informationstechnik Berlin (ZIB) \\ Takustra\"se 7 \\ D - 14195 Berlin-Dahlem, Germany \\ \end{center} } \ttindex{MRVLIMIT} %\markboth{CHAPTER \ref{MRVLIMIT}. MRVLIMIT: LIMITS OF ``EXP-LOG'' FUNCTIONS}{} %\thispagestyle{myheadings} Using the LIMITS package to compute the limits of functions containing exponential and logarithmic expressions may raise a problem. For the computation of indefinite forms (such as $0/0$,or $\frac{\infty}{\infty}$) L'Hospital's rule may only be applied a finite number of times in a CAS. In REDUCE it is applied 3 times. This algorithm of Dominik Gruntz of the ETH Z\"urich solves this particular problem, and enables the computation of many more limit calculations in REDUCE. {\small\begin{verbatim} 1: load limits; 2: limit(x^7/e^x,x,infinity); 7 x limit(----,x,infinity) x e 3: load mrvlimit; 4: mrv_limit(x^7/e^x,x,infinity); 0 \end{verbatim}} For this example, the MRVLIMIT package is able to compute the correct limit. \\ \ttindex{MRV\_LIMIT} \vspace{.1in} \noindent {\tt MRV\_LIMIT}(EXPRN:{\em algebraic}, VAR:{\em kernel}, LIMPOINT:{\em algebraic}):{\em algebraic} \ttindex{MRV\_LIMIT} \par The result is the limit of EXPRN as VAR approaches LIMPOINT. 
\vspace{.1in} A switch {\tt TRACELIMIT} is available to inform the user about the computed Taylor expansion, all recursive calls and the return value of the internally called function {\tt MRV}. \\ \\ {\bf Examples}: \\ {\small\begin{verbatim} 5: b:=e^x*(e^(1/x-e^-x)-e^(1/x)); -1 - x x + x - e b:= e *(e - 1) 6: mrv_limit(b,x,infinity); -1 -1 7: ex:= - log(log(log(log(x))) + log(x)) *log(x) *(log(log(x)) - log(log(log(x)) + log(x))); - log(x)*(log(log(x)) - log(log(log(x)) + log(x))) ex:= ----------------------------------------------------- log(log(log(log(x))) + log(x)) 8: off mcd; 9: mrv_limit(ex,x,infinity); 1 \end{verbatim}} \chapter[NCPOLY: Ideals in non--comm case]% {NCPOLY: Non--commutative polynomial ideals} \label{NCPOLY} \typeout{{NCPOLY: Non--commutative polynomial ideals}} {\footnotesize \begin{center} Herbert Melenk\\ Konrad--Zuse--Zentrum f\"ur Informationstechnik Berlin \\ Takustra\"se 7 \\ D--14195 Berlin--Dahlem, Germany \\[0.05in] e--mail: melenk@zib.de \\[0.1in] Joachim Apel\\ Institut f\"ur Informatik, Universit\"at Leipzig \\ Augustusplatz 10--11\\ D--04109 Leipzig, Germany \\[0.05in] e--mail: apel@informatik.uni--leipzig.de \end{center} } \ttindex{NCPOLY}\index{Groebner Bases} \REDUCE\ supports a very general mechanism for computing with objects under a non--commutative multiplication, where commutator relations must be introduced explicitly by rule sets when needed. The package {\bf NCPOLY} allows the user to set up automatically a consistent environment for computing in an algebra where the non--commutativity is defined by Lie-bracket commutators. The package uses the \REDUCE\ {\bf noncom} mechanism for elementary polynomial arithmetic; the commutator rules are automatically computed from the Lie brackets. Polynomial arithmetic may be performed directly, including {\bf division} and {\bf factorisation}. Additionally {\bf NCPOLY} supports computations in a one sided ideal (left or right), especially one sided {\bf Gr\"obner} bases and {\bf polynomial reduction}. \section{Setup, Cleanup} Before the computations can start the environment for a non--commutative computation must be defined by a call to {\tt nc\_setup}:\ttindex{nc\_setup} {\small\begin{verbatim} nc_setup(<vars>[,<comms>][,<dir>]); \end{verbatim}} where $<vars>$ is a list of variables; these must include the non--commutative quantities. $<comms>$ is a list of equations \verb&<u>*<v> - <v>*<u>=<rh>& where $<u>$ and $<v>$ are members of $<vars>$, and $<rh>$ is a polynomial. $<dir>$ is either $left$ or $right$ selecting a left or a right one sided ideal. The initial direction is $left$. {\tt nc\_setup} generates from $<comms>$ the necessary rules to support an algebra where all monomials are ordered corresponding to the given variable sequence. All pairs of variables which are not explicitly covered in the commutator set are considered as commutative and the corresponding rules are also activated. The second parameter in {\tt nc\_setup} may be omitted if the operator is called for the second time, {\em e.g.\ } with a reordered variable sequence. In such a case the last commutator set is used again. Remarks: \begin{itemize} \item The variables need not be declared {\bf noncom} - {\bf nc\_setup} performs all necessary declarations. \item The variables need not be formal operator expressions; {\bf nc\_setup} encapsulates a variable $x$ internally as \verb+nc!*(!_x)+ expressions anyway where the operator $nc!*$ keeps the noncom property. 
\item The commands {\bf order} and {\bf korder} should be avoided
because {\bf nc\_setup} sets these such that the computation results
are printed in the correct term order.
\end{itemize}

Example:
{\small\begin{verbatim}
nc_setup({KK,NN,k,n},
         {NN*n-n*NN= NN, KK*k-k*KK= KK});

NN*N;        ->   NN*N
N*NN;        ->   NN*N - NN

nc_setup({k,n,KK,NN});

NN*N - NN;   ->   N*NN
\end{verbatim}}
Here $KK,NN,k,n$ are non--commutative variables where the commutators
are described as $[NN,n]=NN$, $[KK,k]=KK$.

The current term order must be compatible with the commutators: the
product $<u>*<v>$ must precede all terms on the right hand side $<rh>$
under the current term order. Consequently:
\begin{itemize}
\item the maximal degree of $<u>$ or $<v>$ in $<rh>$ is 1,
\item in a total degree ordering the total degree of $<rh>$ may not be
higher than 1,
\item in an elimination degree order ({\em e.g.\ }$lex$) all variables
in $<rh>$ must be below the minimum of $<u>$ and $<v>$.
\item If $<rh>$ does not contain any variables, or contains at most
$<u>$ or $<v>$ themselves, any term order can be selected.
\end{itemize}

To use the non--commutative variables or results from non--commutative
computations later in commutative operations it might be necessary to
switch off the non--commutative evaluation mode, because not all
operators in \REDUCE\ are prepared for that environment. In such a case
use the command\ttindex{nc\_cleanup}
{\small\begin{verbatim}
   nc_cleanup;
\end{verbatim}}
without parameters. It removes all internal rules and definitions which
{\tt nc\_setup} had introduced. To reactivate the non--commutative mode,
call {\tt nc\_setup} again.

\section{Left and right ideals}

A (polynomial) left ideal $L$ is defined by the axioms

$u \in L, v \in L \Longrightarrow u+v \in L$

$u \in L \Longrightarrow k*u \in L$ for an arbitrary polynomial $k$

where ``*'' is the non--commutative multiplication.

Correspondingly, a right ideal $R$ is defined by

$u \in R, v \in R \Longrightarrow u+v \in R$

$u \in R \Longrightarrow u*k \in R$ for an arbitrary polynomial $k$.

\section{Gr\"obner bases}

When a non--commutative environment has been set up by {\tt nc\_setup},
a basis for a left or right polynomial ideal can be transformed into a
Gr\"obner basis by the operator {\tt nc\_groebner}\ttindex{nc\_groebner}
{\small\begin{verbatim}
   nc_groebner(<plist>);
\end{verbatim}}
Note that the variable set and variable sequence must have been defined
beforehand in the {\tt nc\_setup} call. The term order for the
Gr\"obner calculation can be set by using the {\tt torder} declaration.
For details about {\tt torder} see the {\bf \REDUCE\ GROEBNER} manual,
or chapter~\ref{GROEBNER}.

{\small\begin{verbatim}
2: nc_setup({k,n,NN,KK},{NN*n-n*NN=NN,KK*k-k*KK=KK},left);

3: p1 := (n-k+1)*NN - (n+1);

p1 := - k*nn + n*nn - n + nn - 1

4: p2 := (k+1)*KK -(n-k);

p2 := k*kk + k - n + kk

5: nc_groebner ({p1,p2});

{k*nn - n*nn + n - nn + 1,

 k*kk + k - n + kk,

 n*nn*kk - n*kk - n + nn*kk - kk - 1}
\end{verbatim}}

Important: Do not use the operators of the GROEBNER package directly as
they would not consider the non--commutative multiplication.

\section{Left or right polynomial division}

The operator {\tt nc\_divide}\ttindex{nc\_divide} computes the one
sided quotient and remainder of two polynomials:
{\small\begin{verbatim}
   nc_divide(<p1>,<p2>);
\end{verbatim}}
The result is a list with quotient and remainder. The division is
performed as a pseudo--division, multiplying $<p1>$ by coefficients if
necessary.
The result $\{<q>,<r>\}$ is defined by the relation $<c>*<p1>=<q>*<p2> + <r>$ for direction $left$ and $<c>*<p1>=<p2>*<q> + <r>$ for direction $right$, where $<c>$ is an expression that does not contain any of the ideal variables, and the leading term of $<r>$ is lower than the leading term of $<p2>$ according to the actual term order. \section{Left or right polynomial reduction} For the computation of the one sided remainder of a polynomial modulo a given set of other polynomials the operator {\tt nc\_preduce} may be used:\ttindex{nc\_preduce} {\small\begin{verbatim} nc_preduce(<polynomial>,<plist>); \end{verbatim}} The result of the reduction is unique (canonical) if and only if $<plist>$ is a one sided Gr\"obner basis. Then the computation is at the same time an ideal membership test: if the result is zero, the polynomial is member of the ideal, otherwise not. \section{Factorisation} Polynomials in a non--commutative ring cannot be factored using the ordinary {\tt factorize} command of \REDUCE. Instead one of the operators of this section must be used:\ttindex{nc\_factorize} {\small\begin{verbatim} nc_factorize(<polynomial>); \end{verbatim}} The result is a list of factors of $<polynomial>$. A list with the input expression is returned if it is irreducible. As non--commutative factorisation is not unique, there is an additional operator which computes all possible factorisations\ttindex{nc\_factorize\_all} {\small\begin{verbatim} nc_factorize_all(<polynomial>); \end{verbatim}} The result is a list of factor decompositions of $<polynomial>$. If there are no factors at all the result list has only one member which is a list containing the input polynomial. \section{Output of expressions} It is often desirable to have the commutative parts (coefficients) in a non--commutative operation condensed by factorisation. The operator\ttindex{nc\_compact} {\small\begin{verbatim} nc_compact(<polynomial>) \end{verbatim}} collects the coefficients to the powers of the lowest possible non-commutative variable. {\small\begin{verbatim} load_package ncpoly; nc_setup({n,NN},{NN*n-n*NN=NN})$ p1 := n**4 + n**2*nn + 4*n**2 + 4*n*nn + 4*nn + 4; 4 2 2 p1 := n + n *nn + 4*n + 4*n*nn + 4*nn + 4 nc_compact p1; 2 2 2 (n + 2) + (n + 2) *nn \end{verbatim}} \chapter[NORMFORM: matrix normal forms]% {NORMFORM: Computation of matrix normal forms} \label{NORMFORM} \typeout{{NORMFORM: Computation of matrix normal forms}} {\footnotesize \begin{center} Matt Rebbeck \\ Konrad--Zuse--Zentrum f\"ur Informationstechnik Berlin \\ Takustra\"se 7 \\ D--14195 Berlin--Dahlem, Germany \\[0.05in] \end{center} } \ttindex{NORMFORM} This package contains routines for computing the following normal forms of matrices: \begin{itemize} \item smithex\_int \item smithex \item frobenius \item ratjordan \item jordansymbolic \item jordan. \end{itemize} By default all calculations are carried out in ${\cal Q}$ (the rational numbers). For {\tt smithex}, {\tt frobenius}, {\tt ratjordan}, {\tt jordansymbolic}, and {\tt jordan}, this field can be extended to an algebraic number field using ARNUM (chapter~\ref{ARNUM}). The {\tt frobenius}, {\tt ratjordan}, and {\tt jordansymbolic} normal forms can also be computed in a modular base. \section{Smithex} \ttindex{smithex} {\tt Smithex}(${\cal A},\, x$) computes the Smith normal form ${\cal S}$ of the matrix ${\cal A}$. It returns \{${\cal S}, {\cal P}, {\cal P}^{-1}$\} where ${\cal S}, {\cal P}$, and ${\cal P}^{-1}$ are such that ${\cal P S P}^{-1} = {\cal A}$. 
${\cal A}$ is a rectangular matrix of univariate polynomials in $x$ where $x$ is the variable name. {\tt load\_package normform;} \begin{displaymath} {\cal A} = \left( \begin{array}{cc} x & x+1 \\ 0 & 3*x^2 \end{array} \right) \end{displaymath} \begin{displaymath} \hspace{-0.5in} \begin{array}{ccc} {\tt smithex}({\cal A},\, x) & = & \left\{ \left( \begin{array}{cc} 1 & 0 \\ 0 & x^3 \end{array} \right), \left( \begin{array}{cc} 1 & 0 \\ 3*x^2 & 1 \end{array} \right), \left( \begin{array}{cc} x & x+1 \\ -3 & -3 \end{array} \right) \right\} \end{array} \end{displaymath} \section{Smithex\_int} \ttindex{smithex\_int} Given an $n$ by $m$ rectangular matrix ${\cal A}$ that contains {\it only} integer entries, {\tt smithex\_int}(${\cal A}$) computes the Smith normal form ${\cal S}$ of ${\cal A}$. It returns \{${\cal S}, {\cal P}, {\cal P}^{-1}$\} where ${\cal S}, {\cal P}$, and ${\cal P}^{-1}$ are such that ${\cal P S P}^{-1} = {\cal A}$. {\tt load\_package normform;} \begin{displaymath} {\cal A} = \left( \begin{array}{ccc} 9 & -36 & 30 \\ -36 & 192 & -180 \\ 30 & -180 & 180 \end{array} \right) \end{displaymath} {\tt smithex\_int}(${\cal A}$) = \begin{center} \begin{displaymath} \left\{ \left( \begin{array}{ccc} 3 & 0 & 0 \\ 0 & 12 & 0 \\ 0 & 0 & 60 \end{array} \right), \left( \begin{array}{ccc} -17 & -5 & -4 \\ 64 & 19 & 15 \\ -50 & -15 & -12 \end{array} \right), \left( \begin{array}{ccc} 1 & -24 & 30 \\ -1 & 25 & -30 \\ 0 & -1 & 1 \end{array} \right) \right\} \end{displaymath} \end{center} \section{Frobenius} \ttindex{frobenius} {\tt Frobenius}(${\cal A}$) computes the Frobenius normal form ${\cal F}$ of the matrix ${\cal A}$. It returns \{${\cal F}, {\cal P}, {\cal P}^{-1}$\} where ${\cal F}, {\cal P}$, and ${\cal P}^{-1}$ are such that ${\cal P F P}^{-1} = {\cal A}$. ${\cal A}$ is a square matrix. {\tt load\_package normform;} \begin{displaymath} {\cal A} = \left( \begin{array}{cc} \frac{-x^2+y^2+y}{y} & \frac{-x^2+x+y^2-y}{y} \\ \frac{-x^2-x+y^2+y}{y} & \frac{-x^2+x+y^2-y} {y} \end{array} \right) \end{displaymath} {\tt frobenius}(${\cal A}$) = \begin{center} \begin{displaymath} \left\{ \left( \begin{array}{cc} 0 & \frac{x*(x^2-x-y^2+y)}{y} \\ 1 & \frac{-2*x^2+x+2*y^2}{y} \end{array} \right), \left( \begin{array}{cc} 1 & \frac{-x^2+y^2+y}{y} \\ 0 & \frac{-x^2-x+y^2+y}{y} \end{array} \right), \left( \begin{array}{cc} 1 & \frac{-x^2+y^2+y}{x^2+x-y^2-y} \\ 0 & \frac{-y}{x^2+x-y^2-y} \end{array} \right) \right\} \end{displaymath} \end{center} \section{Ratjordan} \ttindex{ratjordan} {\tt Ratjordan}(${\cal A}$) computes the rational Jordan normal form ${\cal R}$ of the matrix ${\cal A}$. It returns \{${\cal R}, {\cal P}, {\cal P}^{-1}$\} where ${\cal R}, {\cal P}$, and ${\cal P}^{-1}$ are such that ${\cal P R P}^{-1} = {\cal A}$. ${\cal A}$ is a square matrix. {\tt load\_package normform;} \begin{displaymath} {\cal A} = \left( \begin{array}{cc} x+y & 5 \\ y & x^2 \end{array} \right) \end{displaymath} {\tt ratjordan}(${\cal A}$) = \begin{center} \begin{displaymath} \left\{ \left( \begin{array}{cc} 0 & -x^3-x^2*y+5*y \\ 1 & x^2+x+y \end{array} \right), \left( \begin{array}{cc} 1 & x+y \\ 0 & y \end{array} \right), \left( \begin{array}{cc} 1 & \frac{-(x+y)}{y} \\ 0 & \hspace{0.2in} \frac{1}{y} \end{array} \right) \right\} \end{displaymath} \end{center} \section{Jordansymbolic} \ttindex{jordansymbolic} {\tt Jordansymbolic}(${\cal A}$) \hspace{0in} computes the Jordan normal form ${\cal J}$of the matrix ${\cal A}$. 
It returns \{${\cal J}, {\cal L}, {\cal P}, {\cal P}^{-1}$\}, where ${\cal J}, {\cal P}$, and ${\cal P}^{-1}$ are such that ${\cal P J P}^ {-1} = {\cal A}$. ${\cal L}$ = \{~{\it ll},~$\xi$~\}, where $\xi$ is a name and {\it ll} is a list of irreducible factors of ${\it p}(\xi)$. ${\cal A}$ is a square matrix. {\tt load\_package normform;}\\ \begin{displaymath} {\cal A} = \left( \begin{array}{cc} 1 & y \\ y^2 & 3 \end{array} \right) \end{displaymath} {\tt jordansymbolic}(${\cal A}$) = \begin{eqnarray} & & \left\{ \left( \begin{array}{cc} \xi_{11} & 0 \\ 0 & \xi_{12} \end{array} \right) , \left\{ \left\{ -y^3+\xi^2-4*\xi+3 \right\}, \xi \right\}, \right. \nonumber \\ & & \hspace{0.1in} \left. \left( \begin{array}{cc} \xi_{11} -3 & \xi_{12} -3 \\ y^2 & y^2 \end{array} \right), \left( \begin{array}{cc} \frac{\xi_{11} -2} {2*(y^3-1)} & \frac{\xi_{11} + y^3 -1}{2*y^2*(y^3+1)} \\ \frac{\xi_{12} -2}{2*(y^3-1)} & \frac{\xi_{12}+y^3-1}{2*y^2*(y^3+1)} \end{array} \right) \right\} \nonumber \end{eqnarray} \vspace{0.2in} \begin{flushleft} \begin{math} {\tt solve(-y^3+xi^2-4*xi+3,xi)}${\tt ;}$ \end{math} \end{flushleft} \vspace{0.1in} \begin{center} \begin{math} \{ \xi = \sqrt{y^3+1} + 2,\, \xi = -\sqrt{y^3+1}+2 \} \end{math} \end{center} \vspace{0.1in} \begin{math} {\tt {\cal J} = sub}{\tt (}{\tt \{ xi(1,1)=sqrt(y^3+1)+2,\, xi(1,2) = -sqrt(y^3+1)+2\},} \end{math} \\ \hspace*{0.29in} {\tt first jordansymbolic (${\cal A}$));} \vspace{0.2in} \begin{displaymath} {\cal J} = \left( \begin{array}{cc} \sqrt{y^3+1} + 2 & 0 \\ 0 & -\sqrt{y^3+1} + 2 \end{array} \right) \end{displaymath} \section{Jordan} \ttindex{jordan} {\tt Jordan}(${\cal A}$) computes the Jordan normal form ${\cal J}$ of the matrix ${\cal A}$. It returns \{${\cal J}, {\cal P}, {\cal P}^{-1}$\}, where ${\cal J}, {\cal P}$, and ${\cal P}^{-1}$ are such that ${\cal P J P}^ {-1} = {\cal A}$. ${\cal A}$ is a square matrix. {\tt load\_package normform;} \begin{displaymath} {\cal A} = \left( \begin{array}{cccccc} -9 & -21 & -15 & 4 & 2 & 0 \\ -10 & 21 & -14 & 4 & 2 & 0 \\ -8 & 16 & -11 & 4 & 2 & 0 \\ -6 & 12 & -9 & 3 & 3 & 0 \\ -4 & 8 & -6 & 0 & 5 & 0 \\ -2 & 4 & -3 & 0 & 1 & 3 \end{array} \right) \end{displaymath} \begin{flushleft} {\tt ${\cal J}$ = first jordan$({\cal A})$;} \end{flushleft} \begin{displaymath} {\cal J} = \left( \begin{array}{cccccc} 3 & 0 & 0 & 0 & 0 & 0 \\ 0 & 3 & 0 & 0 & 0 & 0 \\ 0 & 0 & 1 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 0 & i+2 & 0 \\ 0 & 0 & 0 & 0 & 0 & -i+2 \end{array} \right) \end{displaymath} \chapter{NUMERIC: Solving numerical problems} \label{NUMERIC} \typeout{{NUMERIC: Solving numerical problems}} {\footnotesize \begin{center} Herbert Melenk \\ Konrad--Zuse--Zentrum f\"ur Informationstechnik Berlin \\ Takustra\"se 7 \\ D--14195 Berlin--Dahlem, Germany \\[0.05in] e--mail: melenk@zib.de \end{center} } \ttindex{NUMERIC} \ttindex{NUM\_SOLVE}\index{Newton's method}\ttindex{NUM\_ODESOLVE} \ttindex{BOUNDS}\index{Chebyshev fit} \ttindex{NUM\_MIN}\index{Minimum}\ttindex{NUM\_INT}\index{Quadrature} The {\small NUMERIC} package implements some numerical (approximative) algorithms for \REDUCE\, based on the \REDUCE\ rounded mode arithmetic. These algorithms are implemented for standard cases. They should not be called for ill-conditioned problems; please use standard mathematical libraries for these. 
\section{Syntax} \subsection{Intervals, Starting Points} Intervals are generally coded as lower bound and upper bound connected by the operator \verb+`..'+, usually associated to a variable in an equation.\index{Interval} {\small\begin{verbatim} x= (2.5 .. 3.5) \end{verbatim}} means that the variable x is taken in the range from 2.5 up to 3.5. Note, that the bounds can be algebraic expressions, which, however, must evaluate to numeric results. In cases where an interval is returned as the result, the lower and upper bounds can be extracted by the \verb+PART+ operator as the first and second part respectively. A starting point is specified by an equation with a numeric righthand side, {\small\begin{verbatim} x=3.0 \end{verbatim}} If for multivariate applications several coordinates must be specified by intervals or as a starting point, these specifications can be collected in one parameter (which is then a list) or they can be given as separate parameters alternatively. The list form is more appropriate when the parameters are built from other \REDUCE\ calculations in an automatic style, while the flat form is more convenient for direct interactive input. \subsection{Accuracy Control} The keyword parameters $accuracy=a$ and $iterations=i$, where $a$ and $i$ must be positive integer numbers, control the iterative algorithms: the iteration is continued until the local error is below $10^{-a}$; if that is impossible within $i$ steps, the iteration is terminated with an error message. The values reached so far are then returned as the result. \section{Minima} The function to be minimised must have continuous partial derivatives with respect to all variables. The starting point of the search can be specified; if not, random values are taken instead. The steepest descent algorithms in general find only local minima. Syntax:\ttindex{NUM\_MIN} \begin{description} \item[NUM\_MIN] $(exp, var_1[=val_1] [,var_2[=val_2] \ldots]$ $ [,accuracy=a][,iterations=i]) $ or \item[NUM\_MIN] $(exp, \{ var_1[=val_1] [,var_2[=val_2] \ldots] \}$ $ [,accuracy=a][,iterations=i]) $ where $exp$ is a function expression, $var_1, var_2, \ldots$ are the variables in $exp$ and $val_1,val_2, \ldots$ are the (optional) start values. NUM\_MIN tries to find the next local minimum along the descending path starting at the given point. The result is a list with the minimum function value as first element followed by a list of equations, where the variables are equated to the coordinates of the result point. \end{description} Examples: {\small\begin{verbatim} num_min(sin(x)+x/5, x); {4.9489585606,{X=29.643767785}} num_min(sin(x)+x/5, x=0); { - 1.3342267466,{X= - 1.7721582671}} % Rosenbrock function (well known as hard to minimize). fktn := 100*(x1**2-x2)**2 + (1-x1)**2; num_min(fktn, x1=-1.2, x2=1, iterations=200); {0.00000021870228295,{X1=0.99953284494,X2=0.99906807238}} \end{verbatim}} \section{Roots of Functions/ Solutions of Equations} An adaptively damped Newton iteration is used to find an approximative zero of a function, a function vector or the solution of an equation or an equation system. The expressions must have continuous derivatives for all variables. A starting point for the iteration can be given. If not given, random values are taken instead. If the number of forms is not equal to the number of variables, the Newton method cannot be applied. Then the minimum of the sum of absolute squares is located instead. 
With {\tt ON COMPLEX}, solutions with imaginary parts can be found if either the expression(s) or the starting point contains a nonzero imaginary part. Syntax:\ttindex{NUM\_SOLVE} \begin{description} \item[NUM\_SOLVE] $(exp_1, var_1[=val_1][,accuracy=a][,iterations=i])$ or \item[NUM\_SOLVE] $(\{exp_1,\ldots,exp_n\}, var_1[=val_1],\ldots,var_n[=val_n]$ \item[\ \ \ \ \ \ \ \ ]$[,accuracy=a][,iterations=i])$ or \item[NUM\_SOLVE] $(\{exp_1,\ldots,exp_n\}, \{var_1[=val_1],\ldots,var_n[=val_n]\}$ \item[\ \ \ \ \ \ \ \ ]$[,accuracy=a][,iterations=i])$ where $exp_1, \ldots,exp_n$ are function expressions, $var_1, \ldots, var_n$ are the variables, $val_1, \ldots, val_n$ are optional start values. NUM\_SOLVE tries to find a zero/solution of the expression(s). Result is a list of equations, where the variables are equated to the coordinates of the result point. The Jacobian matrix is stored as a side effect in the shared variable JACOBIAN.\ttindex{JACOBIAN} \end{description} Example: {\small\begin{verbatim} num_solve({sin x=cos y, x + y = 1},{x=1,y=2}); {X= - 1.8561957251,Y=2.856195584} jacobian; [COS(X) SIN(Y)] [ ] [ 1 1 ] \end{verbatim}} \section{Integrals} Numerical integration uses a polyalgorithm, explained in the full documentation.\ttindex{NUM\_INT} \begin{description} \item[NUM\_INT] $(exp,var_1=(l_1 .. u_1)[,var_2=(l_2 .. u_2)\ldots]$ \item[\ \ \ \ \ \ ]$[,accuracy=a][,iterations=i])$ where $exp$ is the function to be integrated, $var_1, var_2 , \ldots$ are the integration variables, $l_1, l_2 , \ldots$ are the lower bounds, $u_1, u_2 , \ldots$ are the upper bounds. Result is the value of the integral. \end{description} Example: {\small\begin{verbatim} num_int(sin x,x=(0 .. pi)); 2.0000010334 \end{verbatim}} \section{Ordinary Differential Equations} A Runge-Kutta method of order 3 finds an approximate graph for the solution of a real initial value problem for an ordinary differential equation. Syntax:\ttindex{NUM\_ODESOLVE} \begin{description} \item[NUM\_ODESOLVE]($exp$,$depvar=dv$,$indepvar$=$(from .. to)$ $ [,accuracy=a][,iterations=i]) $ where $exp$ is the differential expression/equation, $depvar$ is an identifier representing the dependent variable (function to be found), $indepvar$ is an identifier representing the independent variable, $exp$ is an equation (or an expression implicitly set to zero) which contains the first derivative of $depvar$ wrt $indepvar$, $from$ is the starting point of integration, $to$ is the endpoint of integration (allowed to be below $from$), $dv$ is the initial value of $depvar$ at the point $indepvar=from$. The ODE $exp$ is converted into an explicit form, which is then used for a Runge-Kutta iteration over the given range. The number of steps is controlled by the value of $i$ (default: 20). If the steps are too coarse to reach the desired accuracy in the neighbourhood of the starting point, the number is increased automatically. Result is a list of pairs, each representing a point of the approximate solution of the ODE problem. \end{description} Example: {\small\begin{verbatim} num_odesolve(df(y,x)=y,y=1,x=(0 .. 1), iterations=5); {{0.0,1.0},{0.2,1.2214},{0.4,1.49181796},{0.6,1.8221064563}, {0.8,2.2255208258},{1.0,2.7182511366}} \end{verbatim}} \section{Bounds of a Function} Upper and lower bounds of a real valued function over an interval or a rectangular multivariate domain are computed by the operator \f{BOUNDS}. Some knowledge about the behaviour of special functions like ABS, SIN, COS, EXP, LOG, fractional exponentials etc.
is integrated and can be evaluated if the operator BOUNDS is called with rounded mode on (otherwise only algebraic evaluation rules are available). If BOUNDS finds a singularity within an interval, the evaluation is stopped with an error message indicating the problem part of the expression. \newpage Syntax:\ttindex{BOUNDS} \begin{description} \item[BOUNDS]$(exp,var_1=(l_1 .. u_1) [,var_2=(l_2 .. u_2) \ldots])$ \item[{\it BOUNDS}]$(exp,\{var_1=(l_1 .. u_1) [,var_2=(l_2 .. u_2)\ldots]\})$ where $exp$ is the function to be investigated, $var_1, var_2 , \ldots$ are the variables of exp, $l_1, l_2 , \ldots$ and $u_1, u_2 , \ldots$ specify the area (intervals). {\tt BOUNDS} computes upper and lower bounds for the expression in the given area. An interval is returned. \end{description} Example: {\small\begin{verbatim} bounds(sin x,x=(1 .. 2)); {-1,1} on rounded; bounds(sin x,x=(1 .. 2)); 0.84147098481 .. 1 bounds(x**2+x,x=(-0.5 .. 0.5)); - 0.25 .. 0.75 \end{verbatim}} \section{Chebyshev Curve Fitting} The operator family $Chebyshev\_\ldots$ implements approximation and evaluation of functions by the Chebyshev method. The operator {\tt Chebyshev\_fit}\ttindex{Chebyshev\_fit} computes this approximation and returns a list, which has as first element the sum expressed as a polynomial and as second element the sequence of Chebyshev coefficients ${c_i}$. {\tt Chebyshev\_df}\ttindex{Chebyshev\_df} and {\tt Chebyshev\_int}\ttindex{Chebyshev\_int} transform a Chebyshev coefficient list into the coefficients of the corresponding derivative or integral respectively. For evaluating a Chebyshev approximation at a given point in the basic interval the operator {\tt Chebyshev\_eval}\ttindex{Chebyshev\_eval} can be used. Note that {\tt Chebyshev\_eval} is based on a recurrence relation which is in general more stable than a direct evaluation of the complete polynomial. \begin{description} \item[CHEBYSHEV\_FIT] $(fcn,var=(lo .. hi),n)$ \item[CHEBYSHEV\_EVAL] $(coeffs,var=(lo .. hi),var=pt)$ \item[CHEBYSHEV\_DF] $(coeffs,var=(lo .. hi))$ \item[CHEBYSHEV\_INT] $(coeffs,var=(lo .. hi))$ where $fcn$ is an algebraic expression (the function to be fitted), $var$ is the variable of $fcn$, $lo$ and $hi$ are numerical real values which describe an interval ($lo < hi$), $n$ is the approximation order,an integer $>0$, set to 20 if missing, $pt$ is a numerical value in the interval and $coeffs$ is a series of Chebyshev coefficients, computed by one of $CHEBYSHEV\_COEFF$, $\_DF$ or $\_INT$. \end{description} Example: {\small\begin{verbatim} on rounded; w:=chebyshev_fit(sin x/x,x=(1 .. 3),5); 3 2 w := {0.03824*x - 0.2398*x + 0.06514*x + 0.9778, {0.8991,-0.4066,-0.005198,0.009464,-0.00009511}} chebyshev_eval(second w, x=(1 .. 3), x=2.1); 0.4111 \end{verbatim}} \section{General Curve Fitting} The operator {\tt NUM\_FIT}\ttindex{NUM\_FIT} finds for a set of points the linear combination of a given set of functions (function basis) which approximates the points best under the objective of the least squares criterion (minimum of the sum of the squares of the deviation). The solution is found as zero of the gradient vector of the sum of squared errors. Syntax: \begin{description} \item[NUM\_FIT] $(vals,basis,var=pts)$ where $vals$ is a list of numeric values, $var$ is a variable used for the approximation, $pts$ is a list of coordinate values which correspond to $var$, $basis$ is a set of functions varying in $var$ which is used for the approximation. 
\end{description} The result is a list containing as first element the function which approximates the given values, and as second element a list of coefficients which were used to build this function from the basis. Example: {\small\begin{verbatim} % approximate a set of factorials by a polynomial pts:=for i:=1 step 1 until 5 collect i$ vals:=for i:=1 step 1 until 5 collect for j:=1:i product j$ num_fit(vals,{1,x,x**2},x=pts); 2 {14.571428571*X - 61.428571429*X + 54.6,{54.6, - 61.428571429,14.571428571}} num_fit(vals,{1,x,x**2,x**3,x**4},x=pts); 4 3 {2.2083333234*X - 20.249999879*X 2 + 67.791666154*X - 93.749999133*X + 44.999999525, {44.999999525, - 93.749999133,67.791666154, - 20.249999879,2.2083333234}} \end{verbatim}} \section{Function Bases} The following procedures compute sets of functions for example to be used for approximation. All procedures have two parameters, the expression to be used as $variable$ (an identifier in most cases) and the order of the desired system. The functions are not scaled to a specific interval, but the $variable$ can be accompanied by a scale factor and/or a translation in order to map the generic interval of orthogonality to another ({\em e.g.\ }$(x- 1/2 ) * 2 pi$). The result is a function list with ascending order, such that the first element is the function of order zero and (for the polynomial systems) the function of order $n$ is the $n+1$-th element. \ttindex{monomial\_base}\ttindex{trigonometric\_base}\ttindex{Bernstein\_base} \ttindex{Legendre\_base}\ttindex{Laguerre\_base}\ttindex{Hermite\_base} \ttindex{Chebyshev\_base\_T}\ttindex{Chebyshev\_base\_U} {\small\begin{verbatim} monomial_base(x,n) {1,x,...,x**n} trigonometric_base(x,n) {1,sin x,cos x,sin(2x),cos(2x)...} Bernstein_base(x,n) Bernstein polynomials Legendre_base(x,n) Legendre polynomials Laguerre_base(x,n) Laguerre polynomials Hermite_base(x,n) Hermite polynomials Chebyshev_base_T(x,n) Chebyshev polynomials first kind Chebyshev_base_U(x,n) Chebyshev polynomials second kind \end{verbatim}} Example: {\small\begin{verbatim} Bernstein_base(x,5); 5 4 3 2 { - X + 5*X - 10*X + 10*X - 5*X + 1, 4 3 2 5*X*(X - 4*X + 6*X - 4*X + 1), 2 3 2 10*X *( - X + 3*X - 3*X + 1), 3 2 10*X *(X - 2*X + 1), 4 5*X *( - X + 1), 5 X } \end{verbatim}} \chapter[ODESOLVE: Ordinary differential eqns]% {ODESOLVE: \protect\\ Ordinary differential equations solver} \label{ODESOLVE} \typeout{[ODESOLVE: Ordinary differential equations solver]} {\footnotesize \begin{center} Malcolm A.H. MacCallum \\ School of Mathematical Sciences, Queen Mary and Westfield College \\ University of London \\ Mile End Road \\ London E1 4NS, England \\[0.05in] e--mail: mm@maths.qmw.ac.uk \end{center} } \ttindex{ODESOLVE} \index{ordinary differential equations} The ODESOLVE package is a solver for ordinary differential equations. At the present time it has very limited capabilities, \begin{enumerate} \item it can handle only a single scalar equation presented as an algebraic expression or equation, and \item it can solve only first-order equations of simple types, linear equations with constant coefficients and Euler equations. \end{enumerate} \noindent These solvable types are exactly those for which Lie symmetry techniques give no useful information. 
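As a brief illustration of the intended use (a minimal sketch; the calling syntax is described in the following section, and the exact form of the result may vary):

{\small\begin{verbatim}
load_package odesolve;

% dy/dx = y: the general solution contains one arbitrary
% constant, returned via the ARBCONST operator,
% e.g. {y=e**x*arbconst(1)}.
odesolve(df(y,x) = y, y, x);
\end{verbatim}}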
\section{Use} The only top-level function the user should normally invoke is: \ttindex{ODESOLVE} \vspace{.1in} \begin{tabbing} {\tt ODESOLVE}(\=EXPRN:{\em expression, equation}, \\ \>VAR1:{\em variable}, \\ \>VAR2:{\em variable}):{\em list-algebraic} \end{tabbing} \vspace{.1in} \noindent {\tt ODESOLVE} returns a list containing an equation (like {\tt SOLVE}): \begin{description} \item[EXPRN] is a single scalar expression such that EXPRN = 0 is the ordinary differential equation (ODE for short) to be solved, or is an equivalent equation. \item[VAR1] is the name of the dependent variable. \item[VAR2] is the name of the independent variable. \end{description} \noindent (For simplicity these will be called y and x in the sequel.) The returned value is a list containing the equation giving the general solution of the ODE (for simultaneous equations this will be a list of equations eventually). It will contain occurrences of the \index{ARBCONST operator} operator {\tt ARBCONST} for the arbitrary constants in the general solution. The arguments of {\tt ARBCONST} should be new, as with {\tt ARBINT} etc. in SOLVE. A counter {\tt !!ARBCONST} is used to arrange this (similar to the way {\tt ARBINT} is implemented). Some other top-level functions may be of use elsewhere, especially: \ttindex{SORTOUTODE} \vspace{.1in} \noindent{\tt SORTOUTODE}(EXPRN:{\em algebraic}, Y:{\em var}, X:{\em var}): {\em expression} \vspace{.1in} \noindent which finds the order and degree of EXPRN as a differential equation for Y with respect to X and sets the linearity and highest derivative occurring in the reserved variables ODEORDER, ODEDEGREE, ODELINEARITY and HIGHESTDERIV.\ttindex{ODEORDER}\ttindex{ODEDEGREE}\ttindex{ODELINEARITY}\ttindex{HIGHESTDERIV} An expression equivalent to the ODE is returned, or zero if EXPRN (equated to 0) is not an ODE in the given variables. \section{Commentary} The methods used by this package are described in detail in the full documentation, which should be inspected together with the examples file. \chapter[ORTHOVEC: scalars and vectors]%
{ORTHOVEC: Three-dimensional vector analysis} \label{ORTHOVEC} \typeout{{ORTHOVEC: Three-dimensional vector analysis}} {\footnotesize \begin{center} James W.~Eastwood \\ AEA Technology, Culham Laboratory \\ Abingdon \\ Oxon OX14 3DB, England \\[0.05in] e--mail: jim\_eastwood@aeat.co.uk \end{center} } \ttindex{ORTHOVEC} The ORTHOVEC package is a collection of \REDUCE\ procedures and operations which provide a simple-to-use environment for the manipulation of scalars and vectors. Operations include addition, subtraction, dot and cross products, division, modulus, div, grad, curl, laplacian, differentiation, integration, ${\bf a \cdot \nabla}$ and Taylor expansion. \section{Initialisation}\label{vstart} \ttindex{VSTART} The procedure \f{VSTART} initialises ORTHOVEC. VSTART provides a menu of standard coordinate systems:- \begin{enumerate} \index{cartesian coordinates} \item cartesian $(x, y, z) = $ {\tt (x, y, z)} \index{cylindrical coordinates} \item cylindrical $(r, \theta, z) = $ {\tt (r, th, z)} \index{spherical coordinates} \item spherical $(r, \theta, \phi) = $ {\tt (r, th, ph) } \item general $( u_1, u_2, u_3 ) = $ {\tt (u1, u2, u3) } \item others \end{enumerate} which the user selects by number. Selecting options (1)-(4) automatically sets up the coordinates and scale factors. Selecting option (5) shows the user how to select another coordinate system. If VSTART is not called, then the default cartesian coordinates are used.
ORTHOVEC may be re-initialised to a new coordinate system at any time during a given \REDUCE\ session by typing {\small\begin{verbatim} VSTART $. \end{verbatim}} \section{Input-Output} ORTHOVEC assumes all quantities are either scalars or 3 component vectors. To define a vector $a$ with components $(c_1, c_2, c_3)$ use the procedure SVEC:\ttindex{SVEC} {\small\begin{verbatim} a := svec(c1, c2, c3); \end{verbatim}} The procedure\ttindex{VOUT} \f{VOUT} (which returns the value of its argument) can be used to give labelled output of components in algebraic form: {\small\begin{verbatim} b := svec (sin(x)**2, y**2, z)$ vout(b)$ \end{verbatim}} The operator {\tt \_} can be used to select a particular component (1, 2 or 3) for output {\em e.g.} {\small\begin{verbatim} b_1 ; \end{verbatim}} \section{Algebraic Operations} Six infix operators, sum, difference, quotient, times, exponentiation and cross product, and four prefix operators, plus, minus, reciprocal and modulus are defined in ORTHOVEC. These operators can take suitable combinations of scalar and vector arguments, and in the case of scalar arguments reduce to the usual definitions of $ +, -, *, /, $ etc. The operators are represented by symbols \index{+ ! 3-D vector}\index{- ! 3-D vector}\index{/ ! 3-D vector} \index{* ! 3-D vector}\index{* ! 3-D vector}\index{"\^{} ! 3-D vector} \index{$><$ ! 3-D vector} {\small\begin{verbatim} +, -, /, *, ^, >< \end{verbatim}} \index{$><$ ! diphthong} The composite {\tt ><} is an attempt to represent the cross product symbol $\times$ in ASCII characters. If we let ${\bf v}$ be a vector and $s$ be a scalar, then valid combinations of arguments of the procedures and operators and the type of the result are as summarised below. The notation used is\\ {\em result :=procedure(left argument, right argument) } or\\ {\em result :=(left operand) operator (right operand) } . \\ \underline{Vector Addition} \\ \ttindex{VECTORPLUS}\ttindex{VECTORADD}\index{vector ! addition} \begin{tabular}{rclcrcl} {\bf v} &:=& VECTORPLUS({\bf v}) &{\rm or}& {\bf v} &:=& + {\bf v} \\ s &:=& VECTORPLUS(s) &{\rm or} & s &:=& + s \\ {\bf v} &:=& VECTORADD({\bf v},{\bf v}) &{\rm or }& {\bf v} &:=& {\bf v} + {\bf v} \\ s &:=& VECTORADD(s,s) &{\rm or }& s &:=& s + s \\ \end{tabular} \\ \underline{Vector Subtraction} \\ \ttindex{VECTORMINUS}\ttindex{VECTORDIFFERENCE}\index{vector ! subtraction} \begin{tabular}{rclcrcl} {\bf v} &:=& VECTORMINUS({\bf v}) &{\rm or}& {\bf v} &:=& - {\bf v} \\ s &:=& VECTORMINUS(s) &{\rm or} & s &:=& - s \\ {\bf v} &:=& VECTORDIFFERENCE({\bf v},{\bf v}) &{\rm or }& {\bf v} &:=& {\bf v} - {\bf v} \\ s &:=& VECTORDIFFERENCE(s,s) &{\rm or }& s &:=& s - s \\ \end{tabular} \\ \underline{Vector Division}\\ \ttindex{VECTORRECIP}\ttindex{VECTORQUOTIENT}\index{vector ! division} \begin{tabular}{rclcrcl} {\bf v} &:=& VECTORRECIP({\bf v}) &{\rm or}& {\bf v} &:=& / {\bf v} \\ s &:=& VECTORRECIP(s) &{\rm or} & s &:=& / s \\ {\bf v} &:=& VECTORQUOTIENT({\bf v},{\bf v}) &{\rm or }& {\bf v} &:=& {\bf v} / {\bf v} \\ {\bf v} &:=& VECTORQUOTIENT({\bf v}, s ) &{\rm or }& {\bf v} &:=& {\bf v} / s \\ {\bf v} &:=& VECTORQUOTIENT( s ,{\bf v}) &{\rm or }& {\bf v} &:=& s / {\bf v} \\ s &:=& VECTORQUOTIENT(s,s) &{\rm or }& s &:=& s / s \\ \end{tabular} \\ \underline{Vector Multiplication}\\ \ttindex{VECTORTIMES}\index{vector ! 
multiplication} \begin{tabular}{rclcrcl} {\bf v} &:=& VECTORTIMES( s ,{\bf v}) &{\rm or }& {\bf v} &:=& s * {\bf v} \\ {\bf v} &:=& VECTORTIMES({\bf v}, s ) &{\rm or }& {\bf v} &:=& {\bf v} * s \\ s &:=& VECTORTIMES({\bf v},{\bf v}) &{\rm or }& s &:=& {\bf v} * {\bf v} \\ s &:=& VECTORTIMES( s , s ) &{\rm or }& s &:=& s * s \\ \end{tabular} \\ \underline{Vector Cross Product} \\ \ttindex{VECTORCROSS}\index{cross product}\index{vector ! cross product} \begin{tabular}{rclcrcl} {\bf v} &:=& VECTORCROSS({\bf v},{\bf v}) &{\rm or }& {\bf v} &:=& {\bf v} $\times$ {\bf v} \\ \end{tabular} \\ \underline{Vector Exponentiation}\\ \ttindex{VECTOREXPT}\index{vector ! exponentiation} \begin{tabular}{rclcrcl} s &:=& VECTOREXPT ({\bf v}, s ) &{\rm or }& s &:=& {\bf v} \^{} s \\ s &:=& VECTOREXPT ( s , s ) &{\rm or }& s &:=& s \^{} s \\ \end{tabular} \\ \underline{Vector Modulus}\\ \ttindex{VMOD}\index{vector ! modulus} \begin{tabular}{rcl} s &:=& VMOD (s)\\ s &:=& VMOD ({\bf v}) \\ \end{tabular} \\ All other combinations of operands for these operators lead to error messages being issued. The first two instances of vector multiplication are scalar multiplication of vectors, the third is the \index{vector ! dot product}\index{vector ! inner product} \index{inner product}\index{dot product} product of two scalars and the last is the inner (dot) product. The prefix operators {\tt +, -, /} can take either scalar or vector arguments and return results of the same type as their arguments. VMOD returns a scalar. In compound expressions, parentheses may be used to specify the order of combination. If parentheses are omitted the ordering of the operators, in increasing order of precedence is {\small\begin{verbatim} + | - | dotgrad | * | >< | ^ | _ \end{verbatim}} and these are placed in the precedence list defined in \REDUCE{} after $<$. Vector divisions are defined as follows: If ${\bf a}$ and ${\bf b}$ are vectors and $c$ is a scalar, then \begin{eqnarray*} {\bf a} / {\bf b} & = & \frac{{\bf a} \cdot {\bf b}}{ \mid {\bf b} \mid^2}\\ c / {\bf a} & = & \frac{c {\bf a} }{ \mid {\bf a} \mid^2} \end{eqnarray*} Both scalar multiplication and dot products are given by the same symbol, braces are advisable to ensure the correct precedences in expressions such as $({\bf a} \cdot {\bf b}) ({\bf c} \cdot {\bf d})$. Vector exponentiation is defined as the power of the modulus:\\ ${\bf a}^n \equiv {\rm VMOD}(a)^n = \mid {\bf a} \mid^n$ \section{Differential Operations} Differential operators provided are div, grad, curl, delsq, and dotgrad. \index{div operator}\index{grad operator}\index{curl operator} \index{delsq operator}\index{dotgrad operator} All but the last of these are prefix operators having a single vector or scalar argument as appropriate. Valid combinations of operator and argument, and the type of the result are shown in table~\ref{vvecttable}. \begin{table} \begin{center} \begin{tabular}{rcl} s & := & div ({\bf v}) \\ {\bf v} & := & grad(s) \\ {\bf v} & := & curl({\bf v}) \\ {\bf v} & := & delsq({\bf v}) \\ s & := & delsq(s) \\ {\bf v} & := & {\bf v} dotgrad {\bf v} \\ s & := & {\bf v} dotgrad s \end{tabular} \end{center} \caption{ORTHOVEC valid combinations of operator and argument}\label{vvecttable} \end{table} All other combinations of operator and argument type cause error messages to be issued. The differential operators have their usual meanings. The coordinate system used by these operators is set by invoking VSTART (cf. Sec.~\ref{vstart}). 
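As a brief illustration of these operators (a sketch assuming the default cartesian coordinates of Sec.~\ref{vstart}; the printed form of the results may differ):

{\small\begin{verbatim}
load_package orthovec;

% a vector field and a scalar field in cartesian coordinates
a   := svec(x*y, y*z, z*x)$
phi := x*y*z$

div(a);       % scalar result:  x + y + z
curl(a);      % vector result:  (-y, -z, -x)
grad(phi);    % vector result:  (y*z, x*z, x*y)
delsq(phi);   % scalar result:  0
\end{verbatim}}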
The names {\tt h1}, {\tt h2} and {\tt h3 } are reserved for the scale factors, and {\tt u1}, {\tt u2} and {\tt u3} are used for the coordinates. A vector extension, VDF, of the \REDUCE\ procedure DF allows the differentiation of a vector (scalar) with respect to a scalar to be performed. Allowed forms are\ttindex{VDF} VDF({\bf v}, s) $\rightarrow$ {\bf v} and VDF(s, s) $\rightarrow$ s , where, for example\\ \begin{eqnarray*} {\tt vdf( B,x)} \equiv \frac{\partial {\bf B}}{\partial x} \end{eqnarray*} The standard \REDUCE\ procedures DEPEND and NODEPEND have been redefined to allow dependences of vectors to be compactly defined. For example\index{DEPEND statement}\index{NODEPEND statement} {\small\begin{verbatim} a := svec(a1,a2,a3)$; depend a,x,y; \end{verbatim}} causes all three components {\tt a1},{\tt a2} and {\tt a3} of {\tt a} to be treated as functions of {\tt x} and {\tt y}. Individual component dependences can still be defined if desired. {\small\begin{verbatim} depend a3,z; \end{verbatim}} The procedure VTAYLOR gives truncated Taylor series expansions of scalar or vector functions:-\ttindex{VTAYLOR} {\small\begin{verbatim} vtaylor(vex,vx,vpt,vorder); \end{verbatim}} returns the series expansion of the expression VEX with respect to variable VX\ttindex{VORDER} about point VPT to order VORDER. Valid combinations of argument types are shown in table~\ref{ORTHOVEC:validexp}. \\ \begin{table} \begin{center} \begin{tabular}{cccc} VEX & VX & VPT & VORDER \\[2ex] {\bf v} & {\bf v} & {\bf v} & {\bf v}\\ {\bf v} & {\bf v} & {\bf v} & s\\ {\bf v} & s & s & s \\ s & {\bf v} & {\bf v} & {\bf v} \\ s & {\bf v} & {\bf v} & s\\ s & s & s & s\\ \end{tabular} \end{center} \caption{ORTHOVEC valid combination of argument types.}\label{ORTHOVEC:validexp} \end{table} Any other combinations cause error messages to be issued. Elements of VORDER must be non-negative integers, otherwise error messages are issued. If scalar VORDER is given for a vector expansion, expansions in each component are truncated at the same order, VORDER. The new version of Taylor expansion applies\index{l'H\^opital's rule} l'H\^opital's rule in evaluating coefficients, so handle cases such as $\sin(x) / (x) $ , etc. which the original version of ORTHOVEC could not. The procedure used for this is LIMIT,\ttindex{LIMIT} which can be used directly to find the limit of a scalar function {\tt ex} of variable {\tt x} at point {\tt pt}:- {\small\begin{verbatim} ans := limit(ex,x,pt); \end{verbatim}} \section{Integral Operations} Definite and indefinite vector, volume and scalar line integration procedures are included in ORTHOVEC. They are defined as follows: \ttindex{VINT}\ttindex{DVINT} \ttindex{VOLINT}\ttindex{DVOLINT}\ttindex{LINEINT}\ttindex{DLINEINT} \begin{eqnarray*} {\rm VINT} ({\bf v},x) & = & \int {\bf v}(x)dx\\ % {\rm DVINT} ({\bf v},x, a, b) & = & \int^b_a {\bf v} (x) dx\\ % {\rm VOLINT} ({\bf v}) & = & \int {\bf v} h_1 h_2 h_3 du_1 du_2 du_3\\ % {\rm DVOLINT}({\bf v},{\bf l},{\bf u},n) & = & \int^{\bf u}_{\bf l} {\bf v} h_1 h_2 h_3 du_1 du_2 du_3\\ % {\rm LINEINT} ({\bf v, \omega}, t) & = & \int {\bf v} \cdot {\bf dr} \equiv \int v_i h_i \frac{\partial \omega_i}{\partial t} dt\\ % {\rm DLINEINT} ({\bf v, \omega} t, a, b) & = & \int^b_a v_i h_i \frac{\partial \omega_i}{\partial t} dt\\ \end{eqnarray*} In the vector and volume integrals, ${\bf v}$ are vector or scalar, $a, b,x$ and $n$ are scalar. Vectors ${\bf l}$ and ${\bf u}$ contain expressions for lower and upper bounds to the integrals. 
The integer index $n$ defines the order in which the integrals over $u_1, u_2$ and $u_3$ are performed in order to allow for functional dependencies in the integral bounds: \begin{center} \begin{tabular}{ll} n & order\\ 1 & $u_1~u_2~u_3$\\ %
2 & $u_3~u_1~u_2$\\ %
3 & $u_2~u_3~u_1$\\ %
4 & $u_1~u_3~u_2$\\ %
5 & $u_2~u_1~u_3$\\ otherwise & $u_3~u_2~u_1$\\ \end{tabular} \end{center} The vector ${\bf \omega}$ in the line integral's arguments contains an explicit parameterisation of the coordinates $u_1, u_2, u_3$ of the line ${\bf u}(t)$ along which the integral is taken. \chapter[PHYSOP: Operator Calculus]%
{PHYSOP: Operator calculus in quantum theory} \label{PHYSOP} \typeout{{PHYSOP: Operator calculus in quantum theory}} {\footnotesize \begin{center} Mathias Warns \\ Physikalisches Institut der Universit\"at Bonn \\ Endenicher Allee 11--13 \\ D--5300 BONN 1, Germany \\[0.05in] e--mail: UNP008@DBNRHRZ1.bitnet \end{center} } \ttindex{PHYSOP} The package PHYSOP has been designed to meet the requirements of theoretical physicists looking for a computer algebra tool to perform complicated calculations in quantum theory with expressions containing operators. These calculations consist mainly of the computation of commutators between operator expressions and the evaluation of operator matrix elements in some abstract space. \section{The NONCOM2 Package} The package NONCOM2 redefines some standard \REDUCE\ routines in order to modify the way noncommutative operators are handled by the system. It redefines the \f{NONCOM}\ttindex{NONCOM} statement in a way more suitable for calculations in physics. Operators now have to be declared noncommutative pairwise, {\em i.e.\ }coding: \\ {\small\begin{verbatim} NONCOM A,B; \end{verbatim}} declares the operators \f{A} and \f{B} to be noncommutative but allows them to commute with any other (noncommutative or not) operator present in the expression. In a similar way, if one wants {\em e.g.\ }\f{A(X)} and \f{A(Y)} not to commute, one now has to code: {\small\begin{verbatim} NONCOM A,A; \end{verbatim}} A final example should make the use of the redefined \f{NONCOM} statement clear: {\small\begin{verbatim} NONCOM A,B,C; \end{verbatim}} declares \f{A} to be noncommutative with \f{B} and \f{C}, \f{B} to be noncommutative with \f{A} and \f{C} and \f{C} to be noncommutative with \f{A} and \f{B}. Note that after these declarations {\em e.g.\ }\f{A(X)} and \f{A(Y)} are still commuting kernels. Finally, to keep compatibility with standard \REDUCE, declaring a \underline{single} identifier using the \f{NONCOM} statement has the same effect as in standard \REDUCE. From the user's point of view there are no other new commands implemented by the package. \section{The PHYSOP package} The package PHYSOP implements a new \REDUCE\ data type to perform calculations with physical operators. The noncommutativity of operators is implemented using the NONCOM2 package, so this package should be loaded prior to the use of PHYSOP. \subsection{Type declaration commands} The new \REDUCE\ data type PHYSOP implemented by the package allows the definition of a new kind of operators ({\em i.e.\ }kernels carrying an arbitrary number of arguments). Throughout this manual, the name ``operator'' will refer, unless explicitly stated otherwise, to this new data type. This data type is in turn divided into 5 subtypes. For each of these subtypes, a declaration command has been defined: \begin{description} \item[\f{SCALOP A;} ]\ttindex{SCALOP} declares \f{A} to be a scalar operator.
This operator may carry an arbitrary number of arguments; after the declaration: \f{ SCALOP A; } all kernels of the form \f{A(J), A(1,N), A(N,L,M)} are recognised by the system as being scalar operators. \item[\f{VECOP V;} ]\ttindex{VECOP} declares \f{V} to be a vector operator. As for scalar operators, the vector operators may carry an arbitrary number of arguments. For example \f{V(3)} can be used to represent the vector operator $\vec{V}_{3}$. Note that the dimension of the space in which this operator lives is \underline{arbitrary}. One can however address a specific component of the vector operator by using a special index declared as \f{PHYSINDEX} (see below). This index must then be the first in the argument list of the vector operator. \item[\f{TENSOP C(3);} ] \ttindex{TENSOP} declares \f{C} to be a tensor operator of rank 3. Tensor operators of any fixed integer rank larger than 1 can be declared. Again this operator may carry an arbitrary number of arguments and the space dimension is not fixed. The tensor components can be addressed by using special \f{PHYSINDEX} indices (see below) which have to be placed in front of all other arguments in the argument list. \item[\f{STATE U;} ]\ttindex{STATE} declares \f{U} to be a state, {\em i.e.\ }an object on which operators have a certain action. The state U can also carry an arbitrary number of arguments. \item[\f{PHYSINDEX X;} ]\ttindex{PHYSINDEX} declares \f{X} to be a special index which will be used to address components of vector and tensor operators. \end{description} A command \f{CLEARPHYSOP}\ttindex{CLEARPHYSOP} removes the PHYSOP type from an identifier in order to use it for subsequent calculations. However, it should be remembered that \underline{no} substitution rule is cleared by this function. It is therefore the user's responsibility to first clear all substitution rules involving the identifier from which the PHYSOP type is removed. \subsection{Ordering of operators in an expression} The ordering of kernels in an expression is performed according to the following rules: \\ 1. \underline{Scalars} are always ordered ahead of PHYSOP operators in an expression. The \REDUCE\ statement \f{KORDER}\ttindex{KORDER} can be used to control the ordering of scalars but has no effect on the ordering of operators. 2. The default ordering of operators follows the order in which they have been declared (not the alphabetical one). This ordering scheme can be changed using the command \f{OPORDER}. \ttindex{OPORDER} Its syntax is similar to the \f{KORDER} statement, {\em i.e.\ }coding: \f{OPORDER A,V,F;} means that all occurrences of the operator \f{A} are ordered ahead of those of \f{V} etc. It is also possible to include operators carrying indices (both normal and special ones) in the argument list of \f{OPORDER}. However, including objects \underline{not} defined as operators ({\em i.e.\ }scalars or indices) in the argument list of the \f{OPORDER} command leads to an error. 3. Adjoint operators are placed by the declaration commands just after the original operators on the \f{OPORDER} list. Changing the place of an operator on this list does \underline{not} mean that the adjoint operator is moved accordingly. This adjoint operator can be moved freely by including it in the argument list of the \f{OPORDER} command. \subsection{Arithmetic operations on operators} The following arithmetic operations are possible with operator expressions: \\ 1. Multiplication or division of an operator by a scalar. 2.
Addition and subtraction of operators of the \underline{same} type. 3. Multiplication of operators is only defined between two \underline{scalar} operators. 4. The scalar product of two VECTOR operators is implemented with a new function \f{DOT}\ttindex{DOT}. The system expands the product of two vector operators into an ordinary product of the components of these operators by inserting a special index generated by the program. To give an example, if one codes: {\small\begin{verbatim} VECOP V,W; V DOT W; \end{verbatim}} the system will transform the product into: {\small\begin{verbatim} V(IDX1) * W(IDX1) \end{verbatim}} where \f{IDX1} is a \f{PHYSINDEX} generated by the system (called a DUMMY INDEX in the following) to express the summation over the components. The identifiers \f{IDXn} (\f{n} is a nonzero integer) are reserved variables for this purpose and should not be used for other applications. The arithmetic operator \f{DOT} can be used both in infix and prefix form with two arguments. 5. Operators (but not states) can only be raised to an \underline{integer} power. The system expands this power expression into a product of the corresponding number of terms, inserting dummy indices if necessary. The following examples explain the transformations occurring on power expressions (system output is indicated with an \f{-->}): {\small\begin{verbatim} SCALOP A; A**2; --> A*A VECOP V; V**4; --> V(IDX1)*V(IDX1)*V(IDX2)*V(IDX2) TENSOP C(2); C**2; --> C(IDX3,IDX4)*C(IDX3,IDX4) \end{verbatim}} Note in particular the way the system interprets powers of tensor operators, which differs from the notation used in matrix algebra. 6. Quotients of operators are only defined between scalar operator expressions. The system transforms the quotient of two scalar operators into the product of the first operator and the inverse of the second one. {\small\begin{verbatim} SCALOP A,B; A / B; -1 A *( B ) \end{verbatim}} 7. Combining the last two rules explains how the system handles negative powers of operators: \noindent {\small\begin{verbatim} SCALOP B; B**(-3); -1 -1 -1 --> (B )*(B )*(B ) \end{verbatim}} The method of inserting dummy indices and expanding powers of operators has been chosen to facilitate the handling of complicated operator expressions and particularly their application to states. However, it may be useful to get rid of these dummy indices in order to enhance the readability of the system's final output. For this purpose the switch \f{CONTRACT}\ttindex{CONTRACT} has to be turned on (\f{CONTRACT} is normally set to \f{OFF}). The system in this case contracts over dummy indices, reinserting the \f{DOT} operator and reassembling the expanded powers. However, due to the predefined operator ordering the system may not remove all the dummy indices introduced previously. \subsection{Special functions} \subsubsection{Commutation relations} If two PHYSOPs have been declared noncommutative using the (redefined) \f{NONCOM}\ttindex{NONCOM} statement, it is possible to introduce in the environment elementary (anti-) commutation relations between them. For this purpose, two scalar operators \f{COMM}\ttindex{COMM} and \f{ANTICOMM}\ttindex{ANTICOMM} are available. These operators are used in conjunction with \f{LET} statements. Example: {\small\begin{verbatim} SCALOP A,B,C,D; LET COMM(A,B)=C; FOR ALL N,M LET ANTICOMM(A(N),B(M))=D; VECOP U,V,W; PHYSINDEX X,Y,Z; FOR ALL X,Y LET COMM(V(X),W(Y))=U(Z); \end{verbatim}} Note that if special indices are used as dummy variables in \f{FOR ALL ...
LET} constructs then these indices should have been declared previously using the \f{PHYSINDEX} command.\ttindex{PHYSINDEX} Every time the system encounters a product term involving two noncommutative operators which have to be reordered on account of the given operator ordering, the list of available (anti-) commutators is checked in the following way: First the system looks for a \underline{commutation} relation which matches the product term. If this fails, the defined \underline{anticommutation} relations are checked. If there is no successful match the product term \f{A*B} is replaced by: \\ {\small\begin{verbatim} A*B; --> COMM(A,B) + B*A \end{verbatim}} so that the user may introduce the commutation relation later on. The user may want to force the system to look for \underline{anticommutators} only; for this purpose a switch \f{ANTICOM} \ttindex{ANTICOM} is defined which has to be turned on (\f{ANTICOM} is normally set to \f{OFF}). In this case, the above example is replaced by: {\small\begin{verbatim} ON ANTICOM; A*B; --> ANTICOMM(A,B) - B*A \end{verbatim}} For the calculation of (anti-) commutators between complex operator expressions, the functions \f{COMMUTE}\ttindex{COMMUTE} and \f{ANTICOMMUTE}\ttindex{ANTICOMMUTE} have been defined. {\small\begin{verbatim} VECOP P,A,K; PHYSINDEX X,Y; FOR ALL X,Y LET COMM(P(X),A(Y))=K(X)*A(Y); COMMUTE(P**2,P DOT A); \end{verbatim}} \subsubsection{Adjoint expressions} As has already been mentioned, for each operator and state defined using the declaration commands, the system automatically generates the corresponding adjoint operator. For the calculation of the adjoint representation of a complicated operator expression, a function \f{ADJ}\ttindex{ADJ} has been defined. {\small\begin{verbatim} SCALOP A,B; ADJ(A*B); + + --> (A )*(B ) \end{verbatim}} \subsubsection{Application of operators on states} A function \f{OPAPPLY}\ttindex{OPAPPLY} has been defined for the application of operators to states. It has two arguments and is used in the following combinations: {\bf (i)} \f{LET OPAPPLY(}{\it operator, state}\f{) =} {\it state}; This is to define an elementary action of an operator on a state in analogy to the way elementary commutation relations are introduced to the system. {\small\begin{verbatim} SCALOP A; STATE U; FOR ALL N,P LET OPAPPLY(A(N),U(P)) = EXP(I*N*P)*U(P); \end{verbatim}} {\bf (ii)} \f{LET OPAPPLY(}{\it state, state}\f{) =} {\it scalar exp.}; This form is to define scalar products between states and normalisation conditions. {\small\begin{verbatim} STATE U; FOR ALL N,M LET OPAPPLY(U(N),U(M)) = IF N=M THEN 1 ELSE 0; \end{verbatim}} {\bf (iii)} {\it state} \f{:= OPAPPLY(}{\it operator expression, state}); In this way, the action of an operator expression on a given state is calculated using elementary relations defined as explained in {\bf (i)}. The result may be assigned to a different state vector. {\bf (iv)} \f{OPAPPLY(}{\it state}\f{, OPAPPLY(}{\it operator expression, state}\f{))}; This is the way to calculate matrix elements of operator expressions. The system proceeds in the following way: first the rightmost operator is applied to the right state, which means that the system tries to find an elementary relation which matches the application of the operator on the state. If this fails, the system tries to apply the leftmost operator of the expression to the left state using the adjoint representations. If this also fails, the system prints out a warning message and stops the evaluation.
Otherwise the next operator occurring in the expression is taken and so on until the complete expression is applied. Then the system looks for a relation expressing the scalar product of the two resulting states and prints out the final result. An example of such a calculation is given in the test file. The infix version of the \f{OPAPPLY} function is the vertical bar $\mid$. It is \underline{right} associative and placed in the precedence list just above the minus ($-$) operator. \chapter{PM: A REDUCE pattern matcher} \label{PM} \typeout{{PM: A REDUCE pattern matcher}} {\footnotesize \begin{center} Kevin McIsaac \\ The University of Western Australia \\ Australia\\[0.05in] e--mail: kevin@wri.com \end{center} } \ttindex{PM} PM is a general pattern matcher similar in style to those found in systems such as SMP and Mathematica. A template is any expression composed of literal elements ({\em e.g.\ }{\tt 5}, {\tt a} or {\tt a+1}) and specially denoted pattern variables ({\em e.g.\ }{\tt ?a} or {\tt ??b}). Atoms beginning with `?' are called generic variables and match any expression. Atoms beginning with `??' are called multi-generic variables and match any expression or any sequence of expressions including the null or empty sequence. A sequence is an expression of the form `[a1, a2,...]'. When placed in a function argument list the brackets are removed, {\em i.e.\ }f([a,1]) $\rightarrow$ f(a,1) and f(a,[1,2],b) $\rightarrow$ f(a,1,2,b). A template is said to match an expression if the template is literally equal to the expression or if, by replacing any of the generic or multi-generic symbols occurring in the template, the template can be made to be literally equal to the expression. These replacements are called the bindings for the generic variables. A replacement is an expression of the form {\tt exp1 -> exp2}, which means exp1 is replaced by exp2, or {\tt exp1 --> exp2}, which is the same except that exp2 is not simplified until after the substitution for exp1 is made. If the expression has any of the properties associativity, commutativity, or an identity element, these are used to determine whether the expressions match. If an attempt to match the template to the expression fails, the matcher backtracks, unbinding generic variables, until it reaches a place where it can make a different choice. The matcher also supports semantic matching. Briefly, if a subtemplate does not match the corresponding subexpression because they have different structures then the two are equated and the matcher continues matching the rest of the expression until all the generic variables in the subtemplate are bound. The equality is then checked. This is controlled by the switch \ttindex{SEMANTIC}{\tt semantic}. By default it is on. \section{The Match Function} {\tt M(exp,template)}\ttindex{M} The template is matched against the expression. If the template is literally equal to the expression, {\tt T} is returned. If the template is literally equal to the expression after replacing the generic variables by their bindings, then the set of bindings is returned as a set of replacements. Otherwise {\tt NIL} is returned. {\small\begin{verbatim} OPERATOR F; M(F(A),F(A)); T M(F(A,B),F(A,?A)); {?A->B} M(F(A,B),F(??A)); {??A->[A,B]} m(a+b+c,c+?a+?b); {?a->a,?b->b} m(a+b+c,b+?a); {?a->a + c} \end{verbatim}} The last two examples show the effects of semantic matching, using the associativity and commutativity of {\tt +}.
\section {Qualified Matching} A template may be qualified by the use of the conditional operator {\tt \_=},\ttindex{\_=} standing for {\bf such that}, as in {\tt ?a\_=}{\it logical-exp} (see the examples below). When a such-that condition is encountered in a template it is held until all generic variables appearing in {\it logical-exp} are bound. On the binding of the last generic variable {\it logical-exp} is simplified, and if the result is not {\tt T} the condition fails and the pattern matcher backtracks. When the template has been fully parsed any remaining held such-that conditions are evaluated and compared to {\tt T}. {\small\begin{verbatim} load_package pm; operator f; if (m(f(a,b),f(?a,?b_=(?a=?b)))) then write "yes" else write"no"; no m(f(a,a),f(?a,?b_=(?a=?b))); {?B->A,?A->A} \end{verbatim}} \section{Substituting for replacements} The operator {\tt S}\ttindex{S} substitutes the replacements in an expression. {\tt S(exp,{temp1->sub1,temp2->sub2,...},rept, depth);} will do the substitutions a maximum of {\tt rept} times and to a depth of {\tt depth}, using a breadth-first search and replace. {\tt rept} and {\tt depth} may be omitted, in which case they default to 1 and infinity respectively. {\tt SI(exp,{temp1->sub1,temp2->sub2,...}, depth)}\ttindex{SI} will substitute infinitely many times until the expression stops changing. {\tt SD(exp,{temp1->sub1,temp2->sub2,...},rept, depth)}\ttindex{SD} is a depth-first version of {\tt S}. {\small\begin{verbatim} s(f(a,b),f(a,?b)->?b^2); 2 b s(a+b,a+b->a*b); a*b operator nfac; s(nfac(3),{nfac(0)->1,nfac(?x)->?x*nfac(?x-1)}); 3*nfac(2) s(nfac(3),{nfac(0)->1,nfac(?x)->?x*nfac(?x-1)},2); 6*nfac(1) si(nfac(4),{nfac(0)->1,nfac(?x)->?x*nfac(?x-1)}); 24 s(a+b+f(a+b),a+b->a*b,inf,0); f(a + b) + a*b \end{verbatim}} \section{Programming with Patterns} There are also facilities to use this pattern-matcher as a programming language. The operator {\tt :-}\ttindex{:-} can be used to declare that, during simplification, all matches of a template should be replaced by some expression. The operator {\tt ::-} is the same except that the right hand side is not simplified.
{\small\begin{verbatim} operator fac, gamma; fac(?x_=Natp(?x)) ::- ?x*fac(?x-1); HOLD(FAC(?X-1)*?X) fac(0) :- 1; 1 fac(?x) :- Gamma(?x+1); GAMMA(?X + 1) fac(3); 6 fac(3/2); GAMMA(5/2) \end{verbatim}} \chapter[QSUM: {\slshape q}-hypergeometric sums]% {QSUM : Package for {\slshape q}-hypergeometric sums} \label{QSUM} \typeout{{QSUM : Package for summation of $q$-hypergeometric terms}} \newcommand{\funkdef}[3]{\left\{\!\!\!\begin{array}{cc} #1 & \!\!\!\mbox{\rm{if} $#2$ } \\ #3 & \!\!\!\mbox{\rm{otherwise}} \end{array} \right.} \newcommand{\funkdefff}[6]{\left\{\begin{array}{ccc} #1 && \mbox{{if} $#2$ } \\ #3 && \mbox{{if} $#4$ } \\ #5 && \mbox{{if} $#6$ } \end{array} \right.} \newcommand{\qphihyp}[5]{{}_{#1}\phi_{#2}\left.\left[\begin{array}{c} #3 \\ #4 \end{array}\right|q,#5\right]} \newcommand{\qpsihyp}[5]{{}_{#1}\psi_{#2}\left.\left[\begin{array}{c} #3 \\ #4 \end{array}\right|q,#5\right]} \newcommand{\hyp}[5]{{}_{#1}F_{#2}\left.\left[\begin{array}{c} #3 \\ #4 \end{array}\right|#5\right]} \newcommand{\fcn}[2]{{\mathrm #1}(#2)} \newcommand{\ifcn}[3]{{\mathrm #1}_{#2}(#3)} \newcommand{\qgosper}{$q$-Gosper\ } \newcommand{\qgosperalg}{\qgosper algorithm\ } \newcommand{\qzeilalg}{$q$-Zeilberger algorithm\ } \newcommand{\qfac}[2]{\left(#1;\,q\right)_{#2}} \newcommand{\qatom}[1]{\left(#1;\,q\right)_{\infty}} %\newcommand{\qbinomial}[2]{\left(\begin{array}{c}#1\\#2\end{array}\right)_q} %\newcommand{\binomial}[2]{\left(\begin{array}{c}#1\\#2\end{array}\right)} \newcommand{\binomial}[2]{{#1 \choose #2}} \newcommand{\qbinomial}[2]{{{#1 \choose #2}\!}_q} \newcommand{\qfactorial}[2]{} \newcounter{redprompt} {\setcounter{redprompt}{0}} \newcommand{\redprompt}{\stepcounter{redprompt}\theredprompt:} \newenvironment{redoutput}{\small\begin{alltt}}{\end{alltt}\noindent{}} {\footnotesize \begin{center} Harald B\"oing \\ Wolfram Koepf \\ Konrad-Zuse-Zentrum f\"ur Informationstechnik Berlin \\ Takustra\"se 7 \\ D-14195 Berlin-Dahlem \\ e-mail: koepf@zib.de \end{center} } \ttindex{QSUM} %\markboth{CHAPTER \ref{QSUM}. QSUM: SUMMATION OF Q-HYPERGEOMETRIC TERMS}{} %\thispagestyle{myheadings} This package is an implementation of the $q$-analogues of Gosper's and Zeilberger's % \footnote{The {\tt ZEILBERG} package (Chap. \ref{ZEILBERG} p. \pageref{ZEILBERG}, see also \cite{Koepf:95}) contains the hypergeometric versions.} % algorithm for indefinite and definite summation of $q$-hypergeometric terms, respectively. An expression $a_k$ is called a {\sl $q$-hypergeometric term}, if $a_{k}/a_{k-1}$ is a rational function with respect to $q^k$. Most $q$-terms are based on the {\sl $q$-shifted factorial} or {\sl qpochhammer}. Other typical $q$-hypergeometric terms are ratios of products of powers, $q$-factorials, $q$-binomial coefficients, and $q$-shifted factorials that are integer-linear in their arguments. \\ The package is loaded with {\tt load\_package qsum}. 
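For instance, a term built from the $q$-shifted factorials defined in the next section, say \[ a_k \;=\; \frac{\qfac{a}{k}}{\qfac{q}{k}}\,z^k, \] satisfies \[ \frac{a_{k}}{a_{k-1}} \;=\; \frac{\left(1-a\,q^{k-1}\right)z}{1-q^{k}}, \] which is a rational function of $q^k$ (note $q^{k-1}=q^{k}/q$), so $a_k$ is indeed a $q$-hypergeometric term.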
\section{Elementary {\slshape q}-Functions} The package supports the input of the following elementary {\slshape q}-functions: \begin{itemize} \item {\verb@qpochhammer(a,q,infinity)@} \ttindex{QPOCHHAMMER} \[ \qfac{a}{\infty}:= \prod_{j=0}^{\infty}{\left(1-a\,q^j\right)} \] \item {\verb@qpochhammer(a,q,k)@} \[ \qfac{a}{k}:= \funkdefff{\prod_{j=0}^{k-1}{\left(1-a\,q^j\right)}}% {k>0}{1}{k=0}{\prod_{j=1}^{k}{\left(1-a\,q^{-j}\right)^{-1}}}{k<0} \] \item {\verb@qbrackets(k,q)@} \ttindex{QBRACKETS} \[ {}[q,k]:=\frac{q^k-1}{q-1} \] \item {\verb@qfactorial(k,q)@} \ttindex{QFACTORIAL} \[ {}[k]_q!:= \frac{\qfac{q}{k}}{(1-q)^k} \] \item {\verb@qbinomial(n,k,q)@} \ttindex{QBINOMIAL} \[ \qbinomial{n}{k}:= \frac{\qfac{q}{n}}{\qfac{q}{k}\cdot\qfac{q}{n-k}} \] \item {\protect\verb@qphihyperterm({a1,a2,...,ar},{b1,b2,...,bs},q,z,k)@} \ttindex{QPHIHYPERTERM} \[ \sum_{k=0}^{\infty}{\frac{\qfac{a_1,a_2,\ldots,a_r}{k}} {\qfac{b_1,b_2,\ldots,b_s}{k}} \,\frac{z^k}{\qfac{q}{k}}\,\left[(-1)^k\, q^{\binomial{k}{2}}\right]^{1+s-r}} \] \item {\protect\verb@qpsihyperterm({a1,a2,...,ar},{b1,b2,...,bs},q,z,k)@} \ttindex{QPSIHYPERTERM} \[\sum_{k=-\infty}^{\infty}{\frac{\qfac{a_1,a_2,\ldots,a_r}{k}} {\qfac{b_1,b_2,\ldots,b_s}{k}}\,z^k\, \left[(-1)^k\,q^{\binomial{k}{2}}\right]^{s-r}} \] \end{itemize} where $\qfac{a_1,a_2,\ldots,a_r}{k}$ stands for the product $\prod_{j=1}^r{\qfac{a_j}{k}}$. \section{The {\ttfamily QGOSPER} operator} The {\tt qgosper} operator is an implementation of the $q$-Gosper algorithm \cite{Koornwinder:93}. \begin{itemize} \item {\verb@qgosper(a,q,k)@} determines a $q$-hypergeometric antidifference. (By default it returns a {\sl downward} antidifference, which may be changed by the switch {\verb@qgosper_down@}.) If it does not return a \textsl{q}-hypergeometric antidifference, then such an antidifference does not exist. \item {\verb@qgosper(a,q,k,m,n)@} determines a closed formula for the definite sum \[\sum\limits_{k=m}^n a_k\] using the $q$-analogue of Gosper's algorithm. This is only successful if \textsl{q}-Gosper's algorithm applies. \end{itemize} {\bf Example:} {\small\begin{verbatim} 1: qgosper(qpochhammer(a,q,k)*q^k/qpochhammer(q,q,k),q,k); k (q *a - 1)*qpochhammer(a,q,k) ------------------------------- (a - 1)*qpochhammer(q,q,k) \end{verbatim}} \section{The {\ttfamily QSUMRECURSION} operator} \label{QSUMRECURSION} The \f{QSUMRECURSION\ttindex{QSUMRECURSION}} operator is an implementation of the $q$-Zeilberger algorithm \cite{Koornwinder:93}. It tries to determine a homogeneous recurrence equation for $\fcn{summ}{n}$ wrt. $n$ with polynomial coefficients (in $n$), where \[ \fcn{summ}{n}:= \sum_{k=-\infty}^{\infty}{\fcn{f}{n,k}}. \] There are three different ways to pass a summand $\fcn{f}{n,k}$ to {\verb@qsumrecursion@}: \begin{itemize} \item {\verb@qsumrecursion(f,q,k,n)@}, where {\tt f} is a $q$-hypergeometric term wrt. {\tt k} and {\tt n}, {\tt k} is the summation variable and {\tt n} the recursion variable, {\tt q} is a symbol. \item {\verb@qsumrecursion(upper,lower,q,z,n)@} is a shortcut for \\ {\verb@qsumrecursion(qphihyperterm(upper,lower,q,z,k),q,k,n)@} \item {\verb@qsumrecursion(f,upper,lower,q,z,n)@} is a similar shortcut for\\ {\verb@qsumrecursion(f*qphihyperterm(upper,lower,q,z,k),q,k,n)@}, \end{itemize} i.\,e.\ {\tt upper} and {\tt lower} are lists of upper and lower parameters of the generalized $q$-hypergeometric function. The third form is handy if you have any additional factors. 
For all three instances it is possible to pass the order, if known in advance, as an additional argument at the end of the parameter sequence. You can also specify a range by a list of two positive integers, the first one specifying the lowest and the second one the highest order. By default \f{QSUMRECURSION} will search for recurrences of order from 1 to 5. Usually it uses {\tt summ} as the name of the sum function. If you want to change this behaviour then use the following syntax: \f{QSUMRECURSION(f,q,k,s(n))}. {\small\begin{verbatim} 2: qsumrecursion(qpochhammer(q^(-n),q,k)*z^k / qpochhammer(q,q,k),q,k,n); n n - ((q - z)*summ(n - 1) - q *summ(n)) \end{verbatim}} \section{Global Variables and Switches} There are several switches defined in the \f{QSUM} package. Please take a look at the accompanying documentation file {\tt qsum.tex} in \$REDUCEPATH/packages/. \\ The most important switches are: \begin{itemize} \item \verb@qgosper_down@, default setting is on. It determines whether \verb@qgosper@ returns a downward or an upward antidifference $g_k$ for the input term $a_k$, i.\,e.\ $a_k=g_k-g_{k-1}$ or $a_k=g_{k+1}-g_k$ respectively. \item \verb@qsumrecursion_certificate@, default off. As Zeilberger's algorithm delivers a recurrence equation for a $q$-hypergeometric term $\mathrm{f}(n,k)$, this switch is used to get all the information necessary for proving this recurrence equation. If it is set on, instead of simply returning the resulting recurrence equation (for the sum)---if one exists---\verb@qsumrecursion@ returns a list \verb@{rec,cert,f,k,dir}@ with five items: the first entry contains the recurrence equation, while the other items enable you to prove the recurrence a posteriori by rational arithmetic. If we denote by \verb@r@ the recurrence \verb@rec@ where we substituted the \verb@summ@-function by the input term \verb@f@ (with the corresponding shifts in \verb@n@) then the following equation is valid: \[ \verb@r = cert*f - sub(k=k-1,cert*f)@ \] or \[ \verb@r = sub(k=k+1,cert*f) - cert*f@ \] if \verb@dir=downward_antidifference@ or \verb@dir=upward_antidifference@ respectively. \end{itemize} There is one global variable: \begin{itemize} \item \verb@qsumrecursion_recrange!*@ controls for which recursion orders the procedure \verb@qsumrecursion@ looks. It has to be a list with two entries, the first one representing the lowest and the second one the highest order of a recursion to search for. By default it is set to \verb@{1,5}@. \end{itemize} \chapter[RANDPOLY: Random polynomials]%
{RANDPOLY: A random polynomial generator} \label{RANDPOLY} \typeout{{RANDPOLY: A random polynomial generator}} {\footnotesize \begin{center} Francis J. Wright \\ School of Mathematical Sciences, Queen Mary and Westfield College \\ University of London \\ Mile End Road \\ London E1 4NS, England \\[0.05in] e--mail: F.J.Wright@QMW.ac.uk \end{center} } \ttindex{RANDPOLY} The operator {\tt RANDPOLY}\ttindex{RANDPOLY} requires at least one argument corresponding to the polynomial variable or variables, which must be either a single expression or a list of expressions. In effect, {\tt RANDPOLY} replaces each input expression by an internal variable and then substitutes the input expression for the internal variable in the generated polynomial (and by default expands the result as usual).
The rest of this document uses the term ``variable'' to refer to a general input expression or the internal variable used to represent it, and all references to the polynomial structure, such as its degree, are with respect to these internal variables. The actual degree of a generated polynomial might be different from its degree in the internal variables. By default, the polynomial generated has degree 5 and contains 6 terms. Therefore, if it is univariate it is dense whereas if it is multivariate it is sparse. \section{Optional arguments} Other arguments can optionally be specified, in any order, after the first compulsory variable argument. All arguments receive full algebraic evaluation, subject to the current switch settings etc. The arguments are processed in the order given, so that if more than one argument relates to the same property then the last one specified takes effect. Optional arguments are either keywords or equations with keywords on the left. In general, the polynomial is sparse by default, unless the keyword {\tt dense}\index{randpoly ! {\tt dense}} is specified as an optional argument. (The keyword {\tt sparse}\index{randpoly ! {\tt sparse}} is also accepted, but is the default.) The default degree can be changed by specifying an optional argument of the form\index{randpoly ! {\tt degree}} \begin{center} {\tt degree = {\it natural number}}. \end{center} In the multivariate case this is the total degree, {\em i.e.\ }the sum of the degrees with respect to the individual variables. More complicated monomial degree bounds can be constructed by using the coefficient function described below to return a monomial or polynomial coefficient expression. Moreover, {\tt randpoly} respects internally the \REDUCE\ ``asymptotic'' commands {\tt let}, {\tt weight} {\em etc.\ }described in section~\ref{sec-asymp}, which can be used to exercise additional control over the polynomial generated. In the sparse case (only), the default maximum number of terms generated can be changed by specifying an optional argument of the form\index{randpoly ! {\tt terms}} \begin{center} {\tt terms = {\it natural number}}. \end{center} The actual number of terms generated will be the minimum of the value of {\tt terms} and the number of terms in a dense polynomial of the specified degree, number of variables, {\em etc.} \section{Advanced use of RANDPOLY} The default order (or minimum or trailing degree) can be changed by specifying an optional argument of the form\index{randpoly ! {\tt ord}} \begin{center} {\tt ord = {\it natural number}}. \end{center} The order normally defaults to 0. The input expressions to {\tt randpoly} can also be equations, in which case the order defaults to 1 rather than 0. Input equations are converted to the difference of their two sides before being substituted into the generated polynomial. This makes it easy to generate polynomials with a specified zero -- for example \begin{center}\tt randpoly(x = a); \end{center} generates a polynomial that is guaranteed to vanish at $x = a$, but is otherwise random. The operator {\tt randpoly} accepts two further optional arguments in the form of equations with the keywords {\tt coeffs} \index{randpoly ! {\tt coeffs}} and {\tt expons}\index{randpoly ! {\tt expons}} on the left. The right sides of each of these equations must evaluate to objects that can be applied as functions of no variables. 
These functions should be normal algebraic procedures; the {\tt coeffs} procedure may return any algebraic expression, but the {\tt expons} procedure must return an integer. The values returned by the functions should normally be random, because it is the randomness of the coefficients and, in the sparse case, of the exponents that makes the constructed polynomial random. A convenient special case is to use the function {\tt rand} on the right of one or both of these equations; when called with a single argument {\tt rand} returns an anonymous function of no variables that generates a random integer. The single argument of {\tt rand} should normally be an integer range in the form $a~..~b$, where $a$, $b$ are integers such that $a < b$. For example, the {\tt expons} argument might take the form \begin{center}\tt expons = rand(0~..~n) \end{center} where {\tt n} will be the maximum degree with respect to each variable {\em independently}. In the case of {\tt coeffs} the lower limit will often be the negative of the upper limit to give a balanced coefficient range, so that the {\tt coeffs} argument might take the form \begin{center}\tt coeffs = rand(-n~..~n) \end{center} which will generate random integer coefficients in the range $[-n,n]$. Further information on the auxiliary functions of RANDPOLY can be found in the extended documentation and examples. \section{Examples} \label{sec:Examples} {\small\begin{verbatim} randpoly(x); 5 4 3 2 - 54*x - 92*x - 30*x + 73*x - 69*x - 67 randpoly({x, y}, terms = 20); 5 4 4 3 2 3 3 31*x - 17*x *y - 48*x - 15*x *y + 80*x *y + 92*x 2 3 2 2 4 3 2 + 86*x *y + 2*x *y - 44*x + 83*x*y + 85*x*y + 55*x*y 5 4 3 2 - 27*x*y + 33*x - 98*y + 51*y - 2*y + 70*y - 60*y - 10 \end{verbatim}} \newpage {\small\begin{verbatim} randpoly({x, sin(x), cos(x)}); 4 3 3 sin(x)*( - 4*cos(x) - 85*cos(x) *x + 50*sin(x) 2 - 20*sin(x) *x + 76*sin(x)*x + 96*sin(x)) \end{verbatim}} \chapter[RATAPRX: Rational Approximations]%
{RATAPRX : Rational Approximations Package} \label{RATAPRX} \typeout{{RATAPRX : Rational Approximations Package}} {\footnotesize \begin{center} Lisa Temme\\ Wolfram Koepf\\ Konrad-Zuse-Zentrum f\"ur Informationstechnik Berlin\\ Takustra\ss e 7 \\ D-14195 Berlin-Dahlem, Germany \\ e-mail: koepf@zib.de \end{center} } \ttindex{RATAPRX} This package provides functions \begin{itemize} \item to convert rational numbers into their periodic representation and vice versa, \item to compute continued fractions, and \item to compute the Pad\'{e} approximant of a function. \end{itemize} The package can be loaded using {\tt load\_package rataprx;} it supersedes the {\tt contfr} package. \section{Periodic Representation} The function \f{rational2periodic(n)\ttindex{RATIONAL2PERIODIC}} converts a rational number {\tt n} into its periodic representation. For example $59/70$ is converted to $0.8\overline{428571}$. \\ Depending on the print function of your \REDUCE\ system, calling the function \f{rational2periodic} might result in an expression of the form {\tt periodic(\{a,b\},\{c$_1$,...,c$_n$\})\ttindex{PERIODIC}}. {\tt a} and {\tt b} give the non-periodic part of the rational number {\tt n}, and {\tt c$_1$,...,c$_n$} are the digits of the periodic part. In this case $59/70$ would result in {\tt periodic(\{8,10\},\{4,2,8,5,7,1\})}. \\ The function \f{periodic2rational(periodic(\{a,b\},\{c$_1$,...,c$_n$\})) \ttindex{PERIODIC2RATIONAL}} is the inverse function and computes the rational expression for a periodic one. Note that {\tt b} is 1, -1 or an integer multiple of 10.
If {\tt a} is zero, then the input number {\tt b} indicates how many places after the decimal point the period occurs. {\small\begin{verbatim} rational2periodic(6/17); periodic({0,1},{3,5,2,9,4,1,1,7,6,4,7,0,5,8,8,2}) periodic2rational(ws); 6 ---- 17 \end{verbatim}} \section{Continued Fractions} A continued fraction (see \cite{Baker:81a} \S 4.2) has the general form \[b_0 + \frac{a_1}{b_1 + \frac{a_2}{b_2+ \frac{a_3}{b_3 + \ldots }}} \;.\] A more compact way of writing this is as \[b_0 + \frac{a_1|}{|b_1} + \frac{a_2|}{|b_2} + \frac{a_3|}{|b_3} + \ldots\,.\] \\ This is represented in \REDUCE\ as \[{\tt contfrac({\mbox{\sl Rational\hspace{2mm} approximant}}, \{b_0, \{a_1,b_1\}, \{a_2,b_2\},.....\}).\ttindex{CONTFRAC} }\] There are four different functions to determine the continued fractions for real numbers and functions {\tt f} in the variable {\tt var}: \begin{center} {\tt \begin{tabular}{l l} cfrac(number); & cfrac(number,length); \\ cfrac(f, var); & cfrac(f, var, length); \end{tabular}} \\[1mm] \end{center} \ttindex{CFRAC} The {\tt length} argument is optional and specifies the number of ordered pairs $\{a_i,b_i\}$ to be returned. Its default value is five. {\small\begin{verbatim} cfrac pi; 1146408 contfrac(---------), 364913 {3,{1,7},{1,15},{1,1},{1,292},{1,1},{1,1},{1,1}, {1,2},{1,1}}) \end{verbatim}} \newpage {\small\begin{verbatim} cfrac((x+2/3)^2/(6*x-5),x); 2 9*x + 12*x + 4 6*x + 13 24*x - 20 contfrac(-----------------,{----------,{1,-----------}}) 54*x - 45 36 9 cfrac(e^x,x); 3 2 x + 9*x + 36*x + 60 contfrac(-----------------------, 2 3*x - 24*x + 60 {1,{x,1},{ - x,2},{x,3},{ - x,2},{x,5}}) \end{verbatim}} \section{Pad\'{e} Approximation} The Pad\'{e} approximant represents a function by the ratio of two polynomials. The coefficients of the powers occurring in the polynomials are determined by the coefficients in the Taylor series expansion of the function (see \cite{Baker:81a}). Given a power series \[ f(x) = c_0 + c_1 (x-h) + c_2 (x-h)^2 + \ldots \] and the degrees of the numerator, $n$, and of the denominator, $d$, the {\tt pade} function finds the unique coefficients $a_i,\, b_i$ in the Pad\'{e} approximant \[ \frac{a_0+a_1 x+ \cdots + a_n x^n}{b_0+b_1 x+ \cdots + b_d x^d} \; .\] The function \f{pade(f, x, h, n, d)\ttindex{PAD\'{E}}} takes as input the function {\tt f} in the variable {\tt x} to be approximated, where {\tt h} is the point at which the approximation is evaluated. {\tt n} and {\tt d} are the (specified) degrees of the numerator and the denominator. It returns the Pad\'{e} approximant, i.e.\ a rational function. \par Error messages may occur in the following cases: \begin{itemize} \item The Taylor series expansion for the function {\tt f} has not yet been implemented in the \REDUCE\ Taylor Package. \item A Pad\'{e} approximant of this function does not exist. \item A Pad\'{e} approximant of this order (i.e.\ the specified numerator and denominator orders) does not exist. Note that an approximant of a different order might nevertheless exist.
\end{itemize} \newpage {\small\begin{verbatim} pade(sin(x),x,0,3,3); 2 x*( - 7*x + 60) ------------------ 2 3*(x + 20) pade(tanh(x),x,0,5,5); 4 2 x*(x + 105*x + 945) ----------------------- 4 2 15*(x + 28*x + 63) pade(exp(1/x),x,0,5,5); ***** no Pade Approximation exists pade(factorial(x),x,1,3,3); ***** not yet implemented 30: pade(sin(x)/x^2,x,0,10,0); ***** Pade Approximation of this order does not exist 31: pade(sin(x)/x^2,x,0,10,2); 10 8 6 4 2 - x + 110*x - 7920*x + 332640*x - 6652800*x + 39916800 -------------------------------------------------------------- 39916800*x \end{verbatim}} \chapter[REACTEQN: Chemical reaction equations]% {REACTEQN: Support for chemical reaction equations} \label{REACTEQN} \typeout{{REACTEQN: Support for chemical reaction equations}} {\footnotesize \begin{center} Herbert Melenk \\ Konrad--Zuse--Zentrum f\"ur Informationstechnik Berlin \\ Takustra\"se 7 \\ D--14195 Berlin--Dahlem, Germany \\[0.05in] e--mail: melenk@zib.de \end{center} } \ttindex{REACTEQN} The \REDUCE\ package REACTEQN allows one to transform chemical reaction systems into ordinary differential equation systems corresponding to the laws of pure mass action. It provides the single function {\small\begin{verbatim} reac2ode { <reaction> [,<rate> [,<rate>]] [,<reaction> [,<rate> [,<rate>]]] .... }; \end{verbatim}} A rate is any \REDUCE\ expression, and two rates are applicable only for forward and backward reactions. A reaction is coded as a linear sum of the series variables, with the operator $->$ for forward reactions and $<>$ for two-way reactions. The result is a system of explicit ordinary differential equations with polynomial righthand sides. As side effect the following variables are set: \newpage \begin{description} \item[{\tt rates}] \index{reacteqn ! {\tt rates}} A list of the rates in the system. \item[{\tt species}] \index{reacteqn ! {\tt species}} A list of the species in the system. \item[{\tt inputmat}] \index{reacteqn ! {\tt inputmat}} A matrix of the input coefficients. \item[{\tt outputmat}] \index{reacteqn ! {\tt outputmat}} A matrix of the output coefficients. \end{description} In the matrices the row number corresponds to the input reaction number, while the column number corresponds to the species index. If the rates are numerical values, it will be in most cases appropriate to select a \REDUCE\ evaluation mode for floating point numbers. {\tt Inputmat} and {\tt outputmat} can be used for linear algebra type investigations of the reaction system. The classical reaction matrix is the difference of these matrices; however, the two matrices contain more information than their differences because the appearance of a species on both sides is not reflected by the reaction matrix. \chapter{REDLOG: Logic System} \label{REDLOG} \typeout{{REDLOG: Logic System}} {\footnotesize \begin{center} Andreas Dolzmann \\ Thomas Sturm \\ University of Passau, Germany \\ e-mail: dolzmann@uni-passau.de, sturm@uni-passau.de \end{center} } \ttindex{REDLOG} \section{Introduction} This package extends \REDUCE\ to a computer logic system implementing symbolic algorithms on first-order formulas wrt.~temporarily fixed first-order languages and theories. \subsection{Contexts} REDLOG is designed for working with several languages and theories in the sense of first-order logic. Both a language and a theory make up a context. There are the following contexts available: \begin{description} \item[\textsc{OFSF}] \textsc{OF} stands for \emph{ordered fields}, which is a little imprecise. 
The quantifier elimination actually requires the more restricted class of \emph{real closed fields}, while most of the tool-like algorithms are generally correct for ordered fields. One usually has in mind real numbers with ordering when using \textsc{OFSF}. \item[\textsc{DVFSF}] \emph{Discretely valued fields}. This is for computing with formulas over classes of $p$-adic valued extension fields of the rationals, usually the fields of $p$-adic numbers for some prime $p$. \item[\textsc{ACFSF}] \emph{Algebraically closed fields} such as the complex numbers. \end{description} \subsection{Overview} REDLOG has its origins in the implementation of quantifier elimination procedures. Successfully applying such methods to both academic and real-world problems, the authors have over time developed a large set of formula-manipulating tools, many of which are by now interesting in their own right: \begin{itemize} \item Numerous tools for comfortably inputting, decomposing, and analyzing formulas. \item Several techniques for the \emph{simplification} of formulas. \item Various \emph{normal form computations}. The \emph{\textsc{CNF}/\textsc{DNF}} computation includes both Boolean and algebraic simplification strategies. The \emph{prenex normal form} computation minimizes the number of quantifier changes. \item \emph{Quantifier elimination} computes quantifier-free equivalents for given first-order formulas. For \textsc{OFSF} and \textsc{DVFSF} the formulas have to obey certain degree restrictions. \item The context \textsc{OFSF} allows a variant of quantifier elimination called \emph{generic quantifier elimination}: certain non-degeneracy assumptions are made on the parameters, which considerably speeds up the elimination. \item The contexts \textsc{OFSF} and \textsc{DVFSF} provide variants of (generic) quantifier elimination that additionally compute \emph{answers} such as satisfying sample points for existentially quantified formulas. \item \textsc{OFSF} includes linear \emph{optimization} techniques based on quantifier elimination. \end{itemize} To avoid ambiguities with other packages, all \textsc{REDLOG} functions and switches are prefixed by ``\texttt{RL}''. The package is loaded by typing: \qquad {\tt load\_package redlog;} \\ It is recommended to read the documentation which comes with this package. This manual chapter gives an overview of the features of \textsc{REDLOG}; it is by no means complete. \section{Context Selection} The context to be used has to be selected explicitly. One way to do this is to use the command \f{RLSET}\ttindex{RLSET}. As its argument it takes one of the valid choices \f{ACFSF}\ttindex{ACFSF} (algebraically closed fields standard form), \f{OFSF}\ttindex{OFSF} (ordered fields standard form), and \f{DVFSF}\ttindex{DVFSF} (discretely valued fields standard form). By default, \f{DVFSF}\ttindex{DVFSF} computes uniformly over the class of all $p$-adic valued fields. For the sake of efficiency, this can be restricted by means of an extra \f{RLSET}\ttindex{RLSET} argument. \f{RLSET}\ttindex{RLSET} returns the old setting as a list.
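As a minimal illustration (the values returned are not reproduced here), the context can be changed at any time, and the previous setting is handed back as a list:
{\small\begin{verbatim}
load_package redlog;
rlset ofsf;    % select ordered fields standard form
rlset acfsf;   % switch contexts; the old setting is returned as a list
\end{verbatim}}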
\section{Format and Handling of Formulas} \subsection{First-order Operators} REDLOG knows the following operators for constructing Boolean combinations and quantifications of atomic formulas: \begin{center} \begin{tabular}{llll} \f{NOT}\ttindex{NOT}: Unary & \f{AND}\ttindex{AND}: N-ary Infix & \f{OR}\ttindex{OR}: N-ary Infix & \f{IMPL}\ttindex{IMPL}: Binary Infix \\ \f{REPL}\ttindex{REPL}: Binary Infix & \f{EQUIV}\ttindex{EQUIV}: Binary Infix & \f{EX}\ttindex{EX}: Binary \\ \f{ALL}\ttindex{ALL}: Binary & \f{TRUE}\ttindex{TRUE}: Variable & \f{FALSE}\ttindex{FALSE}: Variable & \end{tabular} \end{center} The \f{EX} and the \f{ALL} operators are the quantifiers. Their first argument is the quantified variable, the second one a matrix formula. The operators \f{MKAND}\ttindex{MKAND} and \f{MKOR}\ttindex{MKOR} are available for the construction of large systematic conjunctions and disjunctions via for loops. They are used in the style of \f{SUM} and \f{COLLECT}. \vspace{0.5cm} {\bf Example:} {\small\begin{verbatim} 1: load_package redlog; 2: rlset ofsf; {} 3: g := for i:=1:3 mkand for j:=1:3 mkor if j<>i then mkid(x,i) + mkid(x,j)=0; true and (false or false or x1 + x2 = 0 or x1 + x3 = 0) and (false or x1 + x2 = 0 or false or x2 + x3 = 0) and (false or x1 + x3 = 0 or x2 + x3 = 0 or false) \end{verbatim}} \subsection{OFSF Operators} The \f{OFSF}\ttindex{OFSF} context implements {\it ordered fields} over the language of {\it ordered rings}. The following binary operators are available: \begin{center} \begin{tabular}{llllllll} \f{EQUAL}\ttindex{EQUAL} & \f{NEQ}\ttindex{NEQ} & \f{LEQ}\ttindex{LEQ} & \f{GEQ}\ttindex{GEQ} & \f{LESSP}\ttindex{LESSP} & \f{GREATERP}\ttindex{GREATERP} \end{tabular} \end{center} They can also be written as \f{=}, \f{<>}, \f{<=}, \f{>=}, \f{<}, and \f{>}. In {\sc OFSF} all right-hand sides must be zero; non-zero right-hand sides are immediately subtracted. \subsection{DVFSF Operators}\ttindex{DVFSF} Discretely valued fields are implemented as a one-sorted language using, in addition to \f{=} and \f{<>}, the binary operators \f{|}, \f{||}, \f{\~{}}, and \f{/\~{}}, which encode $\leq$, $<$, $=$, and $\neq$ in the value group, respectively. \begin{center} \begin{tabular}{llllll} \f{EQUAL}\ttindex{EQUAL} & \f{NEQ}\ttindex{NEQ} & \f{DIV}\ttindex{DIV} & \f{SDIV}\ttindex{SDIV} & \f{ASSOC}\ttindex{ASSOC} & \f{NASSOC}\ttindex{NASSOC} \\ \end{tabular} \end{center} \subsection{ACFSF Operators}\ttindex{ACFSF} For algebraically closed fields only equations and inequalities are allowed: \begin{center} \begin{tabular}{ll} \f{EQUAL}\ttindex{EQUAL} & \f{NEQ}\ttindex{NEQ} \end{tabular} \end{center} As in \textsc{OFSF}, they can be conveniently written as \f{=} and \f{<>}, respectively. All right-hand sides are zero. \subsection{Extended Built-in Commands} The operators \f{SUB}\ttindex{SUB}, \f{PART}\ttindex{PART}, and \f{LENGTH}\ttindex{LENGTH} work on formulas in a reasonable way. \subsection{Global Switches} The switch \f{RLSIMPL}\ttindex{RLSIMPL} causes the function \f{RLSIMPL} to be applied automatically at the expression evaluation stage. The switch \f{RLREALTIME}\ttindex{RLREALTIME} records the wall clock time needed for {\sc REDLOG} commands in seconds. The switch \f{RLVERBOSE}\ttindex{RLVERBOSE} toggles verbose output for some {\sc REDLOG} procedures. \section{Simplification} {\sc REDLOG} knows three types of simplifiers to reduce the size of a given first-order formula: the standard simplifier, tableau simplifiers, and Gr\"obner simplifiers.
\subsection{Standard Simplifier} The standard simplifier \f{RLSIMPL}\ttindex{RLSIMPL} returns a simplified equivalent of its argument formula. It is much faster though less powerful than the other simplifiers. A \emph{theory} can be passed as an optional argument. This is a list of atomic formulas assumed to hold. Simplification is then performed on the basis of these assumptions. \vspace{0.5cm} {\bf Example:} {\small\begin{verbatim} 4: rlsimpl g; (x1 + x2 = 0 or x1 + x3 = 0) and (x1 + x2 = 0 or x2 + x3 = 0) and (x1 + x3 = 0 or x2 + x3 = 0) \end{verbatim}} \subsection{Tableau Simplifier} The standard simplifier preserves the basic Boolean structure of a formula. The tableau methods, in contrast, provide a technique for changing the Boolean structure of a formula by constructing case distinctions. The function \f{RLATAB}\ttindex{RLATAB} automatically finds a suitable case distinction. Based on \f{RLATAB}, the function \f{RLITAB}\ttindex{RLITAB} iterates this process until no further simplification can be detected. There is a more fundamental entry point \f{RLTAB}\ttindex{RLTAB} for manually entering case distinctions. \subsection{Gr\"obner Simplifier} The Gr\"obner simplifier considers algebraic simplification rules between the atomic formulas of the input formula. The usual procedure called for Gr\"obner simplification is \f{RLGSN}\ttindex{RLGSN}. As with the standard simplifier, there is an optional theory argument. \begin{samepage} \vspace{0.5cm} {\bf Example:} {\small\begin{verbatim} 5: rlgsn(x*y+1<>0 or y*z+1<>0 or x-z=0); true \end{verbatim}} \end{samepage} \section{Normal Forms} \subsection{Boolean Normal Forms} \f{RLCNF}\ttindex{RLCNF} and \f{RLDNF}\ttindex{RLDNF} compute conjunctive resp.~disjunctive normal forms of their formula arguments. Subsumption and cut strategies are applied to decrease the number of clauses. \subsection{Miscellaneous Normal Forms} \f{RLNNF}\ttindex{RLNNF} computes a negation normal form. This is an {\tt and}-\texttt{or}-combination of atomic formulas. \f{RLPNF}\ttindex{RLPNF} computes a prenex normal form of its argument. That is, all quantifiers are moved outside such that they form a block in front of a quantifier-free matrix formula. \section{Quantifier Elimination and Variants} Quantifier elimination computes quantifier-free equivalents for given first-order formulas. For \textsc{OFSF} and \textsc{DVFSF}, REDLOG uses a technique based on elimination set ideas. The \textsc{OFSF} implementation is restricted to at most quadratic occurrences of the quantified variables, but includes numerous heuristic strategies for coping with higher degrees. The \textsc{DVFSF} implementation is restricted to formulas that are linear in the quantified variables. The \textsc{ACFSF} quantifier elimination is based on comprehensive Gr\"obner basis computation; there are no degree restrictions for this context. \subsection{Quantifier Elimination} \f{RLQE}\ttindex{RLQE} performs quantifier elimination on its argument formula. An optional theory argument in the style of \f{RLSIMPL} is supported. \begin{samepage} \vspace{0.5cm} {\bf Example:} {\small\begin{verbatim} 6: rlqe(ex(x,a*x**2+b*x+c>0),{a<0}); 2 4*a*c - b < 0 \end{verbatim}} \end{samepage} For \textsc{OFSF} and \textsc{DVFSF} there is a variant \f{RLQEA}\ttindex{RLQEA} available. Instead of a quantifier-free equivalent, it returns a list of condition-solution pairs containing, e.g., satisfying sample points for outermost existential quantifier blocks.
\begin{samepage} \vspace{0.5cm} {\bf Example:} {\small\begin{verbatim} 7: rlqea(ex(x,a*x**2+b*x+c>0),{a<0}); 2 {{4*a*c - b < 0, 2 - sqrt( - 4*a*c + b ) - 2*a*epsilon1 - b {x = -------------------------------------------}}} 2*a \end{verbatim}} \end{samepage} \subsection{Generic Quantifier Elimination} \textsc{OFSF} allows generic quantifier elimination \f{RLGQE}\ttindex{RLGQE}, which enlarges the theory by disequations, i.e.~\f{<>}-atomic formulas, wherever this supports the quantifier elimination. There is also generic quantifier elimination with answer available: \f{RLGQEA}\ttindex{RLGQEA}. \begin{samepage} \vspace{0.5cm} {\bf Example:} {\small\begin{verbatim} 8: rlgqe ex(x,a*x**2+b*x+c>0); {{a <> 0}, 2 4*a*c - b < 0 or a >= 0} \end{verbatim}} \end{samepage} \subsection{Linear Optimization} \f{RLOPT}\ttindex{RLOPT} uses quantifier elimination for linear optimization. It takes as arguments a list of constraints and the target function. The target function is minimized subject to the constraints. \chapter{RESET: Reset REDUCE to its initial state} \label{RESET} \typeout{{RESET: Code to reset REDUCE to its initial state}} {\footnotesize \begin{center} J. P. Fitch \\ School of Mathematical Sciences, University of Bath\\ BATH BA2 7AY, England \\[0.05in] e--mail: jpff@cs.bath.ac.uk \end{center} } \ttindex{RESET} This package defines a command {\tt RESETREDUCE} \ttindex{RESETREDUCE} that works through the history of previous commands, and clears any values which have been assigned, plus any rules, arrays and the like. It also sets the various switches to their initial values. It is not complete, but does work for most things that cause a gradual loss of space. \chapter{RESIDUE: A residue package} \label{RESIDUE} \typeout{{RESIDUE: A residue package}} {\footnotesize \begin{center} Wolfram Koepf\\ Konrad--Zuse--Zentrum f\"ur Informationstechnik Berlin \\ Takustra\"se 7 \\ D--14195 Berlin--Dahlem, Germany \\[0.05in] e--mail: Koepf@zib.de \end{center} } \ttindex{RESIDUE} \def\Res{\mathop{\rm Res}\limits} \newcommand{\C}{{\rm {\mbox{C{\llap{{\vrule height1.52ex}\kern.4em}}}}}} This package supports the calculation of residues. The residue $\Res_{z=a} f(z)$ of a function $f(z)$ at the point $a\in\C$ is defined as \[ \Res_{z=a} f(z)= \frac{1}{2 \pi i}\oint f(z)\,dz \;, \] with integration along a closed curve around $z=a$ with winding number 1. It contains two \REDUCE\ operators: \begin{itemize} \item {\tt residue(f,z,a)}\ttindex{residue} determines the residue of $f$ at the point $z=a$ if $f$ is meromorphic at $z=a$. The calculation of residues at essential singularities of $f$ is not supported. \item {\tt poleorder(f,z,a)}\ttindex{poleorder} determines the pole order of $f$ at the point $z=a$ if $f$ is meromorphic at $z=a$. \end{itemize} Note that both functions use the {\tt TAYLOR} package (chapter~\ref{TAYLOR}). 
{\small\begin{verbatim} load_package residue; residue(x/(x^2-2),x,sqrt(2)); 1 --- 2 poleorder(x/(x^2-2),x,sqrt(2)); 1 residue(sin(x)/(x^2-2),x,sqrt(2)); sqrt(2)*sin(sqrt(2)) ---------------------- 4 poleorder(sin(x)/(x^2-2),x,sqrt(2)); 1 residue((x^n-y^n)/(x-y)^2,x,y); n y *n ------ y poleorder((x^n-y^n)/(x-y)^2,x,y); 1 \end{verbatim}} \chapter{RLFI: REDUCE LaTeX formula interface} \label{RLFI} \typeout{{RLFI: REDUCE LaTeX formula interface}} {\footnotesize \begin{center} Richard Liska, Ladislav Drska\\ Computational Physics Group \\ Faculty of Nuclear Sciences and Physical Engineering\\ Czech Technical University in Prague, Brehova 7, 115 19 Prague 1 \\ Czech Republic\\[0.05in] e--mail: liska@siduri.fjfi.cvut.cz \end{center} } \ttindex{RLFI} The RLFI package provides printing of \REDUCE\ expressions in \LaTeX\ format, so that the output can be used directly for document production. Various mathematical constructions are supported by the interface including subscripts, superscripts, font changing, Greek letters, divide-bars, integral and sum signs, derivatives etc. The interface is connected to \REDUCE\ by three new switches and several statements. To activate the \LaTeX\ output mode the switch {\tt latex}\ttindex{latex} must be set {\tt on}. This switch causes all output to be written in the \LaTeX\ syntax of formulas. The switch {\tt VERBATIM}\ttindex{VERBATIM} is used for input printing control. If it is {\tt on}, input to the \REDUCE{} system is typeset in a \LaTeX{} verbatim environment after the line containing the string {\tt REDUCE Input:}. The switch {\tt lasimp}\ttindex{lasimp} controls the algebraic evaluation of input formulas. If it is {\tt on}, every formula is evaluated, simplified and written in the form given by ordinary \REDUCE\ statements and switches such as {\tt factor}, {\tt order}, {\tt rat} etc. When the {\tt lasimp} switch is {\tt off}, no evaluation, simplification or reordering of formulas is performed; \REDUCE\ acts only as a formula parser, and the output has exactly the same form as the input, differing only in syntax. The mode {\tt off lasimp} is designed especially for typesetting formulas whose structure the user needs to preserve. This switch has no meaning if the switch {\tt latex} is {\tt off}, and thus it affects \LaTeX\ output only. For every identifier used in the typeset \REDUCE\ formula the following properties can be defined by the statement {\tt defid}: \ttindex{defid} \begin{itemize} \item its printing symbol (Greek letters can be used). \item the font in which the symbol will be typeset. \item the accent which will be typeset above the symbol. \end{itemize} Symbols with indices are treated in \REDUCE\ as operators. Each index corresponds to an argument of the operator. The meaning of operator arguments (where one wants to typeset them) is declared by the statement\ttindex{defindex} {\tt defindex}. This statement causes the arguments to be typeset as subscripts or superscripts (on the left- or right-hand side of the operator) or as arguments of the operator. The statement {\tt mathstyle}\ttindex{mathstyle} defines the style of formula typesetting. The variable {\tt laline!*}\ttindex{laline"!*} defines the length of output lines. Fractions with horizontal divide bars are typeset by using the new \REDUCE\ infix operator \verb+\+. This operator is not algebraically simplified.
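A minimal sketch of the basic use of the interface follows; the expression is arbitrary and the generated \LaTeX\ output is not reproduced here:
{\small\begin{verbatim}
load_package rlfi;
on latex;         % all further output is printed as LaTeX formulas
df(sin(x)/x,x);   % any expression; its value is typeset in LaTeX syntax
off latex;        % return to the ordinary output mode
\end{verbatim}}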
During the typesetting of powers, the form of the base and the exponent is checked in order to determine the form of the typeset expression ({\em e.g.\ }use of a square-root symbol or of parentheses). Some special forms can be typeset by using \REDUCE\ prefix operators. These are as follows: \begin{itemize} \item {\tt int} - integral of an expression. \item {\tt dint} - definite integral of an expression. \item {\tt df} - derivative of an expression. \item {\tt pdf} - partial derivative of an expression. \item {\tt sum} - sum of expressions. \item {\tt product} - product of expressions. \item {\tt sqrt} - square root of an expression. \end{itemize} Some problems remain unsolved in the present version of the interface: \begin{itemize} \item breaking the formulas which do not fit on one line. \item automatic decision where to use divide bars in fractions. \item distinction of two- or more-character identifiers from the product of one-character symbols. \item typesetting of matrices. \end{itemize} \chapter{ROOTS: A REDUCE root finding package} \label{ROOTS} \typeout{{ROOTS: A REDUCE root finding package}} {\footnotesize \begin{center} Stanley L. Kameny \\ Los Angeles, U.S.A. \end{center} } \ttindex{ROOTS} The root finding package is designed so that it can be used as an independent package, or it can be integrated with and called by {\tt SOLVE}.\index{SOLVE package ! with ROOTS package} \section{Top Level Functions} The top level functions can be called either as symbolic operators from algebraic mode, or they can be called directly from symbolic mode with symbolic mode arguments. Outputs are expressed in forms that print out correctly in algebraic mode. \subsection{Functions that refer to real roots only} The three functions \f{REALROOTS}, \f{ISOLATER} and \f{RLROOTNO} can receive 1, 2 or 3 arguments. The first argument is the polynomial p, which can be complex and can have multiple or zero roots. If arg2 and arg3 are not present, all real roots are found. If the additional arguments are present, they restrict the region of consideration. \begin{itemize} \item If there are two arguments, the second is either POSITIVE or NEGATIVE, and the function will find only positive or negative roots respectively. \item If the arguments are (p,arg2,arg3) then \ttindex{EXCLUDE}\ttindex{POSITIVE}\ttindex{NEGATIVE}\ttindex{INFINITY} arg2 and arg3 must be r (a real number), EXCLUDE r, or a member of the list POSITIVE, NEGATIVE, INFINITY, -INFINITY. EXCLUDE r causes the value r to be excluded from the region. The order of the sequence arg2, arg3 is unimportant. Assuming that arg2 $\leq$ arg3 when both are numeric, then \begin{tabular}{l c l} \{-INFINITY,INFINITY\} & (or \{\}) & all roots; \\ \{arg2,NEGATIVE\} & represents & $-\infty < r < arg2$; \\ \{arg2,POSITIVE\} & represents & $arg2 < r < \infty$; \end{tabular} In each of the following, replacing an {\em arg} with EXCLUDE {\em arg} converts the corresponding inclusive $\leq$ to the exclusive $<$: \begin{tabular}{l c l} \{arg2,-INFINITY\} & represents & $-\infty < r \leq arg2$; \\ \{arg2,INFINITY\} & represents & $arg2 \leq r < \infty$; \\ \{arg2,arg3\} & represents & $arg2 \leq r \leq arg3$; \end{tabular} \item If zero is in the interval the zero root is included. \end{itemize} \begin{description} \ttindex{REALROOTS} \item[REALROOTS] finds the real roots of the polynomial p. Precision of computation is guaranteed to be sufficient to separate all real roots in the specified region. (cf. MULTIROOT for treatment of multiple roots.)
\ttindex{ISOLATER} \item[ISOLATER] produces a list of rational intervals, each containing a single real root of the polynomial p, within the specified region, but does not find the roots. \ttindex{RLROOTNO} \item[RLROOTNO] computes the number of real roots of p in the specified region, but does not find the roots. \end{description} \subsection{Functions that return both real and complex roots} \begin{description} \ttindex{ROOTS} \item[ROOTS p;] This is the main top level function of the roots package. It will find all roots, real and complex, of the polynomial p to an accuracy that is sufficient to separate them and which is a minimum of 6 decimal places. The value returned by ROOTS is a list of equations for all roots. In addition, ROOTS stores separate lists of real roots and complex roots in the global variables ROOTSREAL and ROOTSCOMPLEX.\ttindex{ROOTSREAL}\ttindex{ROOTSCOMPLEX} The output of ROOTS is normally sorted into a standard order: a root with smaller real part precedes a root with larger real part; roots with identical real parts are sorted so that larger imaginary part precedes smaller imaginary part. However, when a polynomial has been factored algebraically then the root sorting is applied to each factor separately. This makes the final resulting order less obvious. \ttindex{ROOTS\_AT\_PREC} \item[ROOTS\_AT\_PREC p;] Same as ROOTS except that roots values are returned to a minimum of the number of decimal places equal to the current system precision. \ttindex{ROOT\_VAL} \item[ROOT\_VAL p;] Same as ROOTS\_AT\_PREC, except that instead of returning a list of equations for the roots, a list of the root value is returned. This is the function that SOLVE calls. \ttindex{NEARESTROOT} \item[NEARESTROOT(p,s);] This top level function finds the root to which the method converges given the initial starting origin s, which can be complex. If there are several roots in the vicinity of s and s is not significantly closer to one root than it is to all others, the convergence could arrive at a root that is not truly the nearest root. This function should therefore be used only when the user is certain that there is only one root in the immediate vicinity of the starting point s. \ttindex{FIRSTROOT} \item[FIRSTROOT p;] ROOTS is called, but only a single root is computed. \end{description} \subsection{Other top level functions} \begin{description} \ttindex{GETROOT}\ttindex{ROOTS}\ttindex{REALROOTS}\ttindex{NEARESTROOTS} \item[GETROOT(n,rr);] If rr has the form of the output of ROOTS, REALROOTS, or NEARESTROOTS; GETROOT returns the rational, real, or complex value of the root equation. An error occurs if $n<1$ or $n>$ the number of roots in rr. \ttindex{MKPOLY} \item[MKPOLY rr;] This function can be used to reconstruct a polynomial whose root equation list is rr and whose denominator is 1. Thus one can verify that if $rr := ROOTS~p$, and $rr1 := ROOTS~MKPOLY~rr$, then $rr1 = rr$. (This will be true if {\tt MULTIROOT} and {\tt RATROOT} are ON, and {\tt ROUNDED} is off.) However, $MKPOLY~rr - NUM~p = 0$ will be true if and only if all roots of p have been computed exactly. \end{description} \section{Switches Used in Input} The input of polynomials in algebraic mode is sensitive to the switches {\tt COMPLEX}, {\tt ROUNDED}, and {\tt ADJPREC}. The correct choice of input method is important since incorrect choices will result in undesirable truncation or rounding of the input coefficients. 
Truncation or rounding may occur if {\tt ROUNDED} is on and one of the following is true: \begin{enumerate} \item a coefficient is entered in floating point form or rational form. \item {\tt COMPLEX} is on and a coefficient is imaginary or complex. \end{enumerate} Therefore, to avoid undesirable truncation or rounding: \begin{enumerate} \item {\tt ROUNDED} should be off and input should be in integer or rational form; or \item {\tt ROUNDED} can be on if it is acceptable to truncate or round input to the current value of system precision; or both {\tt ROUNDED} and {\tt ADJPREC} can be on, in which case system precision will be adjusted to accommodate the largest coefficient which is input; or \item if the input contains complex coefficients with very different magnitude for the real and imaginary parts, then all three switches {\tt ROUNDED}, {\tt ADJPREC} and {\tt COMPLEX} must be on. \end{enumerate} \begin{description} \item[integer and complex modes] (off {\tt ROUNDED}) any real polynomial can be input using integer coefficients of any size; integer or rational coefficients can be used to input any real or complex polynomial, independent of the setting of the switch {\tt COMPLEX}. These are the most versatile input modes, since any real or complex polynomial can be input exactly. \item[modes rounded and complex-rounded] (on {\tt ROUNDED}) polynomials can be input using integer coefficients of any size. Floating point coefficients will be truncated or rounded to a size dependent upon the system. If {\tt COMPLEX} is on, real coefficients can be input to any precision using integer form, but coefficients of imaginary parts of complex coefficients will be rounded or truncated. \end{description} \section{Root Package Switches} \begin{description} \ttindex{RATROOT} \item[RATROOT] (Default OFF) If {\tt RATROOT} is on all root equations are output in rational form. Assuming that the mode is {\tt COMPLEX} ({\em i.e.\ }{\tt ROUNDED} is off), the root equations are guaranteed to be input into \REDUCE\ without truncation or rounding errors. (Cf. the function MKPOLY described above.) \ttindex{MULTIROOT} \item[MULTIROOT] (Default ON) Whenever the polynomial has complex coefficients or has real coefficients and has multiple roots, as \ttindex{SQFRF} determined by the Sturm function, the function {\tt SQFRF} is called automatically to factor the polynomial into square-free factors. If {\tt MULTIROOT} is on, the multiplicity of the roots will be indicated in the output of ROOTS or REALROOTS by printing the root output repeatedly, according to its multiplicity. If {\tt MULTIROOT} is off, each root will be printed once, and all roots should normally be distinct. (Two identical roots should not appear. If the initial precision of the computation or the accuracy of the output was insufficient to separate two closely-spaced roots, the program attempts to increase accuracy and/or precision if it detects equal roots. If, however, the initial accuracy specified was too low, and it was not possible to separate the roots, the program will abort.) \end{description} \chapter[RSOLVE: Rational polynomial solver]%
{RSOLVE: \protect\\ Rational/integer polynomial solvers} \label{RSOLVE} \typeout{[RSOLVE: Rational polynomial solver]} {\footnotesize \begin{center} Francis J.
Wright \\ School of Mathematical Sciences, Queen Mary and Westfield College \\ University of London \\ Mile End Road \\ London E1 4NS, England \\[0.05in] e--mail: F.J.Wright@QMW.ac.uk \end{center} } \ttindex{RSOLVE} The exact rational zeros of a single univariate polynomial using fast modular methods can be calculated. The operator \verb|r_solve|\ttindex{R\_SOLVE} computes all rational zeros and the operator \verb|i_solve| \ttindex{I\_SOLVE} computes only integer zeros in a way that is slightly more efficient than extracting them from the rational zeros. The first argument is either a univariate polynomial expression or equation with integer, rational or rounded coefficients. Symbolic coefficients are not allowed. The argument is simplified to a quotient of integer polynomials and the denominator is silently ignored. Subsequent arguments are optional. If the polynomial variable is to be specified then it must be the first optional argument. However, since the variable in a non-constant univariate polynomial can be deduced from the polynomial it is unnecessary to specify it separately, except in the degenerate case that the first argument simplifies to either 0 or $0 = 0$. In this case the result is returned by \verb|i_solve| in terms of the operator \verb|arbint| and by \verb|r_solve| in terms of the (new) analogous operator \verb|arbrat|. The operator \verb|i_solve| will generally run slightly faster than \verb|r_solve|. The (rational or integer) zeros of the first argument are returned as a list and the default output format is the same as that used by \verb|solve|. Each distinct zero is returned in the form of an equation with the variable on the left and the multiplicities of the zeros are assigned to the variable \verb|root_multiplicities| as a list. However, if the switch {\ttfamily multiplicities} is turned on then each zero is explicitly included in the solution list the appropriate number of times (and \verb|root_multiplicities| has no value). \begin{sloppypar} Optional keyword arguments acting as local switches allow other output formats. They have the following meanings: \begin{description} \item[{\ttfamily separate}:] assign the multiplicity list to the global variable \verb|root_multiplicities| (the default); \item[{\ttfamily expand} or {\ttfamily multiplicities}:] expand the solution list to include multiple zeros multiple times (the default if the {\ttfamily multiplicities} switch is on); \item[{\ttfamily together}:] return each solution as a list whose second element is the multiplicity; \item[{\ttfamily nomul}:] do not compute multiplicities (thereby saving some time); \item[{\ttfamily noeqs}:] do not return univariate zeros as equations but just as values. \end{description} \end{sloppypar} \section{Examples} {\small\begin{verbatim} r_solve((9x^2 - 16)*(x^2 - 9), x); \end{verbatim}} \[ \left\{x=\frac{-4}{3},x=3,x=-3,x=\frac{4}{3}\right\} \] {\small\begin{verbatim} i_solve((9x^2 - 16)*(x^2 - 9), x); \end{verbatim}} \[ \{x=3,x=-3\} \] \chapter[SCOPE: Source code optimisation package] {SCOPE: REDUCE source code optimisation package} \label{SCOPE} \typeout{{SCOPE: REDUCE source code optimisation package}} {\footnotesize \begin{center} J.A. van Hulzen \\ University of Twente, Department of Computer Science \\ P.O. Box 217, 7500 AE Enschede \\ The Netherlands \\[0.05in] e--mail: infhvh@cs.utwente.nl \end{center} } SCOPE is a package to produce optimised versions of algebraic expressions. 
It can be used in two distinct fashions: as an adjunct to numerical code generation (using GENTRAN, described in chapter~\ref{GENTRAN}), or as a stand-alone way of investigating structure in an expression. When used with GENTRAN\ttindex{GENTRAN} it is sufficient to set the switch {\tt GENTRANOPT}\ttindex{GENTRANOPT} on, and GENTRAN will then use SCOPE internally. This is described in detail in the GENTRAN manual and the SCOPE documentation. As a stand-alone package SCOPE provides the operator {\tt OPTIMIZE}. \ttindex{OPTIMIZE} A SCOPE application uses the following syntax: {\small \begin{flushleft} \begin{tabular}{lcl} $<$SCOPE\_application$>$ & $\Rightarrow$ & {\tt OPTIMIZE} $<$object\_seq$>$ [{\tt INAME} $<$cse\_prefix$>$]\\ $<$object\_seq$>$ & $\Rightarrow$ & $<$object$>$[,$<$object\_seq$>$]\\ $<$object$>$ & $\Rightarrow$ & $<$stat$>~\mid~<$alglist$>~\mid~<$alglist\_production$>$ \\ $<$stat$>$ & $\Rightarrow$ & $<$name$>~<$assignment operator$>~<$expression$>$\\ $<$assignment operator$>$ & $\Rightarrow$ & $:=~\mid~::=~\mid~::=:~\mid~:=:$\\ $<$alglist$>$ & $\Rightarrow$ & \{$<$eq\_seq$>$\}\\ $<$eq\_seq$>$ & $\Rightarrow$ & $<$name$>~=~<$expression$>$[,$<$eq\_seq$>$]\\ $<$alglist\_production$>$ & $\Rightarrow$ & $<$name$>~\mid~<$function\_application$>$\\ $<$name$>$ & $\Rightarrow$ & $<$id$>~\mid~<$id$>(<$a\_subscript\_seq$>)$\\ $<$a\_subscript\_seq$>$ & $\Rightarrow$ & $<$a\_subscript$>$[,$<$a\_subscript\_seq$>$]\\ $<$a\_subscript$>$ & $\Rightarrow$ & $<$integer$>~\mid~<$integer infix\_expression$>$\\ $<$cse\_prefix$>$ & $\Rightarrow$ & $<$id$>$ \end{tabular} \end{flushleft}} A SCOPE action can be applied to one assignment statement, to a sequence of such statements separated by commas, or to a list of expressions. \index{SCOPE option ! {\tt INAME}} The optional use of the {\tt INAME} extension in an {\tt OPTIMIZE} command is introduced to allow the user to influence the generation of cse-names. The cse\_prefix is an identifier that is used to generate cse-names by extending it with an integer part. If the cse\_prefix consists of letters only, the initially selected integer part is 0. If the user-supplied cse\_prefix ends with an integer, its value is used as the initial integer part. {\small\begin{verbatim} z:=a^2*b^2+10*a^2*m^6+a^2*m^2+2*a*b*m^4+2*b^2*m^6+b^2*m^2; 2 2 2 6 2 2 4 2 6 2 2 z := a *b + 10*a *m + a *m + 2*a*b*m + 2*b *m + b *m OPTIMIZE z:=:z ; G0 := b*a G4 := m*m G1 := G4*b*b G2 := G4*a*a G3 := G4*G4 z := G1 + G2 + G0*(2*G3 + G0) + G3*(2*G1 + 10*G2) \end{verbatim}} It can be desirable to rerun an optimisation request with a restriction on the minimal size of the right-hand sides. The command \index{SCOPE function ! {\tt SETLENGTH}} \hspace*{1cm} {\tt SETLENGTH} $<$integer$>$\$ can be used to produce right-hand sides with a minimal arithmetic complexity, dictated by the value of its integer argument. Statements used to rename function applications are not affected by the {\tt SETLENGTH} command. The default setting is restored with the command \hspace*{1cm} {\tt RESETLENGTH}\$ \index{SCOPE function ! {\tt RESETLENGTH}} {\em Example:} {\small\begin{verbatim} SETLENGTH 2$ OPTIMIZE z:=:z INAME s$ 2 2 s1 := b *m 2 2 s2 := a *m 4 4 z := (a*b + 2*m )*a*b + 2*(s1 + 5*s2)*m + s1 + s2 \end{verbatim}} Details of the algorithm used are given in the SCOPE User's Manual. \chapter{SETS: A basic set theory package} \label{SETS} \typeout{{SETS: A basic set theory package}} {\footnotesize \begin{center} Francis J.
Wright \\ School of Mathematical Sciences, Queen Mary and Westfield College \\ University of London \\ Mile End Road \\ London E1 4NS, England \\[0.05in] e--mail: F.J.Wright@QMW.ac.uk \end{center} } \ttindex{SETS} The SETS package provides set theoretic operations on lists and represents the results as normal algebraic-mode lists, so that all other \REDUCE{} facilities that apply to lists can still be applied to lists that have been constructed by explicit set operations. \section{Infix operator precedence} The set operators are currently inserted into the standard \REDUCE{} precedence list (see section~\ref{sec-operators}) as follows: {\small\begin{verbatim} or and not member memq = set_eq neq eq >= > <= < subset_eq subset freeof + - setdiff union intersection * / ^ . \end{verbatim}} \section{Explicit set representation and MKSET} Explicit sets are represented by lists, and the operator {\tt mkset} converts a standard \REDUCE\ list into a set by removing duplicates. It also orders the members of the set so that the standard {\tt =} predicate will test set equality.\ttindex{MKSET} {\small\begin{verbatim} mkset {1,2,y,x*y,x+y}; {x + y,x*y,y,1,2} \end{verbatim}} The empty set is represented by the empty list \verb|{}|. \section{Union and intersection} The intersection operator has the name\ttindex{intersect} {\tt intersect}, and set union is denoted by\ttindex{union}{\tt union}. These operators will probably most commonly be used as binary infix operators applied to explicit sets: {\small\begin{verbatim} {1,2,3} union {2,3,4}; {1,2,3,4} {1,2,3} intersect {2,3,4}; {2,3} \end{verbatim}} \section{Symbolic set expressions} If one or more of the arguments evaluates to an unbound identifier then it is regarded as representing a symbolic implicit set, and the union or intersection will evaluate to an expression that still contains the union or intersection operator. These two operators are symmetric, and so if they remain symbolic their arguments will be sorted as for any symmetric operator. Such symbolic set expressions are simplified, but the simplification may not be complete in non-trivial cases. For example: {\small\begin{verbatim} a union b union {} union b union {7,3}; {3,7} union a union b a intersect {}; {} \end{verbatim}} Intersection distributes over union; this distribution is not applied by default, but it is implemented as a rule list assigned to the variable {\tt set\_distribution\_rule}, {\em e.g.} {\small\begin{verbatim} a intersect (b union c); (b union c) intersection a a intersect (b union c) where set_distribution_rule; a intersection b union a intersection c \end{verbatim}} \section{Set difference} The set difference operator is represented by the symbol \verb|\| and is always output using this symbol, although it can also be input using \ttindex{setdiff} {\tt setdiff}. It is a binary operator. {\small\begin{verbatim} {1,2,3} \ {2,4}; {1,3} a \ {1,2}; a\{1,2} a \ a; {} \end{verbatim}} \section{Predicates on sets} Set membership, inclusion and equality are all tested by binary infix operators. They can only be used within conditional statements or within the argument of the {\tt evalb}\ttindex{evalb} operator provided by this package, and they cannot remain symbolic -- a predicate that cannot be evaluated to a Boolean value causes a normal \REDUCE\ error. The {\tt evalb} operator provides a convenient shorthand for an {\tt if} statement designed purely to display the value of any Boolean expression (not only predicates defined in this package).
{\small\begin{verbatim} if a = a then true else false; true evalb(a = a); true if a = b then true else false; false \end{verbatim}} \subsection{Set membership} Set membership is tested by the predicate \ttindex{member}{\tt member}. Its left operand is regarded as a potential set element and its right operand {\em must\/} evaluate to an explicit set. There is currently no sense in which the right operand could be an implicit set. {\small\begin{verbatim} evalb(1 member {1,2,3}); true evalb(2 member {1,2} intersect {2,3}); true evalb(a member b); ***** b invalid as list \end{verbatim}} \subsection{Set inclusion} Set inclusion is tested by the predicate {\tt subset\_eq} \ttindex{subset\_eq} where {\tt a subset\_eq b} is true if the set $a$ is either a subset of or equal to the set $b$; strict inclusion is tested by the predicate {\tt subset}\ttindex{subset} where {\tt a subset b} is true if the set $a$ is {\em strictly\/} a subset of the set $b$ and is false if $a$ is equal to $b$. These predicates provide some support for symbolic set expressions, but this support is incomplete. {\small\begin{verbatim} evalb({1,2} subset_eq {1,2,3}); true evalb({1,2} subset_eq {1,2}); true evalb({1,2} subset {1,2}); false evalb(a subset a union b); true \end{verbatim}} \newpage {\small\begin{verbatim} evalb(a\b subset a); true \end{verbatim}} An undecidable predicate causes a normal \REDUCE\ error, {\em e.g.\ } {\small\begin{verbatim} evalb(a subset_eq {b}); ***** Cannot evaluate a subset_eq {b} as Boolean-valued set expression \end{verbatim}} \subsection{Set equality} As explained above, equality of two sets in canonical form can be reliably tested by the standard \REDUCE\ equality predicate ({\tt =}). \chapter{SPARSE: Sparse Matrices} \label{SPARSE MATRICES} \typeout{{SPARSE: Sparse Matrices}} {\footnotesize \begin{center} Stephen Scowcroft \\ Konrad-Zuse-Zentrum f\"ur Informationstechnik Berlin \\ Takustra\ss e 7 \\ D-14195 Berlin-Dahlem, Germany \\ \end{center} } \ttindex{SPARSE, Sparse matrices} \ttindex{MATRIX, see also SPARSE} \section{Introduction} This package extends the available matrix feature to enable calculations with sparse matrices. It also provides a selection of linear algebra functions that operate on sparse matrices. \\ The package is loaded by: {\tt load\_package sparse;} \section{Sparse Matrix Calculations} To extend the syntax to this class of calculations, an expression type {\tt sparse}\ttindex{SPARSE} is added. An identifier may be declared a sparse variable by the declaration {\tt sparse}. The size of the sparse matrix must be declared explicitly in the matrix declaration. The declaration \f{SPARSE} is similar to the declaration \f{MATRIX}. Once a matrix has been declared sparse, all its elements are treated as if they were initialized to 0. When a sparse matrix is printed, only the non-zero elements are shown, because only the non-zero elements are stored. To assign values to the elements of the declared sparse matrix we use the same syntax as for matrices. {\small\begin{verbatim} sparse aa(10,1),bb(200,200); aa(1,1):=10; bb(100,150):=a; \end{verbatim}} \section{Linear Algebra Package for Sparse Matrices} Most of the functions of this package are related to the functions of the linear algebra package \f{LINALG}. For further explanation and examples of the various functions please refer to the \f{LINALG} package.
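As a minimal sketch (not taken from the package documentation, and assuming that the sparse functions follow the calling conventions of their \f{LINALG} counterparts), the functions are applied to a sparse matrix in the same way as the \f{LINALG} functions are applied to an ordinary matrix:
{\small\begin{verbatim}
load_package sparse;
sparse a(3,3);
a(1,1):=1; a(2,2):=2; a(3,3):=3;

% assumed to mirror row_dim and col_dim from LINALG:
sprow_dim(a);
spcol_dim(a);
\end{verbatim}}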
\subsection{Basic matrix handling} {\small\begin{tabular}{l l l l} spadd\_columns \ttindex{SPADD\_COLUMNS} & spadd\_rows \ttindex{SPADD\_ROWS} & spadd\_to\_columns \ttindex{SPADD\_TO\_COLUMNS} & spadd\_to\_rows \ttindex{SPADD\_TO\_ROWS} \\ spaugment\_columns \ttindex{SPAUGMENT\_COLUMNS} & spchar\_poly \ttindex{SPCHAR\_POLY} & spcol\_dim \ttindex{SPCOL\_DIM} & spcopy\_into \ttindex{SPCOPY\_INTO} \\ spdiagonal \ttindex{SPDIAGONAL} & spextend \ttindex{SPEXTEND} & spfind\_companion \ttindex{SPFIND\_COMPANION} & spget\_columns \ttindex{SPGET\_COLUMNS} \\ spget\_rows \ttindex{SPGET\_ROWS} & sphermitian\_tp \ttindex{SPHERMITIAN\_TP} & spmatrix\_augment \ttindex{SPMATRIX\_AUGMENT} & spmatrix\_stack \ttindex{SPMATRIX\_STACK} \\ spminor \ttindex{SPMINOR} & spmult\_columns \ttindex{SPMULT\_COLUMNS} & spmult\_rows \ttindex{SPMULT\_ROWS} & sppivot \ttindex{SPPIVOT} \\ spremove\_columns \ttindex{SPREMOVE\_COLUMNS} & spremove\_rows \ttindex{SPREMOVE\_ROWS} & sprow\_dim \ttindex{SPROW\_DIM} & sprows\_pivot \ttindex{SPROWS\_PIVOT} \\ spstack\_rows \ttindex{SPSTACK\_ROWS} & spsub\_matrix \ttindex{SPSUB\_MATRIX} & spswap\_columns \ttindex{SPSWAP\_COLUMNS} & spswap\_entries \ttindex{SPSWAP\_ENTRIES} \\ spswap\_rows \ttindex{SPSWAP\_ROWS} \end{tabular}} \subsection{Constructors} Functions that create sparse matrices. \begin{tabular}{l l l l} spband\_matrix \ttindex{SPBAND\_MATRIX} & spblock\_matrix \ttindex{SPBLOCK\_MATRIX} & spchar\_matrix \ttindex{SPCHAR\_MATRIX} & spcoeff\_matrix \ttindex{SPCOEFF\_MATRIX} \\ spcompanion \ttindex{SPCOMPANION} & sphessian \ttindex{SPHESSIAN} & spjacobian \ttindex{SPJACOBIAN} & spjordan\_block \ttindex{SPJORDAN\_BLOCK} \\ spmake\_identity \ttindex{SPMAKE\_IDENTITY} \end{tabular} \subsection{High level algorithms} \begin{tabular}{l l l l} spchar\_poly \ttindex{SPCHAR\_POLY} & spcholesky \ttindex{SPCHOLESKY} & spgram\_schmidt \ttindex{SPGRAM\_SCHMIDT} & splu\_decom \ttindex{SPLU\_DECOM} \\ sppseudo\_inverse \ttindex{SPPSEUDO\_INVERSE} & svd \ttindex{SVD} \end{tabular} \subsection{Predicates} \begin{tabular}{l l l l} matrixp \ttindex{MATRIXP} & sparsematp \ttindex{SPARSEMATP} & squarep \ttindex{SQUAREP} & symmetricp \ttindex{SYMMETRICP} \end{tabular} \chapter[SPDE: Symmetry groups of {PDE}'s]% {SPDE: A package for finding symmetry groups of {PDE}'s} \label{SPDE} \typeout{{SPDE: A package for finding symmetry groups of {PDE}'s}} {\footnotesize \begin{center} Fritz Schwarz \\ GMD, Institut F1 \\ Postfach 1240 \\ 5205 St. Augustin, Germany \\[0.05in] e--mail: fritz.schwarz@gmd.de \end{center} } \ttindex{SPDE} The package SPDE provides a set of functions which may be applied to determine the symmetry group of Lie- or point-symmetries of a given system of partial differential equations. Preferably it is used interactively on a computer terminal. In many cases the determining system is solved completely automatically. In some other cases the user has to provide some additional input information for the solution algorithm to terminate. \section{System Functions and Variables} The symmetry analysis of partial differential equations logically falls into three parts. 
Accordingly the most important functions provided by the package are: \begin{table} \begin{center} \begin{tabular}{| c | c | }\hline Function name & Operation \\ \hline \hline \ttindex{CRESYS} CRESYS(\s{arguments}) & Constructs determining system \\ \hline \ttindex{SIMPSYS} SIMPSYS() & Solves determining system \\ \hline \ttindex{RESULT} RESULT() & Prints infinitesimal generators \\ & and commutator table \\ \hline \end{tabular} \end{center} \caption{SPDE Functions} \end{table} Some other useful functions for obtaining various kinds of output are: \begin{table} \begin{center} \begin{tabular}{| c | c |} \hline Function name & Operation \\ \hline \hline \ttindex{PRSYS} PRSYS() & Prints determining system \\ \hline \ttindex{PRGEN} PRGEN() & Prints infinitesimal generators \\ \hline \ttindex{COMM} COMM(U,V) & Prints commutator of generators U and V \\ \hline \end{tabular} \end{center} \caption{SPDE Useful Output Functions}\label{spde:useful} \end{table} SPDE expects a system of differential equations to be defined as the values of the operator {\tt deq} and other operators. A simple example follows. {\small\begin{verbatim} load_package spde; deq 1:=u(1,1)+u(1,2,2); deq(1) := u(1,2,2) + u(1,1) CRESYS deq 1; PRSYS(); GL(1):=2*df(eta(1),u(1),x(2)) - df(xi(2),x(2),2) - df(xi(2),x(1)) GL(2):=df(eta(1),u(1),2) - 2*df(xi(2),u(1),x(2)) GL(3):=df(eta(1),x(2),2) + df(eta(1),x(1)) GL(4):=df(xi(2),u(1),2) GL(5):=df(xi(2),u(1)) - df(xi(1),u(1),x(2)) GL(6):=2*df(xi(2),x(2)) - df(xi(1),x(2),2) - df(xi(1),x(1)) GL(7):=df(xi(1),u(1),2) GL(8):=df(xi(1),u(1)) GL(9):=df(xi(1),x(2)) The remaining dependencies xi(2) depends on u(1),x(2),x(1) xi(1) depends on u(1),x(2),x(1) eta(1) depends on u(1),x(2),x(1) \end{verbatim}} A detailed description can be found in the SPDE documentation and examples. 
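In a complete run the determining system constructed above would then be solved and the resulting generators displayed. A minimal sketch of these final steps, using only the functions from the tables above (output omitted), is: {\small\begin{verbatim}
SIMPSYS();    % solve the determining system
RESULT();     % print infinitesimal generators and commutator table
\end{verbatim}}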
\chapter{SPECFN: Package for special functions} \label{SPECFN} \typeout{{SPECFN: Package for special functions}} {\footnotesize \begin{center} Chris Cannam \& Winfried Neun \\ Konrad--Zuse--Zentrum f\"ur Informationstechnik Berlin \\ Takustra\ss e 7 \\ D--14195 Berlin--Dahlem, Germany \\[0.05in] e--mail: neun@zib.de \end{center} } \ttindex{SPECFN} \index{Orthogonal polynomials}
This package is designed to provide algebraic and numeric manipulations of several common special functions, namely:
\begin{itemize} \item Bernoulli Numbers and Polynomials; \item Euler numbers and Polynomials; \item Fibonacci numbers and Polynomials; \item Stirling Numbers; \item Binomial Coefficients; \item Pochhammer notation; \item The Gamma function; \item The Psi function and its derivatives; \item The Riemann Zeta function; \item The Bessel functions J and Y of the first and second kinds; \item The modified Bessel functions I and K; \item The Hankel functions H1 and H2; \item The Kummer hypergeometric functions M and U; \item The Beta function, and Struve, Lommel and Whittaker functions; \item The Airy functions; \item The Exponential Integral, the Sine and Cosine Integrals; \item The Hyperbolic Sine and Cosine Integrals; \item The Fresnel Integrals and the Error function; \item The Dilog function; \item The Polylogarithm and Lerch Phi function; \item Hermite Polynomials; \item Jacobi Polynomials; \item Legendre Polynomials; \item Associated Legendre Functions (Spherical and Solid Harmonics); \item Laguerre Polynomials; \item Chebyshev Polynomials; \item Gegenbauer Polynomials; \item Lambert's $\omega$ function; \item Jacobi Elliptic Functions and Integrals; \item 3j symbols, 6j symbols and Clebsch Gordan coefficients; \item and some well-known constants. \end{itemize}
\section{Simplification and Approximation} All of the operators supported by this package have certain algebraic simplification rules to handle special cases, poles, derivatives and so on. Such rules are applied whenever they are appropriate. However, if the {\tt ROUNDED} switch is on, numeric evaluation is also carried out. Unless otherwise stated below, the result of an application of a special function operator to real or complex numeric arguments in rounded mode will be approximated numerically whenever it is possible to do so. All approximations are to the current precision.
\section{Constants} \ttindex{Euler\_Gamma}\ttindex{Khinchin}\ttindex{Golden\_Ratio} \ttindex{Catalan} Some well-known constants are defined in the special function package. Important properties of these constants, which can be used to define them, are also known. Numerical values are computed at arbitrary precision if the switch ROUNDED is on. \begin{itemize} \item Euler\_Gamma : Euler's constant, also available as $-\psi(1)$; \item Catalan : Catalan's constant; \item Khinchin : Khinchin's constant; \item Golden\_Ratio : $\frac{1 + \sqrt{5}}{2}$ \end{itemize}
\section{Functions} The functions provided by this package are given in the following tables.
%%\index{Spherical and Solid Harmonics}\ttindex{SphericalHarmonicY} %%\ttindex{SolidHarmonicY} %%\ttindex{Jacobiamplitude} %%\ttindex{JacobiZeta} \begin{center} \fbox{ \begin{tabular}{r l}\\ Function & Operator \\\\ %\hline $\left( { n \atop m } \right)$ & {\tt Binomial(n,m)}\ttindex{Binomial}\index{Binomial coefficients} \\ Motzkin($n$) & {\tt Motzkin(n)}\ttindex{Motzkin}\index{Motzkin} \\ Bernoulli($n$) or $ B_n $ & {\tt Bernoulli(n)}\ttindex{Bernoulli}\index{Bernoulli numbers} \\ Euler($n$) or $ E_n $ & {\tt Euler(n)}\ttindex{Euler}\index{Euler polynomials} \\ Fibonacci($n$) or $ F_n $ & {\tt Fibonacci(n)}\ttindex{Fibonacci}\index{Fibonacci} \\ $S_n^{(m)}$ & {\tt Stirling1(n,m)}\ttindex{Stirling1}\index{Stirling numbers} \\ ${\bf S}_n^{(m)}$ & {\tt Stirling2(n,m)}\ttindex{Stirling2} \\ $B(z,w)$ & {\tt Beta(z,w)}\ttindex{Beta}\index{Beta function} \\ $\Gamma(z)$ & {\tt Gamma(z)}\ttindex{Gamma}\index{Gamma function} \\ incomplete Beta $B_x(a,b)$ & {\tt iBeta(a,b,x)}\ttindex{iBeta}\index{incomplete Beta function} \\ incomplete Gamma $\Gamma(a,z)$ & {\tt iGamma(a,z)}\ttindex{iGamma}\index{incomplete Gamma function} \\ $(a)_k$ & {\tt Pochhammer(a,k)}\ttindex{Pochhammer}\index{Pochhammer's symbol} \\ $\psi(z)$ & {\tt Psi(z)}\ttindex{Psi}\index{Psi function} \\ $\psi^{(n)}(z)$ & {\tt Polygamma(n,z)}\ttindex{Polygamma}\index{Polygamma functions} \\ Riemann's $\zeta(z)$ & {\tt Zeta(z)}\ttindex{Zeta}\index{Zeta function (Riemann's)} \\ $J_\nu(z)$ & {\tt BesselJ(nu,z)}\ttindex{BesselJ}\index{Bessel functions}\\ $Y_\nu(z)$ & {\tt BesselY(nu,z)}\ttindex{BesselY}\\ $I_\nu(z)$ & {\tt BesselI(nu,z)}\ttindex{BesselI}\\ $K_\nu(z)$ & {\tt BesselK(nu,z)}\ttindex{BesselK}\\ $H^{(1)}_\nu(z)$ & {\tt Hankel1(nu,z)}\ttindex{Hankel1}\index{Hankel functions}\\ $H^{(2)}_\nu(z)$ & {\tt Hankel2(nu,z)}\ttindex{Hankel2}\\ $B(z,w)$ & {\tt Beta(z,w)}\ttindex{Beta}\index{Beta function}\\ \end{tabular}} \end{center} \begin{center} \fbox{ \begin{tabular}{r l}\\ Function & Operator \\\\ %\hline ${\bf H}_{\nu}(z)$ & {\tt StruveH(nu,z)}\ttindex{StruveH}\index{Struve functions}\\ ${\bf L}_{\nu}(z)$ & {\tt StruveL(nu,z)}\ttindex{StruveL}\\ $s_{a,b}(z)$ & {\tt Lommel1(a,b,z)}\ttindex{Lommel1}\index{Lommel functions}\\ $S_{a,b}(z)$ & {\tt Lommel2(a,b,z)}\ttindex{Lommel2}\\ $Ai(z)$ & {\tt Airy\_Ai(z)}\ttindex{Airy\_Ai}\index{Airy functions}\\ $Bi(z)$ & {\tt Airy\_Bi(z)}\ttindex{Airy\_Bi}\\ $Ai'(z)$ & {\tt Airy\_Aiprime(z)}\ttindex{Airy\_Aiprime}\\ $Bi'(z)$ & {\tt Airy\_Biprime(z)}\ttindex{Airy\_Biprime}\\ $M(a, b, z)$ or $_1F_1(a, b; z)$ or $\Phi(a, b; z)$ & {\tt KummerM(a,b,z)}\ttindex{KummerM}\index{Kummer functions} \\ $U(a, b, z)$ or $z^{-a}{_2F_0(a, b; z)}$ or $\Psi(a, b; z)$ & {\tt KummerU(a,b,z)}\ttindex{KummerU}\\ $M_{\kappa,\mu}(z)$ & {\tt WhittakerM(kappa,mu,z)}\ttindex{WhittakerM}\index{Whittaker functions}\\ $W_{\kappa,\mu}(z)$ & {\tt WhittakerW(kappa,mu,z)}\ttindex{WhittakerW}\\ $B_n(x)$ & {\tt BernoulliP(n,x)}\ttindex{BernoulliP}\index{Bernoulli polynomials} \\ $E_n(x)$ & {\tt EulerP(n,x)}\ttindex{EulerP} \\ Fibonacci Polynomials $F_n(x)$ & {\tt FibonacciP(n,x)}\ttindex{FibonacciP}\index{Fibonacci polynomials} \\ $C_n^{(\alpha)}(x)$ & {\tt GegenbauerP(n,alpha,x)}\ttindex{GegenbauerP}\index{Gegenbauer polynomials}\\ $H_n(x)$ & {\tt HermiteP(n,x)}\ttindex{HermiteP}\index{Hermite polynomials} \\ $L_n(x)$ & {\tt LaguerreP(n,x)}\ttindex{LaguerreP}\index{Laguerre polynomials}\\ $L_n^{(m)}(x)$ & {\tt LaguerreP(n,m,x)}\ttindex{LaguerreP}\\ $P_n(x)$ & {\tt LegendreP(n,x)}\ttindex{LegendreP}\index{Legendre polynomials}\\ 
$P_n^{(m)}(x)$ & {\tt LegendreP(n,m,x)}\ttindex{LegendreP}\\ $P_n^{(\alpha,\beta)} (x)$ & {\tt JacobiP(n,alpha,beta,x)}\ttindex{JacobiP}\index{Jacobi's polynomials} \\ $U_n(x)$ & {\tt ChebyshevU(n,x)}\ttindex{ChebyshevU}\index{Chebyshev polynomials} \\ $T_n(x)$ & {\tt ChebyshevT(n,x)}\ttindex{ChebyshevT}\\ \end{tabular}} \end{center} \begin{center} \fbox{ \begin{tabular}{r l}\\ Function & Operator \\\\ %\hline $Y_n^{m}(x,y,z,r2)$ & {\tt SolidHarmonicY(n,m,x,y,z,r2)}\ttindex{SolidHarmonicY}\\ $Y_n^{m}(\theta,\phi)$ & {\tt SphericalHarmonicY(n,m,theta,phi)}\ttindex{SphericalHarmonicY}\\ $\left( {j_1 \atop m_1} {j_2 \atop m_2} {j_3 \atop m_3} \right)$ & {\tt ThreeJSymbol(\{j1,m1\},\{j2,m2\},\{j3,m3\})}\ttindex{ThreeJSymbol}\index{3j and 6j symbols}\\ $\left( {j_1m_1j_2m_2 | j_1j_2j_3 - m_3} \right)$ & {\tt Clebsch\_Gordan(\{j1,m1\},\{j2,m2\},\{j3,m3\})}\ttindex{Clebsch\_Gordan}\index{Clebsch Gordan coefficients}\\ $\left\{ {j_1 \atop l_1} {j_2 \atop l_2} {j_3 \atop l_3} \right\}$ & {\tt SixJSymbol(\{j1,j2,j3\},\{l1,l2,l3\})}\ttindex{SixJSymbol}\\ \end{tabular}} \end{center} \begin{center} \fbox{ \begin{tabular}{r l}\\ Function & Operator \\\\ %\hline $Si(z)$ & {\tt Si(z) }\ttindex{Si}\\ $si(z)$ & {\tt s\_i(z) }\ttindex{s\_i}\\ $Ci(z)$ & {\tt Ci(z) }\ttindex{Ci}\\ $Shi(z)$ & {\tt Shi(z) }\ttindex{Shi}\\ $Chi(z)$ & {\tt Chi(z) }\ttindex{Chi}\\ $erf(z)$ & {\tt erf(z) }\ttindex{erf}\\ $erfc(z)$ & {\tt erfc(z) }\ttindex{erfc}\\ $Ei(z)$ & {\tt Ei(z) }\ttindex{Ei}\\ $li(z)$ & {\tt li(z) }\ttindex{li}\\ $C(x)$ & {\tt Fresnel\_C(x)}\ttindex{Fresnel\_C} \\ $S(x)$ & {\tt Fresnel\_S(x)}\ttindex{Fresnel\_S} \\ \\ $dilog(z)$ & {\tt dilog(z)}\ttindex{dilog}\index{Dilogarithm function} \\ $Li_n(z)$ & {\tt Polylog(n,z)}\ttindex{Polylog}\index{Polylogarithm function} \\ Lerch $\Phi(z,s,a)$ & {\tt Lerch\_Phi(z,s,a)}\ttindex{Lerch\_Phi}\index{Lerch Phi function} \\ \\ $sn(u|m)$ & {\tt Jacobisn(u,m)}\ttindex{Jacobisn}\index{Jacobi Elliptic Functions and {Integrals}}\\ $dn(u|m)$ & {\tt Jacobidn(u,m)}\ttindex{Jacobidn}\\ $cn(u|m)$ & {\tt Jacobicn(u,m)}\ttindex{Jacobicn}\\ $cd(u|m)$ & {\tt Jacobicd(u,m)}\ttindex{Jacobicd}\\ $sd(u|m)$ & {\tt Jacobisd(u,m)}\ttindex{Jacobisd}\\ $nd(u|m)$ & {\tt Jacobind(u,m)}\ttindex{Jacobind}\\ $dc(u|m)$ & {\tt Jacobidc(u,m)}\ttindex{Jacobidc}\\ $nc(u|m)$ & {\tt Jacobinc(u,m)}\ttindex{Jacobinc}\\ $sc(u|m)$ & {\tt Jacobisc(u,m)}\ttindex{Jacobisc}\\ $ns(u|m)$ & {\tt Jacobins(u,m)}\ttindex{Jacobins}\\ $ds(u|m)$ & {\tt Jacobids(u,m)}\ttindex{Jacobids}\\ $cs(u|m)$ & {\tt Jacobics(u,m)}\ttindex{Jacobics}\\ $F(\phi|m)$ & {\tt EllipticF(phi,m)}\ttindex{EllipticF}\\ $K(m)$ & {\tt EllipticK(m)}\ttindex{EllipticK}\\ $E(\phi|m) or E(m)$ & {\tt EllipticE(phi,m) or}\\ ~ & {\tt EllipticE(m)}\ttindex{EllipticE}\\ $H(u|m), H_1(u|m), \Theta_1(u|m), \Theta(u|m)$ & {\tt EllipticTheta(a,u,m)}\ttindex{EllipticTheta}\\ $\theta_1(u|m), \theta_2(u|m), \theta_3(u|m), \theta_4(u|m)$ & {\tt EllipticTheta(a,u,m)}\ttindex{EllipticTheta}\\ $Z(u|m)$ & {\tt Zeta\_function(u,m)}\ttindex{Zeta\_function} \\ \\ Lambert $\omega(z)$ & {\tt Lambert\_W(z)}\ttindex{Lambert\_W}\index{Lambert $\omega$ function} \end{tabular}} \end{center} \chapter{SPECFN2: Special special functions} \label{SPECFN2} \typeout{{SPECFN2: Package for special special functions}} {\footnotesize \begin{center} Victor S. 
Adamchik \\ Byelorussian University \\ Minsk, Belarus \\[0.1in] and\\[0.05in] Winfried Neun \\ Konrad--Zuse--Zentrum f\"ur Informationstechnik Berlin \\ Takustra\ss e 7 \\ D--14195 Berlin--Dahlem, Germany \\[0.05in] e--mail: neun@zib.de \end{center} } \ttindex{SPECFN2} \index{Generalised Hypergeometric functions} \index{Meijer's G function}
The (generalised) hypergeometric functions \begin{displaymath} _pF_q \left( {{a_1, \ldots , a_p} \atop {b_1, \ldots ,b_q}} \Bigg\vert z \right) \end{displaymath} are defined in textbooks on special functions.
\section{\REDUCE{} operator HYPERGEOMETRIC} The operator {\tt hypergeometric} expects three arguments, namely the list of upper parameters (which may be empty), the list of lower parameters (which may also be empty), and the argument, e.g.: {\small\begin{verbatim} hypergeometric ({},{},z); Z E hypergeometric ({1/2,1},{3/2},-x^2); ATAN(X) --------- X \end{verbatim}}
\section{Enlarging the HYPERGEOMETRIC operator} Since hundreds of particular cases for the generalised hypergeometric functions can be found in the literature, one cannot expect that all cases are known to the {\tt hypergeometric} operator. Nevertheless the set of special cases can be augmented by adding rules to the \REDUCE{} system, {\em e.g.} {\small\begin{verbatim} let {hypergeometric({1/2,1/2},{3/2},-(~x)^2) => asinh(x)/x}; \end{verbatim}}
\chapter{SUM: A package for series summation} \label{SUM} \typeout{{SUM: A package for series summation}} {\footnotesize \begin{center} Fujio Kako \\ Department of Mathematics, Faculty of Science \\ Hiroshima University \\ Hiroshima 730, JAPAN \\[0.05in] e--mail: kako@ics.nara-wu.ac.jp \end{center} } \ttindex{SUM} \index{Gosper's Algorithm}\index{SUM operator}\index{PROD operator}
This package implements the Gosper algorithm for the summation of series. It defines operators SUM and PROD. The operator SUM returns the indefinite or definite summation of a given expression, and the operator PROD returns the product of the given expression. These are used with the syntax: \vspace{.1in} \noindent{\tt SUM}(EXPR:{\em expression}, K:{\em kernel}, [LOLIM:{\em expression} [, UPLIM:{\em expression}]]) \\ \noindent{\tt PROD}(EXPR:{\em expression}, K:{\em kernel}, [LOLIM:{\em expression} [, UPLIM:{\em expression}]]) If there is no closed form solution, these operators return the input unchanged. LOLIM and UPLIM are optional parameters specifying the lower and upper limits of the summation (or product), respectively. If UPLIM is not supplied, the upper limit is taken as K (the summation variable itself). For example: {\small\begin{verbatim} sum(n**3,n); sum(a+k*r,k,0,n-1); sum(1/((p+(k-1)*q)*(p+k*q)),k,1,n+1); prod(k/(k-2),k); \end{verbatim}} Gosper's algorithm succeeds whenever the ratio \[ \frac{\sum_{k=n_0}^n f(k)}{\sum_{k=n_0}^{n-1} f(k)} \] \noindent is a rational function of $n$. The function SUM!-SQ handles basic functions such as polynomials, rational functions and exponentials.\ttindex{SUM-SQ} The trigonometric functions sin, cos, {\em etc.\ }are converted to exponentials and then Gosper's algorithm is applied. The result is converted back into sin, cos, sinh and cosh.
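Returning to the examples above, the first of them sums the cubes of the integers, for which the well-known closed form is \[ \sum_{k=1}^{n} k^{3} = \frac{n^{2}(n+1)^{2}}{4}, \] and it is this kind of closed form that SUM returns whenever Gosper's algorithm succeeds.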
Summations of logarithms or products of exponentials are treated by the formula: \vspace{.1in} \hspace*{2em} \[ \sum_{k=n_0}^{n} \log f(k) = \log \prod_{k=n_0}^n f(k) \] \vspace{.1in} \hspace*{2em} \[ \prod_{k=n_0}^n \exp f(k) = \exp \sum_{k=n_0}^n f(k) \] \vspace{.1in}
Other functions can be summed by providing LET rules which must relate the functions evaluated at $k$ and $k - 1$ ($k$ being the summation variable). {\small\begin{verbatim} operator f,gg; % gg used to avoid possible conflict with high energy % physics operator. for all n,m such that fixp m let f(n+m)=if m > 0 then f(n+m-1)*(b*(n+m)**2+c*(n+m)+d) else f(n+m+1)/(b*(n+m+1)**2+c*(n+m+1)+d); for all n,m such that fixp m let gg(n+m)=if m > 0 then gg(n+m-1)*(b*(n+m)**2+c*(n+m)+e) else gg(n+m+1)/(b*(n+m+1)**2+c*(n+m+1)+e); sum(f(n-1)/gg(n),n); f(n) --------------- gg(n)*(d - e) \end{verbatim}}
\chapter{SUSY2: Super Symmetry} \label{SUSY2} \typeout{{SUSY2: Super Symmetry}} {\footnotesize \begin{center} Ziemowit Popowicz \\ Institute of Theoretical Physics, University of Wroclaw\\ pl. M. Borna 9 50-205 Wroclaw, Poland \\ e-mail: ziemek@ift.uni.wroc.pl \end{center} } \ttindex{SUSY2}
This package deals with supersymmetric functions and with the algebra of supersymmetric operators in the extended N=2 as well as in the nonextended N=1 supersymmetry. It allows one to realize the SuSy algebra in terms of differential operators, to compute the gradients of given SuSy Hamiltonians, and to obtain SuSy versions of soliton equations using the SuSy Lax approach. There are also many additional procedures used in the SuSy soliton approach, for example: conjugation of a given SuSy operator, computation of the general form of SuSy Hamiltonians (up to SuSy-divergence equivalence), and checking the validity of the Jacobi identity for SuSy Hamiltonian operators. To load the package, type \quad {\tt load susy2;} \\ \\ For full explanation and further examples, please refer to the detailed documentation and the susy2.tst file which comes with this package.
\section{Operators}
\subsection{Operators for constructing Objects} The superfunctions are represented in this package by \f{BOS}(f,n,m) for superbosons and \f{FER}(f,n,m) for superfermions. The first index denotes the name of the given superobject, the second the number of SuSy derivatives, and the last the number of usual derivatives. \\ In addition to the superfunctions themselves, the inverse and the exponential of superbosons can also be defined (where the inverse is defined as \f{BOS}(f,n,m,-1) with the property {\it bos(f,n,m,-1)*bos(f,n,m,1)=1}). The exponential of the superboson function is \f{AXP}(\f{BOS}(f,0,0)). \\ The operators \f{FUN} and \f{GRAS} denote the classical and the Grassmann function, respectively. \\ Three different realizations of supersymmetric derivatives are implemented. To select the traditional realization declare \f{LET TRAD}. To select the chiral or chiral1 algebra declare \f{LET CHIRAL} or \f{LET CHIRAL1}. For usual differentiation the operator \f{D}(1) stands for right and \f{D}(2) for left differentiation. SuSy derivatives are denoted as {\it der} and {\it del}. \f{DER} and \f{DEL} take a single component argument and represent the left and right operators. The action of these operators on the superfunctions depends on the choice of the supersymmetry algebra.
\flushleft {\small\begin{center} \begin{tabular}{ l l l l l l} \f{BOS}(f,n,m)\ttindex{BOS} & \f{BOS}(f,n,m,k)\ttindex{BOS} & \f{FER}(f,n,m)\ttindex{FER} & \f{AXP}(f)\ttindex{AXP} & \f{FUN}(f,n)\ttindex{FUN} & \f{FUN}(f,n,m)\ttindex{FUN} \cr \f{GRAS}(f,n)\ttindex{GRAS} & \f{AXX}(f)\ttindex{AXX} & \f{D}(1)\ttindex{D} & \f{D}(2)\ttindex{D} & \f{D}(3)\ttindex{D} & \f{D}(-1)\ttindex{D} \cr \f{D}(-2)\ttindex{D} & \f{D}(-3)\ttindex{D} & \f{D}(-4)\ttindex{D} & \f{DR}(-n)\ttindex{DR} & \f{DER}(1)\ttindex{DER} & \f{DER}(2)\ttindex{DER} \cr \f{DEL}(1)\ttindex{DEL} & \f{DEL}(2)\ttindex{DEL} \end{tabular} \end{center} } \vspace{1cm} {\bf Example}: {\small\begin{verbatim} 1: load susy2; 2: bos(f,0,2,-2)*axp(fer(k,1,2))*del(1); %first susy derivative 2*fer(f,1,2)*bos(f,0,2,-3)*axp(fer(k,1,2)) - bos(k,0,3)*bos(f,0,2,-2)*axp(fer(k,1,2)) + del(1)*bos(f,0,2,-2)*axp(fer(k,1,2)) 3: sub(del=der,ws); bos(f,0,2,-2)*axp(fer(k,1,2))*der(1) \end{verbatim}}
\subsection{Commands} Many operators act on superfunction objects. Some of them are introduced briefly here. \begin{itemize} \item The operators \f{FPART}, \f{BPART}, \f{B\_PART} and \f{BF\_PART} make it possible to compute the coordinates of arbitrary SuSy expressions. \item The three operators \f{W\_COMB}, \f{FCOMB} and \f{PSE\_ELE} construct the possible combinations of superfunctions and super-pseudo-differential elements with given conformal dimensions. \item The three operators \f{S\_PART}, \f{D\_PART} and \f{SD\_PART} are implemented to obtain the components of a (pseudo-)SuSy element. \item \f{RZUT} is used to obtain the projection onto the invariant subspace (with respect to the commutator) of the algebra of pseudo-SuSy-differential operators. \item To extract the list of like combinations of superfunctions and (SuSy) derivatives from a given operator-valued expression, the operators \f{LYST}, \f{LYST1} and \f{LYST2} are provided.
\end{itemize} \begin{center} \begin{tabular}{ l l} \f{FPART}(expression)\ttindex{FPART} & \f{BPART}(expression)\ttindex{BPART} \cr \f{BF\_PART}(expression,n)\ttindex{BF\_PART} & \f{B\_PART}(expression,n)\ttindex{B\_PART} \cr \f{PR}(n,expression)\ttindex{PR} & \f{PG}(n,expression)\ttindex{PG} \cr \f{W\_COMB}(\{\{f,n,x\},...\},m,z,y)\ttindex{W\_COMB} & \f{FCOMB}(\{\{f,n,x\},...\},m,z,y)\ttindex{FCOMB} \cr \f{PSE\_ELE}(n,\{\{f,n\},...\},z)\ttindex{PSE\_ELE} \cr \f{S\_PART}(expression,n)\ttindex{S\_PART} & \f{D\_PART}(expression,n)\ttindex{D\_PART} \cr \f{SD\_PART}(expression,n,m)\ttindex{SD\_PART} & \f{CP}(expression)\ttindex{CP} \cr \f{RZUT}(expression,n)\ttindex{RZUT} & \f{LYST}(expression)\ttindex{LYST} \cr \f{LYST1}(expression)\ttindex{LYST1} & \f{LYST2}(expression)\ttindex{LYST2} \cr \f{CHAN}(expression)\ttindex{CHAN} & \f{ODWA}(expression)\ttindex{ODWA} \cr \f{GRA}(expression,f)\ttindex{GRA} & \f{DYW}(expression,f)\ttindex{DYW} \cr \f{WAR}(expression,f)\ttindex{WAR} & \f{DOT\_HAM}(equations,expression)\ttindex{DOT\_HAM} \cr \f{N\_GAT}(operator,list)\ttindex{N\_GAT} & \f{FJACOB}(operator,list)\ttindex{FJACOB} \cr \f{JACOB}(operator,list,\{$\alpha,\beta,\gamma$\})\ttindex{JACOB} & \f{MACIERZ}(expression,x,y)\ttindex{MACIERZ} \cr \f{S\_INT}(number,expression,list)\ttindex{S\_INT} \end{tabular} \end{center} \vspace{1cm} {\bf Example}: {\small\begin{verbatim} 4: xxx:=fer(f,2,3); xxx := fer(f,2,3) 5: fpart(xxx); % all components - fun(f0,4) + 2*fun(f1,3) gras(ff2,4) {gras(ff2,3), ----------------------------,0, -------------} 2 2 6: bpart(xxx); % bosonic sector - fun(f0,4) + 2*fun(f1,3) {0,----------------------------,0,0} 2 9: b_part(xxx,1); %the given component in the bosonic sector - fun(f0,4) + 2*fun(f1,3) ---------------------------- 2 \end{verbatim}}
\section{Options} There are several options defined in this package. Please note that they are activated by typing \f{let $<$option$>$}. See also above. \\ The options \f{TRAD}, \f{CHIRAL} and \f{CHIRAL1} select the different realizations of the supersymmetric derivatives. By default the traditional algebra is selected. \\ If the command {\tt LET INVERSE} is used, then three-index {\it bos} objects are transformed into four-index objects. \begin{center} \begin{tabular}{ l l l l l l } \f{TRAD}\ttindex{TRAD} & \f{CHIRAL}\ttindex{CHIRAL} & \f{CHIRAL1}\ttindex{CHIRAL1} & \f{INVERSE}\ttindex{INVERSE} & \f{DRR}\ttindex{DRR} & \f{NODRR}\ttindex{NODRR} \end{tabular} \end{center} \vspace{1cm} {\bf Example}: {\small\begin{verbatim} 10: let inverse; 11: bos(f,0,3)**3*bos(k,3,1)**40*bos(f,0,3,-2); bos(k,3,1,40)*bos(f,0,3,1); 12: clearrules inverse; 13: xxx:=fer(f,1,2)*bos(k,0,2,-2); xxx := fer(f,1,2)*bos(k,0,2,-2) 14: pr(1,xxx); % first susy derivative - 2*fer(k,1,2)*fer(f,1,2)*bos(k,0,2,-3) + bos(k,0,2,-2)*bos(f,0,3) 15: pr(2,xxx); %second susy derivative - 2*fer(k,2,2)*fer(f,1,2)*bos(k,0,2,-3) - bos(k,0,2,-2)*bos(f,3,2) 16: clearrules trad; 17: let chiral; % changing to chiral algebra 18: pr(1,xxx); - 2*fer(k,1,2)*fer(f,1,2)*bos(k,0,2,-3) \end{verbatim}}
\chapter{SYMMETRY: Symmetric matrices} \label{SYMMETRY} \typeout{{SYMMETRY: Operations on symmetric matrices}} {\footnotesize \begin{center} Karin Gatermann\\ Konrad--Zuse--Zentrum f\"ur Informationstechnik Berlin \\ Takustra\ss e 7 \\ D--14195 Berlin--Dahlem, Germany \\[0.05in] e--mail: gatermann@zib.de \end{center} } \ttindex{SYMMETRY} The SYMMETRY package provides procedures that compute symmetry-adapted bases and block diagonal forms of matrices which have the symmetry of a group.
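The package is loaded in the usual way (assuming the standard package name): {\tt load\_package symmetry;}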
\section{Operators for linear representations} The data structure for a linear representation, a {\em representation}, is a list consisting of the group identifier and equations which assign matrices to the generators of the group. {\bf Example:} {\small\begin{verbatim} rr:=mat((0,1,0,0), (0,0,1,0), (0,0,0,1), (1,0,0,0)); sp:=mat((0,1,0,0), (1,0,0,0), (0,0,0,1), (0,0,1,0)); representation:={D4,rD4=rr,sD4=sp}; \end{verbatim}} For orthogonal (unitary) representations the following operators are available.
{\tt canonicaldecomposition(representation);}\ttindex{canonicaldecomposition} returns an equation giving the canonical decomposition of the linear representation.
{\tt character(representation);}\ttindex{character} computes the character of the linear representation. The result is a list of the group identifier and of lists consisting of a list of group elements in one equivalence class and a real or complex number.
{\tt symmetrybasis(representation,nr);}\ttindex{symmetrybasis} computes the basis of the isotypic component corresponding to the irreducible representation of type nr. If the nr-th irreducible representation is multidimensional, the basis is symmetry adapted. The output is a matrix.
{\tt symmetrybasispart(representation,nr);}\ttindex{symmetrybasispart} is similar to {\tt symmetrybasis}, but for multidimensional irreducible representations only the first part of the symmetry adapted basis is computed.
{\tt allsymmetrybases(representation);}\ttindex{allsymmetrybases} is similar to {\tt symmetrybasis} and {\tt symmetrybasispart}, but the bases of all isotypic components are computed and thus a complete coordinate transformation is returned.
{\tt diagonalize(matrix,representation);}\ttindex{diagonalize} returns the block diagonal form of the matrix, which must have the symmetry of the given linear representation; otherwise an error message occurs.
\section{Display Operators} Operators are provided for accessing the information stored for a group and for adding knowledge for other groups. This is explained in detail in the Symmetry on-line documentation.
\chapter{TAYLOR: Manipulation of Taylor series} \label{TAYLOR} \typeout{{TAYLOR: Manipulation of Taylor series}} {\footnotesize \begin{center} Rainer Sch\"opf\\ Zentrum f\"ur Datenverarbeitung der Universit\"at Mainz\\ Anselm-Franz-von-Bentzel-Weg~12\\ D-55055 Mainz, Germany \\[0.05in] e--mail: Schoepf@Uni-Mainz.DE \end{center} } \ttindex{TAYLOR}\index{Taylor Series}\index{TAYLOR package} \index{Laurent series}
The TAYLOR package of \REDUCE\ allows Taylor expansion in one or several variables, and efficient manipulation of the resulting Taylor series. Capabilities include basic operations (addition, subtraction, multiplication and division), and also application of certain algebraic and transcendental functions. To a certain extent, Laurent and Puiseux expansions can be performed as well. In many cases, separable singularities are detected and factored out.
\noindent {\tt TAYLOR}(EXP:{\em exprn}[,VAR:{\em kernel}, VAR$_0$:{\em exprn},ORDER:{\em integer}]\ldots):{\em exprn} where EXP is the expression to be expanded. It can be any \REDUCE\ object, even an expression containing other Taylor kernels. VAR is the kernel with respect to which EXP is to be expanded. VAR$_0$ denotes the point about which and ORDER the order up to which expansion is to take place. If more than one (VAR, VAR$_0$, ORDER) triple is specified {\tt TAYLOR} will expand its first argument independently with respect to each variable in turn.
For example, {\small\begin{verbatim} taylor(e^(x^2+y^2),x,0,2,y,0,2); \end{verbatim}} will calculate the Taylor expansion up to order $X^{2}*Y^{2}$. Note that once the expansion has been done it is not possible to calculate higher orders. Instead of a kernel, VAR may also be a list of kernels. In this case expansion will take place in a way so that the {\em sum\/} of the degrees of the kernels does not exceed ORDER. If VAR$_0$ evaluates to the special identifier \verb|INFINITY| {\tt TAYLOR} tries to expand EXP in a series in 1/VAR. The expansion is performed variable per variable, {\em i.e.\ }in the example above by first expanding $\exp(x^{2}+y^{2})$ with respect to $x$ and then expanding every coefficient with respect to $y$. \index{IMPLICIT\_TAYLOR operator}\index{INVERSE\_TAYLOR} There are two extra operators to compute the Taylor expansions of implicit and inverse functions: \noindent {\tt IMPLICIT\_TAYLOR}(F:{\em exprn},VAR1,VAR2:{\em kernel},\\ \hphantom{{\tt IMPLICIT\_TAYLOR}(}VAR1$_0$,VAR2$_0$:{\em exprn}, ORDER:{\em integer}):{\em exprn} takes a function F depending on two variables VAR1 and VAR2 and computes the Taylor series of the implicit function VAR2(VAR1) given by the equation F(VAR1,VAR2) = 0. For example, {\small\begin{verbatim} implicit_taylor(x^2 + y^2 - 1,x,y,0,1,5); \end{verbatim}} \noindent {\tt INVERSE\_TAYLOR}(F:{\em exprn},VAR1,VAR2:{\em kernel},\\ \hphantom{{\tt INVERSE\_TAYLOR}(}VAR1$_0$:{\em exprn}, ORDER:{\em integer}):{\em exprn} takes a function F depending on VAR1 and computes the Taylor series of the inverse of F with respect to VAR2. For example, {\small\begin{verbatim} inverse_taylor(exp(x)-1,x,y,0,8); \end{verbatim}} \index{TAYLORPRINTTERMS variable} When a Taylor kernel is printed, only a certain number of (non-zero) coefficients are shown. If there are more, an expression of the form \verb|(|$n$\verb| terms)| is printed to indicate how many non-zero terms have been suppressed. The number of terms printed is given by the value of the shared algebraic variable \verb|TAYLORPRINTTERMS|. Allowed values are integers and the special identifier \verb|ALL|. The latter setting specifies that all terms are to be printed. The default setting is $5$. \index{TAYLORKEEPORIGINAL switch} If the switch \verb|TAYLORKEEPORIGINAL| is set to \verb|ON| the original expression EXP is kept for later reference. It can be recovered by means of the operator \hspace*{2em} {\tt TAYLORORIGINAL}(EXP:{\em exprn}):{\em exprn} An error is signalled if EXP is not a Taylor kernel or if the original expression was not kept, {\em i.e.\ }if \verb|TAYLORKEEPORIGINAL| was \verb|OFF| during expansion. The template of a Taylor kernel, {\em i.e.\ } the list of all variables with respect to which expansion took place together with expansion point and order can be extracted using \ttindex{TAYLORTEMPLATE} \hspace*{2em} {\tt TAYLORTEMPLATE}(EXP:{\em exprn}):{\em list} This returns a list of lists with the three elements (VAR,VAR0,ORDER). As with \verb|TAYLORORIGINAL|, an error is signalled if EXP is not a Taylor kernel. \hspace*{2em} {\tt TAYLORTOSTANDARD}(EXP:{\em exprn}):{\em exprn} converts all Taylor kernels in EXP into standard form and \ttindex{TAYLORTOSTANDARD} resimplifies the result. \hspace*{2em} {\tt TAYLORSERIESP}(EXP:{\em exprn}):{\em boolean} may be used to determine if EXP is a Taylor kernel. 
\ttindex{TAYLORSERIESP} Note that this operator is subject to the same restrictions as, {\em e.g.}, ORDP or NUMBERP, {\em i.e.\ }it may only be used in boolean expressions in \verb|IF| or \verb|LET| statements. Finally there is \hspace*{2em} {\tt TAYLORCOMBINE}(EXP:{\em exprn}):{\em exprn} which tries to combine all Taylor kernels found in EXP into one. \ttindex{TAYLORCOMBINE} Operations currently possible are: \index{Taylor series ! arithmetic} \begin{itemize} \item Addition, subtraction, multiplication, and division. \item Roots, exponentials, and logarithms. \item Trigonometric and hyperbolic functions and their inverses. \end{itemize} Application of unary operators like \verb|LOG| and \verb|ATAN| will nearly always succeed. For binary operations their arguments have to be Taylor kernels with the same template. This means that the expansion variable and the expansion point must match. Expansion order is not so important, different order usually means that one of them is truncated before doing the operation. \ttindex{TAYLORKEEPORIGINAL}\ttindex{TAYLORCOMBINE} If \verb|TAYLORKEEPORIGINAL| is set to \verb|ON| and if all Taylor kernels in \verb|exp| have their original expressions kept \verb|TAYLORCOMBINE| will also combine these and store the result as the original expression of the resulting Taylor kernel. \index{TAYLORAUTOEXPAND switch} There is also the switch \verb|TAYLORAUTOEXPAND| (see below). There are a few restrictions to avoid mathematically undefined expressions: it is not possible to take the logarithm of a Taylor kernel which has no terms ({\em i.e.\ }is zero), or to divide by such a beast. There are some provisions made to detect singularities during expansion: poles that arise because the denominator has zeros at the expansion point are detected and properly treated, {\em i.e.\ }the Taylor kernel will start with a negative power. (This is accomplished by expanding numerator and denominator separately and combining the results.) Essential singularities of the known functions (see above) are handled correctly. \index{Taylor series ! differentiation} Differentiation of a Taylor expression is possible. Differentiating with respect to one of the Taylor variables will decrease the order by one. \index{Taylor series ! substitution} Substitution is a bit restricted: Taylor variables can only be replaced by other kernels. There is one exception to this rule: one can always substitute a Taylor variable by an expression that evaluates to a constant. Note that \REDUCE\ will not always be able to determine that an expression is constant. \index{Taylor series ! integration} Only simple Taylor kernels can be integrated. More complicated expressions that contain Taylor kernels as parts of themselves are automatically converted into a standard representation by means of the TAYLORTOSTANDARD operator. In this case a suitable warning is printed. \index{Taylor series ! reversion} It is possible to revert a Taylor series of a function $f$, {\em i.e.}, to compute the first terms of the expansion of the inverse of $f$ from the expansion of $f$. This is done by the operator \hspace*{2em} {\tt TAYLORREVERT}(EXP:{\em exprn},OLDVAR:{\em kernel}, NEWVAR:{\em kernel}):{\em exprn} EXP must evaluate to a Taylor kernel with OLDVAR being one of its expansion variables. 
Example: {\small\begin{verbatim} taylor (u - u**2, u, 0, 5); taylorrevert (ws, u, x); \end{verbatim}} This package introduces a number of new switches: \begin{itemize} \index{TAYLORAUTOCOMBINE switch} \item If \verb|TAYLORAUTOCOMBINE| is set to \verb|ON| \REDUCE\ automatically combines Taylor expressions during the simplification process. This is equivalent to applying \verb|TAYLORCOMBINE| to every expression that contains Taylor kernels. Default is \verb|ON|. \index{TAYLORAUTOEXPAND switch} \item \verb|TAYLORAUTOEXPAND| makes Taylor expressions ``contagious'' in the sense that \verb|TAYLORCOMBINE| tries to Taylor expand all non-Taylor subexpressions and to combine the result with the rest. Default is \verb|OFF|. \index{TAYLORKEEPORIGINAL switch} \item \verb|TAYLORKEEPORIGINAL|, if set to \verb|ON|, forces the package to keep the original expression, {\em i.e.\ }the expression that was Taylor expanded. All operations performed on the Taylor kernels are also applied to this expression which can be recovered using the operator \verb|TAYLORORIGINAL|. Default is \verb|OFF|. \index{TAYLORPRINTORDER switch} \item \verb|TAYLORPRINTORDER|, if set to \verb|ON|, causes the remainder to be printed in big-$O$ notation. Otherwise, three dots are printed. Default is \verb|ON|. \end{itemize} \chapter{TPS: A truncated power series package} \label{TPS} \typeout{{TPS: A truncated power series package}} {\footnotesize \begin{center} Alan Barnes \\ Dept. of Computer Science and Applied Mathematics \\ Aston University, Aston Triangle, \\ Birmingham B4 7ET, England \\[0.05in] e--mail: barnesa@aston.ac.uk \\[0.1in] and \\[0.1in] Julian Padget \\ School of Mathematics, University of Bath \\ Bath, BA2 7AY, England \\[0.05in] e--mail: jap@maths.bath.ac.uk \end{center} } \ttindex{TPS}\ttindex{PS} \index{power series}\index{truncated power series} \index{Laurent series expansions} This package implements formal Laurent series expansions in one variable using the domain mechanism of \REDUCE. This means that power series objects can be added, multiplied, differentiated {\em etc}. like other first class objects in the system. A lazy evaluation scheme is used in the package and thus terms of the series are not evaluated until they are required for printing or for use in calculating terms in other power series. The series are extendible giving the user the impression that the full infinite series is being manipulated. The errors that can sometimes occur using series that are truncated at some fixed depth (for example when a term in the required series depends on terms of an intermediate series beyond the truncation depth) are thus avoided. \newpage \section{Basic Truncated Power Series} \subsection{PS Operator} Syntax: \noindent{\tt PS}(EXPRN:{\em algebraic},DEPVAR:{\em kernel},ABOUT:{\em algebraic}):{\em ps object} \index{PS operator} The {\tt PS} operator returns a power series object representing the univariate formal power series expansion of EXPRN with respect to the dependent variable DEPVAR about the expansion point ABOUT. EXPRN may itself contain power series objects. The algebraic expression ABOUT should simplify to an expression which is independent of the dependent variable DEPVAR, otherwise an error will result. If ABOUT is the identifier {\tt INFINITY} then the power series expansion about DEPVAR = $\infty$ is obtained in ascending powers of 1/DEPVAR. \index{PSEXPLIM operator} The power series object representing EXPRN is compiled and then a number of terms of the power series expansion are evaluated. 
The expansion is carried out as far as the value specified by {\tt PSEXPLIM}. If, subsequently, the value of {\tt PSEXPLIM} is increased, sufficient information is stored in the power series object to enable the additional terms to be calculated without recalculating the terms already obtained. If the function has a pole at the expansion point then the correct Laurent series expansion will be produced. \noindent The following examples are valid uses of {\tt PS}: {\small\begin{verbatim} psexplim 6; ps(log x,x,1); ps(e**(sin x),x,0); ps(x/(1+x),x,infinity); ps(sin x/(1-cos x),x,0); \end{verbatim}} \index{power series ! of user defined function} New user-defined functions may be expanded provided the user provides LET rules giving \begin{enumerate} \item the value of the function at the expansion point \item a differentiation rule for the new function. \end{enumerate} \noindent For example {\small\begin{verbatim} operator sech; forall x let df(sech x,x)= - sech x * tanh x; let sech 0 = 1; ps(sech(x**2),x,0); \end{verbatim}} \index{power series ! of integral} The power series expansion of an integral may also be obtained (even if \REDUCE\ cannot evaluate the integral in closed form). An example of this is {\small\begin{verbatim} ps(int(e**x/x,x),x,1); \end{verbatim}} Note that if the integration variable is the same as the expansion variable then \REDUCE's integration package is not called; if on the other hand the two variables are different then the integrator is called to integrate each of the coefficients in the power series expansion of the integrand. The constant of integration is zero by default. If another value is desired, then the shared variable {\tt PSINTCONST} should be set to required value.\index{PSINTCONST (shared)} \subsection{PSORDLIM Operator} \index{PSORDLIM operator} Syntax: \hspace*{2em} {\tt PSORDLIM}(UPTO:{\em integer}):{\em integer} \hspace*{4em} or \hspace*{2em} {\tt PSORDLIM}():{\em integer} An internal variable is set to the value of {\tt UPTO} (which should evaluate to an integer). The value returned is the previous value of the variable. The default value is 15. If {\tt PSORDLIM} is called with no argument, the current value is returned. The significance of this control is that the system attempts to find the order of the power series required, that is the order is the degree of the first non-zero term in the power series. If the order is greater than the value of this variable an error message is given and the computation aborts. This prevents infinite loops in examples such as {\small\begin{verbatim} ps(1 - (sin x)**2 - (cos x)**2,x,0); \end{verbatim}} where the expression being expanded is identically zero, but is not recognised as such by \REDUCE. \section{Controlling Power Series} \subsection{PSTERM Operator} \index{PSTERM operator} Syntax: \hspace*{2em} {\tt PSTERM}(TPS:{\em power series object},NTH:{\em integer}):{\em algebraic} The operator {\tt PSTERM} returns the NTH term of the existing power series object TPS. If NTH does not evaluate to an integer or TPS to a power series object an error results. It should be noted that an integer is treated as a power series. \subsection{PSORDER Operator} \index{PSORDER operator} Syntax: \hspace*{2em} {\tt PSORDER}(TPS:{\em power series object}):{\em integer} The operator {\tt PSORDER} returns the order, that is the degree of the first non-zero term, of the power series object TPS. TPS should evaluate to a power series object or an error results. If TPS is zero, the identifier {\tt UNDEFINED} is returned. 
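For example, the following short sketch applies these operators to the expansion of $\sin x$; the order is $1$ since the first non-zero term is $x$, while the exact form in which {\tt PSTERM} reports a term is best checked interactively. {\small\begin{verbatim}
psexplim 6;
a := ps(sin x,x,0);   % x - x**3/6 + x**5/120 + ...
psorder a;            % 1, the degree of the first non-zero term
psterm(a,3);          % the third order term of a
\end{verbatim}}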
\subsection{PSSETORDER Operator} \index{PSSETORDER operator} Syntax: \hspace*{2em} {\tt PSSETORDER}(TPS:{\em power series object}, ORD:{\em integer}):{\em integer} The operator {\tt PSSETORDER} sets the order of the power series TPS to the value ORD, which should evaluate to an integer. If TPS does not evaluate to a power series object, then an error occurs. The value returned by this operator is the previous order of TPS, or 0 if the order of TPS was undefined. This operator is useful for setting the order of the power series of a function defined by a differential equation in cases where the power series package is inadequate to determine the order automatically. \subsection{PSDEPVAR Operator} \index{PSDEPVAR operator} Syntax: \hspace*{2em} {\tt PSDEPVAR}(TPS:{\em power series object}):{\em identifier} The operator {\tt PSDEPVAR} returns the expansion variable of the power series object TPS. TPS should evaluate to a power series object or an integer, otherwise an error results. If TPS is an integer, the identifier {\tt UNDEFINED} is returned. \subsection{PSEXPANSIONPT operator} \index{PSEXPANSIONPT operator} Syntax: \hspace*{2em} {\tt PSEXPANSIONPT}(TPS:{\em power series object}):{\em algebraic} The operator {\tt PSEXPANSIONPT} returns the expansion point of the power series object TPS. TPS should evaluate to a power series object or an integer, otherwise an error results. If TPS is integer, the identifier {\tt UNDEFINED} is returned. If the expansion is about infinity, the identifier {\tt INFINITY} is returned. \subsection{PSFUNCTION Operator} \index{PSFUNCTION operator} Syntax: \hspace*{2em} {\tt PSFUNCTION}(TPS:{\em power series object}):{\em algebraic} The operator {\tt PSFUNCTION} returns the function whose expansion gave rise to the power series object TPS. TPS should evaluate to a power series object or an integer, otherwise an error results. \subsection{PSCHANGEVAR Operator} \index{PSCHANGEVAR operator} Syntax: \hspace*{2em} {\tt PSCHANGEVAR}(TPS:{\em power series object}, X:{\em kernel}):{\em power series object} The operator {\tt PSCHANGEVAR} changes the dependent variable of the power series object TPS to the variable X. TPS should evaluate to a power series object and X to a kernel, otherwise an error results. Also X should not appear as a parameter in TPS. The power series with the new dependent variable is returned. \subsection{PSREVERSE Operator} \index{PSREVERSE operator} Syntax: \hspace*{2em} {\tt PSREVERSE}(TPS:{\em power series object}):{\em power series} Power series reversion. The power series TPS is functionally inverted. Four cases arise: \begin{enumerate} \item If the order of the series is 1, then the expansion point of the inverted series is 0. \item If the order is 0 {\em and} if the first order term in TPS is non-zero, then the expansion point of the inverted series is taken to be the coefficient of the zeroth order term in TPS. \item If the order is -1 the expansion point of the inverted series is the point at infinity. In all other cases a \REDUCE\ error is reported because the series cannot be inverted as a power series. Puiseux \index{Puiseux expansion} expansion would be required to handle these cases. \item If the expansion point of TPS is finite it becomes the zeroth order term in the inverted series. For expansion about 0 or the point at infinity the order of the inverted series is one. \end{enumerate} If TPS is not a power series object after evaluation an error results. 
\noindent Here are some examples: {\small\begin{verbatim} ps(sin x,x,0); psreverse(ws); % produces series for asin x about x=0. ps(exp x,x,0); psreverse ws; % produces series for log x about x=1. ps(sin(1/x),x,infinity); psreverse(ws); % produces series for 1/asin(x) about x=0. \end{verbatim}} \subsection{PSCOMPOSE Operator} \index{PSCOMPOSE operator} Syntax: \hspace*{2em} {\tt PSCOMPOSE}(TPS1:{\em power series}, TPS2:{\em power series}):{\em power series} \index{power series ! composition} {\tt PSCOMPOSE} performs power series composition. The power series TPS1 and TPS2 are functionally composed. That is to say that TPS2 is substituted for the expansion variable in TPS1 and the result expressed as a power series. The dependent variable and expansion point of the result coincide with those of TPS2. The following conditions apply to power series composition: \begin{enumerate} \item If the expansion point of TPS1 is 0 then the order of the TPS2 must be at least 1. \item If the expansion point of TPS1 is finite, it should coincide with the coefficient of the zeroth order term in TPS2. The order of TPS2 should also be non-negative in this case. \item If the expansion point of TPS1 is the point at infinity then the order of TPS2 must be less than or equal to -1. \end{enumerate} If these conditions do not hold the series cannot be composed (with the current algorithm terms of the inverted series would involve infinite sums) and a \REDUCE\ error occurs. \noindent Examples of power series composition include the following. {\small\begin{verbatim} a:=ps(exp y,y,0); b:=ps(sin x,x,0); pscompose(a,b); % Produces the power series expansion of exp(sin x) % about x=0. a:=ps(exp z,z,1); b:=ps(cos x,x,0); pscompose(a,b); % Produces the power series expansion of exp(cos x) % about x=0. a:=ps(cos(1/x),x,infinity); b:=ps(1/sin x,x,0); pscompose(a,b); % Produces the power series expansion of cos(sin x) % about x=0. \end{verbatim}} \subsection{PSSUM Operator} \index{PSSUM operator} Syntax: \begin{tabbing} \hspace*{2em} {\tt PSSUM}(\=J:{\em kernel} = LOWLIM:{\em integer}, COEFF:{\em algebraic}, X:{\em kernel}, \\ \> ABOUT:{\em algebraic}, POWER:{\em algebraic}):{\em power series} \end{tabbing} The formal power series sum for J from LOWLIM to {\tt INFINITY} of {\small\begin{verbatim} COEFF*(X-ABOUT)**POWER \end{verbatim}} or if ABOUT is given as {\tt INFINITY} {\small\begin{verbatim} COEFF*(1/X)**POWER \end{verbatim}} is constructed and returned. This enables power series whose general term is known to be constructed and manipulated using the other procedures of the power series package. J and X should be distinct simple kernels. The algebraics ABOUT, COEFF and POWER should not depend on the expansion variable X, similarly the algebraic ABOUT should not depend on the summation variable J. The algebraic POWER should be a strictly increasing integer valued function of J for J in the range LOWLIM to {\tt INFINITY}. {\small\begin{verbatim} pssum(n=0,1,x,0,n*n); % Produces the power series summation for n=0 to % infinity of x**(n*n). pssum(m=1,(-1)**(m-1)/(2m-1),y,1,2m-1); % Produces the power series expansion of atan(y-1) % about y=1. pssum(j=1,-1/j,x,infinity,j); % Produces the power series expansion of log(1-1/x) % about the point at infinity. pssum(n=0,1,x,0,2n**2+3n) + pssum(n=1,1,x,0,2n**2-3n); % Produces the power series summation for n=-infinity % to +infinity of x**(2n**2+3n). \end{verbatim}} \subsection{Arithmetic Operations} \index{power series ! 
arithmetic} As power series objects are domain elements they may be combined together in algebraic expressions in algebraic mode of \REDUCE\ in the normal way. For example, if A and B are power series objects then commands such as: \index{+ ! power series}\index{- ! power series}\index{/ ! power series} \index{* ! power series}\index{** ! power series} {\small\begin{verbatim} a*b; a**2+b**2; \end{verbatim}} will produce power series objects representing the product and the sum of the squares of the power series objects A and B respectively.
\subsection{Differentiation} \index{power series ! differentiation} If A is a power series object depending on X then the input {\tt df(a,x);} will produce the power series expansion of the derivative of A with respect to X.
\section{Restrictions and Known Bugs} If A and B are power series objects and X is a variable which evaluates to itself then currently expressions such as {\tt a/b} and {\tt a*x} do not evaluate to a single power series object (although the results are in each case formally valid). Instead use {\tt ps(a/b,x,0)} and {\tt ps(a*x,x,0)} {\em etc.}.
\chapter{TRI: TeX REDUCE interface} \label{TRI} \typeout{{TRI: TeX REDUCE interface}} {\footnotesize \begin{center} Werner Antweiler, Andreas Strotmann and Volker Winkelmann \\ University of Cologne Computer Center, Abt. Anwendungssoftware, Robert-Koch-Stra\ss{e} 10 \\ 5000 K\"oln 41, Germany \\[0.05in] e--mail: antweil@epas.utoronto.ca strotmann@rrz.uni-koeln.de winkelmann@rrz.uni-koeln.de \end{center} } \ttindex{TRI}
The \REDUCE-\TeX-Interface incorporates three levels of \TeX\ output: without line breaking, with line breaking, and with line breaking plus indentation. When the package is loaded some default initialisations are performed: the default page width is set to 15 centimetres and the tolerance for line breaking to 20. Moreover, TRI is set up to translate Greek names, {\em e.g.\ }TAU or PSI, into equivalent \TeX\ symbols, {\em e.g.\ } $\tau$ or $\psi$, respectively. Letters are printed in lowercase, as defined through assertion of the set LOWERCASE.
\section{Switches for TRI} The three TRI modes can be selected by switches, which can be used alternatively and incrementally. Switching {\tt TEX}\ttindex{TEX} on gives standard \TeX-output; switching {\tt TEXBREAK}\ttindex{TEXBREAK} on gives line-broken \TeX-output; and switching {\tt TEXINDENT}\ttindex{TEXINDENT} on gives line-broken \TeX-output plus indentation. Thus the three levels of TRI are enabled or disabled according to: {\small\begin{verbatim} On TeX; % switch TeX is on On TeXBreak; % switches TeX and TeXBreak are on On TeXIndent; % switches TeX, TeXBreak and TeXIndent are on Off TeXIndent; % switch TeXIndent is off Off TeXBreak; % switches TeXBreak and TeXIndent are off Off TeX; % all three switches are off \end{verbatim}} How TRI breaks multiple lines of \TeX-code may be controlled by setting values for page width and tolerance:\ttindex{TeXsetbreak} {\small\begin{verbatim} TeXsetbreak(page_width, tolerance); \end{verbatim}} Page width is measured in millimetres, and tolerance is a positive integer in the closed interval $[0\ldots10000]$.\index{TRI ! page-width} The higher the tolerance, the more breakpoints become feasible. A tolerance of 0 means that no breakpoint will be considered feasible, while a value of 10000 allows any breakpoint to be considered feasible.\index{TRI ! tolerance} For line-breaking without indentation, suitable values for the tolerance lie between 10 and 100.
As a rule of thumb, use higher values the deeper the term is nested. If using indentation, use much higher tolerance values; reasonable values for tolerance here lie between 700 and 1500.
\subsection{Adding Translations} Sometimes it is desirable to add special REDUCE-symbol-to-\TeX-item translations. For such a task TRI provides a function {\tt TeXlet} which binds any REDUCE-symbol to one of the predefined \TeX-items. A call to this function has the following syntax: \ttindex{TeXlet} {\tt TeXlet}({\em REDUCE-symbol}, {\em \TeX-item}); For example {\small\begin{verbatim} TeXlet('velocity,'!v); TeXlet('gamma,\verb|'!\!G!a!m!m!a! |); TeXlet('acceleration,\verb|'!\!v!a!r!t!h!e!t!a! |); \end{verbatim}} Besides this method of single assertions one can assert one of (currently) two standard sets providing substitutions for lowercase and Greek letters. These sets are loaded by default. These sets can be switched on or off using the functions \noindent{\tt TeXassertset} {\em setname};\\ \noindent{\tt TeXretractset} {\em setname}; where the setnames currently defined are {\tt 'GREEK} and {\tt 'LOWERCASE} (a short example of their use is given at the end of this chapter). There are facilities for creating other sets of substitutions, using the function {\tt TeXitem}\ttindex{TeXitem}.
\section{Examples of Use} Some representative examples demonstrate the capabilities of TRI. {\small\begin{verbatim} load_package tri; % TeX-REDUCE-Interface 0.50 % set greek asserted % set lowercase asserted % \tolerance 10 % \hsize=150mm TeXsetbreak(150,250); % \tolerance 250 % \hsize=150mm on TeXindent; (x+y)^16/(v-w)^16; $$\displaylines{\qdd \(x^{16} +16\cdot x^{15}\cdot y +120\cdot x^{14}\cdot y^{2} +560\cdot x^{13}\cdot y^{3} +1820\cdot x^{12}\cdot y^{4} +4368\cdot x^{11}\cdot y^{5}\nl \off{327680} +8008\cdot x^{10}\cdot y^{6} +11440\cdot x^{9}\cdot y^{7} +12870\cdot x^{8}\cdot y^{8} +11440\cdot x^{7}\cdot y^{9} +8008\cdot x^{6}\cdot y^{10}\nl \off{327680} +4368\cdot x^{5}\cdot y^{11} +1820\cdot x^{4}\cdot y^{12} +560\cdot x^{3}\cdot y^{13} +120\cdot x^{2}\cdot y^{14} +16\cdot x\cdot y^{15} +y^{16} \) /\nl \(v^{16} -16\cdot v^{15}\cdot w +120\cdot v^{14}\cdot w^{2} -560\cdot v^{13}\cdot w^{3} +1820\cdot v^{12}\cdot w^{4} -4368\cdot v^{11}\cdot w^{5}\nl \off{327680} +8008\cdot v^{10}\cdot w^{6} -11440\cdot v^{9}\cdot w^{7} +12870\cdot v^{8}\cdot w^{8} -11440\cdot v^{7}\cdot w^{9} +8008\cdot v^{6}\cdot w^{10} -4368\cdot v^{5}\cdot w^{11}\nl \off{327680} +1820\cdot v^{4}\cdot w^{12} -560\cdot v^{3}\cdot w^{13} +120\cdot v^{2}\cdot w^{14} -16\cdot v\cdot w^{15} +w^{16} \) \Nl}$$ \end{verbatim}} A simple example using matrices: {\small\begin{verbatim} load_package tri; % TeX-REDUCE-Interface 0.50 % set greek asserted % set lowercase asserted % \tolerance 10 % \hsize=150mm on Tex; mat((1,a-b,1/(c-d)),(a^2-b^2,1,sqrt(c)),((a+b)/(c-d),sqrt(d),1)); $$ \pmatrix{1&a -b& \frac{1}{ c -d}\cr a^{2} -b^{2}&1& \sqrt{c}\cr \frac{a +b}{ c -d}& \sqrt{d}&1\cr } $$ \end{verbatim}} Note that the resulting output uses a number of \TeX\ macros which are defined in the file {\tt tridefs.tex}, which is distributed with the example file.
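For instance, the Greek translations could be switched off and on again as follows (a sketch following the syntax given above; whether the set name must be quoted is best checked interactively): {\small\begin{verbatim}
TeXretractset greek;   % stop translating TAU, PSI, ... into Greek symbols
TeXassertset greek;    % restore the default behaviour
\end{verbatim}}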
\chapter[TRIGSIMP: Trigonometric simplification]% {TRIGSIMP: Simplification and factorisation of trigonometric and hyperbolic functions} \label{TRIGSIMP} \typeout{{TRIGSIMP: Simplification and factorisation of trigonometric and hyperbolic functions}} {\footnotesize \begin{center} Wolfram Koepf, Andreas Bernig and Herbert Melenk\\ Konrad--Zuse--Zentrum f\"ur Informationstechnik Berlin \\ Takustra\ss e 7 \\ D--14195 Berlin--Dahlem, Germany \\[0.05in] e--mail: Koepf@zib.de \end{center} } \ttindex{TRIGSIMP} There are three procedures included in TRIGSIMP: {\tt trigsimp}, {\tt trigfactorize} and {\tt triggcd}. The first is for finding simplifications of trigonometric or hyperbolic expressions with many options, the second for factorising them and the third for finding the greatest common divisor of two trigonometric or hyperbolic polynomials. \section{Simplifying trigonometric expressions} As there is no normal form for trigonometric and hyperbolic functions, the same expression can be rewritten in many different directions, {\em e.g. } $\sin(2x) \leftrightarrow 2\sin(x)\cos(x)$. The user can pass several optional parameters to the procedure {\tt trigsimp} in order to influence the direction of the transformations. In particular, it is possible to decide whether a rational expression in trigonometric and hyperbolic functions vanishes. \ttindex{trigsimp} To simplify a function {\tt f}, one uses {\tt trigsimp(f[,options])}. Example: {\small\begin{verbatim} 2: trigsimp(sin(x)^2+cos(x)^2); 1 \end{verbatim}} Possible options are (* denotes the default): \begin{enumerate} \item {\tt sin} (*) or {\tt cos}\index{trigsimp ! sin}\index{trigsimp ! cos} \item {\tt sinh} (*) or {\tt cosh}\index{trigsimp ! sinh}\index{trigsimp ! cosh} \item {\tt expand} (*) or {\tt combine} or {\tt compact}\index{trigsimp ! expand}\index{trigsimp ! combine}\index{trigsimp ! compact} \item {\tt hyp} or {\tt trig} or {\tt expon}\index{trigsimp ! hyp}\index{trigsimp ! trig}\index{trigsimp ! expon} \item {\tt keepalltrig}\index{trigsimp ! keepalltrig} \end{enumerate} From each group at most one option can be used; otherwise an error message occurs. The first group fixes the preference used while transforming a trigonometric expression. The second group is the equivalent for the hyperbolic functions. The third group determines the type of transformations. With the default {\tt expand}, an expression is written in a form only using single arguments and no sums of arguments. With {\tt combine}, products of trigonometric functions are transformed to trigonometric functions involving sums of arguments. {\small\begin{verbatim} trigsimp(sin(x)^2,cos); 2 - cos(x) + 1 trigsimp(sin(x)*cos(y),combine); sin(x - y) + sin(x + y) ------------------------- 2 \end{verbatim}} With {\tt compact}, the \REDUCE\ operator {\tt compact} (see chapter~\ref{COMPACT}) is applied to {\tt f}. This often leads to a simple form, but in contrast to {\tt expand} one doesn't get a normal form. {\small\begin{verbatim} trigsimp((1-sin(x)**2)**20*(1-cos(x)**2)**20,compact); 40 40 cos(x) *sin(x) \end{verbatim}} With the fourth group, each expression is transformed to a trigonometric, hyperbolic or exponential form: {\small\begin{verbatim} trigsimp(sin(x),hyp); - sinh(i*x)*i trigsimp(e^x,trig); x x cos(---) + sin(---)*i i i \end{verbatim}} Usually, {\tt tan}, {\tt cot}, {\tt sec}, {\tt csc} are expressed in terms of {\tt sin} and {\tt cos}.
It can be sometimes useful to avoid this, which is handled by the option {\tt keepalltrig}: {\small\begin{verbatim} trigsimp(tan(x+y),keepalltrig); - (tan(x) + tan(y)) ---------------------- tan(x)*tan(y) - 1 \end{verbatim}} It is possible to use the options of different groups simultaneously. \section{Factorising trigonometric expressions} With {\tt trigfactorize(p,x)} one can factorise the trigonometric or hyperbolic polynomial {\tt p} with respect to the argument x. Example: \ttindex{trigfactorize} {\small\begin{verbatim} trigfactorize(sin(x),x/2); x x {2,cos(---),sin(---)} 2 2 \end{verbatim}} If the polynomial is not coordinated or balanced the output will equal the input. In this case, changing the value for x can help to find a factorisation: {\small\begin{verbatim} trigfactorize(1+cos(x),x); {cos(x) + 1} trigfactorize(1+cos(x),x/2); x x {2,cos(---),cos(---)} 2 2 \end{verbatim}} \section{GCDs of trigonometric expressions} The operator {\tt triggcd}\ttindex{triggcd} is an application of {\tt trigfactorize}. With its help the user can find the greatest common divisor of two trigonometric or hyperbolic polynomials. The syntax is: {\tt triggcd(p,q,x)}, where p and q are the polynomials and x is the smallest unit to use. Example: {\small\begin{verbatim} triggcd(sin(x),1+cos(x),x/2); x cos(---) 2 triggcd(sin(x),1+cos(x),x); 1 \end{verbatim}} See also the ASSIST package (chapter~\ref{ASSIST}). \chapter{WU: Wu algorithm for poly systems} \label{WU} \typeout{{WU: Wu algorithm for polynomial systems}} {\footnotesize \begin{center} Russell Bradford \\ School of Mathematical Sciences, University of Bath,\\ Bath, BA2 7AY, England \\[0.05in] e--mail: rjb@maths.bath.ac.uk \end{center} } \ttindex{WU} The interface: {\small\begin{verbatim} wu( {x^2+y^2+z^2-r^2, x*y+z^2-1, x*y*z-x^2-y^2-z+1}, {x,y,z}); \end{verbatim}} calls {\tt wu}\ttindex{WU} with the named polynomials, and with the variable ordering ${\tt x} > {\tt y} > {\tt z}$. In this example, {\tt r} is a parameter. The result is {\small\begin{verbatim} 2 3 2 {{{r + z - z - 1, 2 2 2 2 4 2 2 2 r *y + r *z + r - y - y *z + z - z - 2, 2 x*y + z - 1}, y}, 6 4 6 2 6 4 7 4 6 4 5 4 4 {{r *z - 2*r *z + r + 3*r *z - 3*r *z - 6*r *z + 3*r *z + 3* 4 3 4 2 4 2 10 2 9 2 8 2 7 r *z + 3*r *z - 3*r + 3*r *z - 6*r *z - 3*r *z + 6*r *z + 2 6 2 5 2 4 2 3 2 13 12 11 3*r *z + 6*r *z - 6*r *z - 6*r *z + 3*r + z - 3*z + z 10 9 8 7 6 4 3 2 + 2*z + z + 2*z - 6*z - z + 2*z + 3*z - z - 1, 2 2 3 2 y *(r + z - z - 1), 2 x*y + z - 1}, 2 3 2 y*(r + z - z - 1)}} \end{verbatim}} namely, a list of pairs of characteristic sets and initials for the characteristic sets. Thus, the first pair above has the characteristic set $$ r^2 + z^3 - z^2 - 1, r^2 y^2 + r^2 z + r^2 - y^4 - y^2 z^2 + z^2 - z - 2, x y + z^2 - 1$$ and initial $y$. According to Wu's theorem, the set of roots of the original polynomials is the union of the sets of roots of the characteristic sets, with the additional constraints that the corresponding initial is non-zero. Thus, for the first pair above, we find the roots of $\{r^2 + z^3 - z^2 - 1, \ldots~\}$ under the constraint that $y \neq 0$. These roots, together with the roots of the other characteristic set (under the constraint of $y(r^2+z^3-z^2-1) \neq 0$), comprise all the roots of the original set. \chapter[XCOLOR: Color factor in gauge theory]% {XCOLOR: Calculation of the color factor in non-abelian gauge field theories} \label{XCOLOR} \typeout{{XCOLOR: Calculation of the color factor in non-abelian gauge field theories}} {\footnotesize \begin{center} A. 
Kryukov \\ Institute for Nuclear Physics, Moscow State University \\ 119899, Moscow, Russia \\[0.05in] e--mail: kryukov@npi.msu.su \end{center} } \ttindex{XCOLOR} XCOLOR calculates the colour factor in non-abelian gauge field theories. It provides two commands and two operators. \noindent{\tt SUdim} integer\ttindex{SUdim} Sets the order of the SU group. The default value is 3. \noindent{\tt SpTT} expression\ttindex{SpTT} Sets the normalisation coefficient A in the equation $Sp(T_i T_j) = A \Delta(i,j)$. The default value is 1/2. \noindent{\tt QG}(inQuark, outQuark, Gluon)\ttindex{QG} Describes the quark-gluon vertex. The parameters may be any identifiers. The first and second of them must be the in- and out-quarks, respectively; the third one is the gluon. \noindent{\tt G3}(Gluon1, Gluon2, Gluon3)\ttindex{G3} Describes the three-gluon vertex. The parameters may be any identifiers. The order of the gluons must be clockwise. In terms of the QG and G3 operators one can input a diagram in ``color'' space as a product of these operators. For example: \newpage {\small\begin{verbatim} e1 ---->--- / \ / \ | e2 | v1*............*v2 | | \ / \ e3 / ----<--- \end{verbatim}} where \verb+--->---+ is a quark and \verb+.......+ is a gluon. The related \REDUCE\ expression is {\tt QG(e3,e1,e2)*QG(e1,e3,e2)}. \chapter{XIDEAL: Gr\"obner for exterior algebra} \label{XIDEAL} \typeout{{XIDEAL: Gr\"obner Bases for exterior algebra}} {\footnotesize \begin{center} David Hartley \\ GMD, Institute I1, Schloss Birlinghoven \\ D--53757 St. Augustin, Germany \\[0.05in] e--mail: David.Hartley@gmd.de \\[0.1in] and \\ Philip A.~Tuckey \\ Max Planck Institute for Physics \\ Foehringer Ring 6 \\ D--80805 Munich, Germany \\[0.05in] e--mail: pht@iws170.mppmu.mpg.de \end{center} } \ttindex{XIDEAL} XIDEAL extends the Gr\"obner basis method to exterior algebras. XIDEAL constructs Gr\"obner bases for solving the left ideal membership problem: Gr\"obner left ideal bases or GLIBs. For graded ideals, where each form is homogeneous in degree, the distinction between left and right ideals vanishes. Furthermore, if the generating forms are all homogeneous, then the Gr\"obner bases for the non-graded and graded ideals are identical. In this case, XIDEAL is able to save time by truncating the Gr\"obner basis at some maximum degree if desired. XIDEAL uses the EXCALC package (chapter~\ref{EXCALC}). \section{Operators} \subsubsection*{XIDEAL} \f{XIDEAL} calculates a Gr\"obner left ideal basis in an exterior algebra. The syntax is\ttindex{XIDEAL} {\small\begin{verbatim} XIDEAL(S:list of forms[,R:integer]):list of forms. \end{verbatim}} \f{XIDEAL} calculates the Gr\"obner left ideal basis for the left ideal generated by \f{S} using graded lexicographical ordering based on the current kernel ordering. The resulting list can be used for subsequent reductions with \f{XMODULOP} as long as the kernel ordering is not changed. If the set of generators \f{S} is graded, an optional parameter \f{R} can be given, and \f{XIDEAL} produces a truncated basis suitable for reducing exterior forms of degree less than or equal to \f{R} in the left ideal. This can save time and space with large expressions, but the result cannot be used for exterior forms of degree greater than \f{R}. See also the switches \f{XSTATS} and \f{XFULLREDUCE}. \subsubsection*{XMODULO} \f{XMODULO} reduces exterior forms to their (unique) normal forms modulo a left ideal.
The syntax is\ttindex{XMODULO} {\small\begin{verbatim} XMODULO(F:form, S:list of forms):form \end{verbatim}} or {\small\begin{verbatim} XMODULO(F:list of forms, S:list of forms):list of forms. \end{verbatim}} An alternative infix syntax is also available: {\small\begin{verbatim} F XMODULO S. \end{verbatim}} \f{XMODULO(F,S)} first calculates a Gr\"obner basis for the left ideal generated by \f{S}, and then reduces \f{F}. \f{F} may be either a single exterior form, or a list of forms, and \f{S} is a list of forms. If \f{F} is a list of forms, each element is reduced, and any which vanish are deleted from the result. If this operator is used more than once, and \f{S} does not change between calls, then the Gr\"obner basis is not recalculated. If the set of generators \f{S} is graded, then a truncated Gr\"obner basis is calculated using the degree of \f{F} (or the maximal degree in \f{F}). \subsubsection*{XMODULOP} \f{XMODULOP} reduces exterior forms to their (not necessarily unique) normal forms modulo a set of exterior polynomials. The syntax is\ttindex{XMODULOP} {\small\begin{verbatim} XMODULOP(F:form, S:list of forms):form \end{verbatim}} or {\small\begin{verbatim} XMODULOP(F:list of forms, S:list of forms):list of forms. \end{verbatim}} An alternative infix syntax is also available: {\small\begin{verbatim} F XMODULOP S. \end{verbatim}} \f{XMODULOP(F,S)} reduces \f{F} with respect to the set of exterior polynomials \f{S}, which is not necessarily a Gr\"obner basis. \f{F} may be either a single exterior form, or a list of forms, and \f{S} is a list of forms. This operator can be used in conjunction with \f{XIDEAL} to produce the same effect as \f{XMODULO}: for a single form \f{F} in an ideal generated by the graded set \f{S}, \f{F XMODULO S} is equivalent to \f{F XMODULOP XIDEAL(S,EXDEGREE F)}. \section{Switches} \subsubsection*{XFULLREDUCE} \f{ON XFULLREDUCE}\ttindex{XFULLREDUCE} allows \f{XIDEAL} and \f{XMODULO} to calculate reduced (but not necessarily normed) Gr\"obner bases, which speeds up subsequent reductions, and guarantees a unique form (up to scaling) for the Gr\"obner basis. \f{OFF XFULLREDUCE} turns off this feature, which may speed up calculation of the Gr\"obner basis. \f{XFULLREDUCE} is \f{ON} by default. \subsubsection*{XSTATS} \f{ON XSTATS}\ttindex{XSTATS} produces counting and timing information. As \f{XIDEAL} is running, a hash mark (\verb.#.) is printed for each form taken from the input list, followed by a sequence of carets (\verb.^.) and dollar signs (\verb.$.). Each caret represents a new basis element obtained by a simple wedge product, and each dollar sign represents a new basis element obtained from an S-polynomial. At the end, a table is printed summarising the calculation. \f{XSTATS} is \f{OFF} by default. \section{Examples} Suppose EXCALC and XIDEAL have been loaded, the switches are at their default settings, and the following exterior variables have been declared: {\small\begin{verbatim} pform x=0,y=0,z=0,t=0,f(i)=1,h=0,hx=0,ht=0; \end{verbatim}} In a commutative polynomial ring, a single polynomial is its own Gr\"obner basis. This is no longer true for exterior algebras because of the presence of zero divisors, which can lead to some surprising reductions: {\small\begin{verbatim} xideal {d x^d y - d z^d t}; {d T^d Z + d X^d Y, d X^d Y^d Z, d T^d X^d Y} f(3)^f(4)^f(5)^f(6) xmodulo {f(1)^f(2) + f(3)^f(4) + f(5)^f(6)}; 0 \end{verbatim}} The heat equation, $h_{xx}=h_t$, can be represented by the following exterior differential system.
{\small\begin{verbatim} S := {d h - ht*d t - hx*d x, d ht^d t + d hx^d x, d hx^d t - ht*d x^d t}; \end{verbatim}} \f{XMODULO} can be used to check that the exterior differential system is closed under exterior differentiation. {\small\begin{verbatim} d S xmodulo S; {} \end{verbatim}} Non-graded left and right ideals are no longer the same: {\small\begin{verbatim} d t^(d z+d x^d y) xmodulo {d z+d x^d y}; 0 (d z+d x^d y)^d t xmodulo {d z+d x^d y}; - 2*d t^d z \end{verbatim}} Higher order forms can now reduce lower order ones: {\small\begin{verbatim} d x xmodulo {d y^d z + d x,d x^d y + d z}; 0 \end{verbatim}} Any form containing a 0-form term generates the whole ideal: {\small\begin{verbatim} xideal {1 + f(1) + f(1)^f(2) + f(2)^f(3)^f(4)}; {1} \end{verbatim}} \chapter[ZEILBERG: Indef \& definite summation]% {ZEILBERG: A package for indefinite and definite summation} \label{ZEILBERG} \typeout{{ZEILBERG: A package for indefinite and definite summation}} {\footnotesize \begin{center} Wolfram Koepf and Gregor St\"olting \\ Konrad--Zuse--Zentrum f\"ur Informationstechnik Berlin \\ Takustra\"se 7 \\ D--14195 Berlin--Dahlem, Germany \\[0.05in] e--mail: Koepf@zib.de \end{center} } \ttindex{ZEILBERG} \newcommand{\N} {{\rm {\mbox{\protect\makebox[.15em][l]{I}N}}}} The ZEILBERG package provides an implementation of the Gosper and Zeilberger algorithms for indefinite, and definite summation of hypergeometric terms, respectively, with extensions for ratios of products of powers, factorials, $\Gamma$ function terms, binomial coefficients, and shifted factorials that are rational-linear in their arguments. \section{The GOSPER summation operator} The {\tt gosper}\ttindex{gosper} operator is an implementation of the Gosper algorithm. \begin{itemize} \item {\tt gosper(a,k)} determines a closed form antidifference. If it does not return a closed form solution, then a closed form solution does not exist. \item {\tt gosper(a,k,m,n)} determines \[ \sum_{k=m}^n a_k \] using Gosper's algorithm. This is only successful if Gosper's algorithm applies. \end{itemize} Example: {\small\begin{verbatim} gosper((-1)^(k+1)*(4*k+1)*factorial(2*k)/ (factorial(k)*4^k*(2*k-1)*factorial(k+1)),k); k - ( - 1) *factorial(2*k) ------------------------------------ 2*k 2 *factorial(k + 1)*factorial(k) gosper(binomial(k,n),k); (k + 1)*binomial(k,n) ----------------------- n + 1 \end{verbatim}} \section{EXTENDED\_GOSPER operator} The {\tt extended\_gosper}\ttindex{extended\_gosper} operator is an implementation of an extended version of Gosper's algorithm. \begin{itemize} \item {\tt extended\_gosper(a,k)} determines an antidifference $g_k$ of $a_k$ whenever there is a number $m$ such that $h_{k}-h_{k-m}=a_k$, and $h_k$ is an {\sl $m$-fold hypergeometric term}, i.\ e. \[ h_{k}/h_{k-m}\quad\mbox{is a rational function with respect to $k$.} \] If it does not return a solution, then such a solution does not exist. \item {\tt extended\_gosper(a,k,m)} determines an {\sl $m$-fold antidifference} $h_k$ of $a_k$, i.\ e.\ $h_{k}-h_{k-m}=a_k$, if it is an $m$-fold hypergeometric term. \end{itemize} Examples: {\small\begin{verbatim} extended_gosper(binomial(k/2,n),k); k k - 1 (k + 2)*binomial(---,n) + (k + 1)*binomial(-------,n) 2 2 ------------------------------------------------------- 2*(n + 1) extended_gosper(k*factorial(k/7),k,7); k (k + 7)*factorial(---) 7 \end{verbatim}} \section{SUMRECURSION operator} The {\tt sumrecursion}\ttindex{sumrecursion} operator is an implementation of the (fast) Zeilberger algorithm. 
\begin{itemize} \item {\tt sumrecursion(f,k,n)} determines a holonomic recurrence equation for \[ {\tt sum(n)} =\sum\limits_{k=-\infty}^\infty f(n,k) \] with respect to $n$. %%, applying {\tt extended\_sumrecursion} if necessary %%(section~\ref{sec:EXTENDED_SUMRECURSION}). The resulting expression equals zero. \item {\tt sumrecursion(f,k,n,j)} searches for a holonomic recurrence equation of order $j$.%% This %%operator does not use %%{\tt extended\_sumrecursion} automatically. Note that if $j$ is too large, the recurrence equation may not be unique, and only one particular solution is returned. \end{itemize} {\small\begin{verbatim} sumrecursion(binomial(n,k),k,n); 2*sum(n - 1) - sum(n) \end{verbatim}} %%\section{EXTENDED\_SUMRECURSION operator} %%\label{sec:EXTENDED_SUMRECURSION} %% %%The {\tt extended\_sumrecursion}\ttindex{extended\_sumrecursion} %%operator uses extension to handle hypergeometric terms. As {\tt %%sumrecusion} uses this algorithm automatically in the case of three %%arguments, it is only needed in the four argument case, or for %%detailed investigations. More details may be found in the on-line %%documentation. \section{HYPERRECURSION operator} If a recursion for a generalised hypergeometric function is to be established, one can use \begin{itemize} \item {\tt hyperrecursion(upper,lower,x,n)}\ttindex{hyperrecursion} determines a holonomic recurrence equation with respect to $n$ for \[_{p}F_{q}\left.\left(\begin{array}{cccc} a_{1},&a_{2},&\cdots,&a_{p}\\ b_{1},&b_{2},&\cdots,&b_{q}\\ \end{array}\right| x\right) , \] where {\tt upper}$=\{a_{1}, a_{2}, \ldots, a_{p}\}$ is the list of upper parameters, and {\tt lower}$=\{b_{1}, b_{2}, \ldots, b_{q}\}$ is the list of lower parameters depending on $n$. \item {\tt hyperrecursion(upper,lower,x,n,j)} $(j\in\N)$ searches only for a holonomic recurrence equation of order $j$. This operator does not automatically use {\tt extended\_sumrecursion}. \end{itemize} {\small\begin{verbatim} hyperrecursion({-n,b},{c},1,n); (b - c - n + 1)*sum(n - 1) + (c + n - 1)*sum(n) \end{verbatim}} If a hypergeometric expression is given in hypergeometric notation, then the use of {\tt hyperrecursion} is more natural than the use of {\tt sumrecursion}. Moreover the \REDUCE\ operator \begin{itemize} \item {\tt hyperterm(upper,lower,x,k)}\ttindex{hyperterm} yields the hypergeometric term \[ \frac {(a_{1})_{k}\cdot(a_{2})_{k}\cdots(a_{p})_{k}} {(b_{1})_{k}\cdot(b_{2})_{k}\cdots(b_{q})_{k}\,k!}x^{k} \] with upper parameters {\tt upper}$=\{a_{1}, a_{2}, \ldots, a_{p}\}$, and lower parameters {\tt lower}$=\{b_{1}, b_{2}, \ldots, b_{q}\}$ \end{itemize} in connection with hypergeometric terms. \section{HYPERSUM operator} With the operator {\tt hypersum}\ttindex{hypersum}, hypergeometric sums are directly evaluated in closed form whenever the extended Zeilberger algorithm leads to a recurrence equation containing only two terms: \begin{itemize} \item {\tt hypersum(upper,lower,x,n)} determines a closed form representation for\\ $_{p}F_{q}\left.\left(\begin{array}{cccc} a_{1},&a_{2},&\cdots,&a_{p}\\ b_{1},&b_{2},&\cdots,&b_{q}\\ \end{array}\right| x\right) $, where {\tt upper}$=\{a_{1}, a_{2}, \ldots, a_{p}\}$ is the list of upper parameters, and {\tt lower}$=\{b_{1}, b_{2}, \ldots, b_{q}\}$ is the list of lower parameters depending on $n$. The result is given as a hypergeometric term with respect to $n$. 
If the result is a list of length $m$, we call it $m$-{\sl fold symmetric}, which is to be interpreted as follows: Its $j^{th}$ part is the solution valid for all $n$ of the form $n=mk+j-1 \;(k\in\N_0)$. In particular, if the resulting list contains two terms, then the first part is the solution for even $n$, and the second part is the solution for odd $n$. \end{itemize} {\small\begin{verbatim} hypersum({a,1+a/2,c,d,-n},{a/2,1+a-c,1+a-d,1+a+n},1,n); pochhammer(a - c - d + 1,n)*pochhammer(a + 1,n) ------------------------------------------------- pochhammer(a - c + 1,n)*pochhammer(a - d + 1,n) hypersum({a,1+a/2,d,-n},{a/2,1+a-d,1+a+n},-1,n); pochhammer(a + 1,n) ------------------------- pochhammer(a - d + 1,n) \end{verbatim}} Note that the operator {\tt togamma}\ttindex{togamma} converts expressions given in factorial-$\Gamma$-binomial-Pochhammer notation into a pure $\Gamma$ function representation: {\small\begin{verbatim} togamma(hypersum({a,1+a/2,d,-n},{a/2,1+a-d,1+a+n},-1,n)); gamma(a - d + 1)*gamma(a + n + 1) ----------------------------------- gamma(a - d + n + 1)*gamma(a + 1) \end{verbatim}} \section{SUMTOHYPER operator} With the operator {\tt sumtohyper}\ttindex{sumtohyper}, sums given in factorial-$\Gamma$-binomial-Poch\-hammer notation are converted into hypergeometric notation. \begin{itemize} \item {\tt sumtohyper(f,k)} determines the hypergeometric representation of\linebreak $\sum\limits_{k=-\infty}^\infty f_k$, {\em i.e.\ } its output is {\tt c*hypergeometric(upper,lower,x)}, corresponding to the representation \[ \sum\limits_{k=-\infty}^\infty f_k=c\cdot\; _{p}F_{q}\left.\left(\begin{array}{cccc} a_{1},&a_{2},&\cdots,&a_{p}\\ b_{1},&b_{2},&\cdots,&b_{q}\\ \end{array}\right| x\right) \;, \] where {\tt upper}$=\{a_{1}, a_{2}, \ldots, a_{p}\}$ and {\tt lower}$=\{b_{1}, b_{2}, \ldots, b_{q}\}$ are the lists of upper and lower parameters. \end{itemize} Examples: {\small\begin{verbatim} sumtohyper(binomial(n,k)^3,k); hypergeometric({ - n, - n, - n},{1,1},-1) \end{verbatim}} \section{Simplification Operators} For the decision that an expression $a_k$ is a hypergeometric term, it is necessary to find out whether or not $a_{k}/a_{k-1}$ is a rational function with respect to $k$. For the purpose to decide whether or not an expression involving powers, factorials, $\Gamma$ function terms, binomial coefficients, and Pochhammer symbols is a hypergeometric term, the following simplification operators can be used: \begin{itemize} \item {\tt simplify\_gamma(f)}\ttindex{simplify\_gamma} simplifies an expression {\tt f} involving only rational, powers and $\Gamma$ function terms. \item {\tt simplify\_combinatorial(f)}\ttindex{simplify\_combinatorial} simplifies an expression {\tt f} involving powers, factorials, $\Gamma$ function terms, binomial coefficients, and Pochhammer symbols by converting factorials, binomial coefficients, and Poch\-hammer symbols into $\Gamma$ function terms, and applying {\tt simplify\_gamma} to its result. If the output is not rational, it is given in terms of $\Gamma$ functions. If factorials are preferred use \item {\tt gammatofactorial} (rule)\ttindex{gammatofactorial} converting $\Gamma$ function terms into factorials using $\Gamma\:(x)\rightarrow (x-1)!$. \item {\tt simplify\_gamma2(f)}\ttindex{simplify\_gamma2} uses the duplication formula of the $\Gamma$ function to simplify $f$. \item {\tt simplify\_gamman(f,n)}\ttindex{simplify\_gamman} uses the multiplication formula of the $\Gamma$ function to simplify $f$. 
\end{itemize} The use of {\tt simplify\_combinatorial(f)} is a safe way to decide the rationality for any ratio of products of powers, factorials, $\Gamma$ function terms, binomial coefficients, and Pochhammer symbols. Example: {\small\begin{verbatim} simplify_gamma2(gamma(2*n)/gamma(n)); 2*n 2*n + 1 2 *gamma(---------) 2 ----------------------- 2*sqrt(pi) \end{verbatim}} \chapter{ZTRANS: $Z$-transform package} \label{ZTRANS} \typeout{{ZTRANS: $Z$-transform package}} {\footnotesize \begin{center} Wolfram Koepf and Lisa Temme \\ Konrad--Zuse--Zentrum f\"ur Informationstechnik Berlin \\ Takustra\"se 7 \\ D--14195 Berlin--Dahlem, Germany \\[0.05in] e--mail: Koepf@zib.de \end{center} } \ttindex{ZTRANS} The $Z$-Transform of a sequence $\{f_n\}$ is the discrete analogue of the Laplace Transform, and \[{\cal Z}\{f_n\} = F(z) = \sum^\infty_{n=0} f_nz^{-n}\;.\] \\ This series converges in the region outside the circle $|z|=|z_0|= \limsup\limits_{n \rightarrow \infty} \sqrt[n]{|f_n|}\;.$ In the same way that a Laplace Transform can be used to solve differential equations, so $Z$-Transforms can be used to solve difference equations. \begin{tabbing} {\bf SYNTAX:}\ \ {\tt ztrans($f_n$, n, z)}\ \ \ \ \ \ \ \ \=where $f_n$ is an expression, and $n$,$z$ \\ \> are identifiers.\\ \end{tabbing} \ttindex{ztrans} \begin{tabbing} This pack\=age can compute the \= $Z$-Transforms of the \=following list of $f_n$, and \\ certain combinations thereof.\\ \\ \>$1$ \>$e^{\alpha n}$ \>$\frac{1}{(n+k)}$ \\ \\ \>$\frac{1}{n!}$ \>$\frac{1}{(2n)!}$ \>$\frac{1}{(2n+1)!}$ \\ \\ \>$\frac{\sin(\beta n)}{n!}$ \>$\sin(\alpha n+\phi)$ \>$e^{\alpha n} \sin(\beta n)$ \\ \\ \>$\frac{\cos(\beta n)}{n!}$ \>$\cos(\alpha n+\phi)$ \>$e^{\alpha n} \cos(\beta n)$ \\ \\ \>$\frac{\sin(\beta (n+1))}{n+1}$ \>$\sinh(\alpha n+\phi)$ \>$\frac{\cos(\beta (n+1))}{n+1}$ \\ \\ \>$\cosh(\alpha n+\phi)$ \>${n+k \choose m}$\\ \end{tabbing} \begin{tabbing} \underline {{\bf Other Combinations}}\= \\ \\ \underline {Linearity} \>${\cal Z} \{a f_n+b g_n \} = a{\cal Z} \{f_n\}+b{\cal Z}\{g_n\}$ \\ \\ \underline {Multiplication by $n$} \>${\cal Z} \{n^k \cdot f_n\} = -z \frac{d}{dz} \left({\cal Z}\{n^{k-1} \cdot f_n,n,z\} \right)$ \\ \\ \underline {Multiplication by $\lambda^n$} \>${\cal Z} \{\lambda^n \cdot f_n\}=F \left(\frac{z}{\lambda}\right)$ \\ \\ \underline {Shift Equation} \>${\cal Z} \{f_{n+k}\} = z^k \left(F(z) - \sum\limits^{k-1}_{j=0} f_j z^{-j}\right)$ \\ \\ \underline {Symbolic Sums} \> ${\cal Z} \left\{ \sum\limits_{k=0}^{n} f_k \right\} = \frac{z}{z-1} \cdot {\cal Z} \{f_n\}$ \\ \\ \>${\cal Z} \left\{ \sum\limits_{k=p}^{n+q} f_k \right\}$ \ \ \ combination of the above \\ \\ where $k$,$\lambda \in$ {\bf N}$- \{0\}$; and $a$,$b$ are variables or fractions; and $p$,$q \in$ {\bf Z} or \\ are functions of $n$; and $\alpha$, $\beta$ and $\phi$ are angles in radians. \end{tabbing} The calculation of the Laurent coefficients of a regular function results in the following inverse formula for the $Z$-Transform: If $F(z)$ is a regular function in the region $|z|> \rho$ then $\exists$ a sequence \{$f_n$\} with ${\cal Z} \{f_n\}=F(z)$ given by \[f_n = \frac{1}{2 \pi i}\oint F(z) z^{n-1} dz\] \begin{tabbing} {\bf SYNTAX:}\ \ {\tt invztrans($F(z)$, z, n)}\ \ \ \ \ \ \ \ \=where $F(z)$ is an expression, \\ \> and $z$,$n$ are identifiers. 
\end{tabbing} \ttindex{invztrans} \begin{tabbing} This \= package can compute the Inverse \= Z-Transforms of any rational function, \\ whose denominator can be factored over ${\bf Q}$, in addition to the following list \\ of $F(z)$.\\ \\ \> $\sin \left(\frac{\sin (\beta)}{z} \ \right) e^{\left(\frac{\cos (\beta)}{z} \ \right)}$ \> $\cos \left(\frac{\sin (\beta)}{z} \ \right) e^{\left(\frac{\cos (\beta)}{z} \ \right)}$ \\ \\ \> $\sqrt{\frac{z}{A}} \sin \left( \sqrt{\frac{z}{A}} \ \right)$ \> $\cos \left( \sqrt{\frac{z}{A}} \ \right)$ \\ \\ \> $\sqrt{\frac{z}{A}} \sinh \left( \sqrt{\frac{z}{A}} \ \right)$ \> $\cosh \left( \sqrt{\frac{z}{A}} \ \right)$ \\ \\ \> $z \ \log \left(\frac{z}{\sqrt{z^2-A z+B}} \ \right)$ \> $z \ \log \left(\frac{\sqrt{z^2+A z+B}}{z} \ \right)$ \\ \\ \> $\arctan \left(\frac{\sin (\beta)}{z+\cos (\beta)} \ \right)$ \\ \end{tabbing} here $k$,$\lambda \in$ {\bf N}$ - \{0\}$ and $A$,$B$ are fractions or variables ($B>0$) and $\alpha$,$\beta$, \& $\phi$ are angles in radians. Examples: {\small\begin{verbatim} ztrans(sum(1/factorial(k),k,0,n),n,z); 1/z e *z -------- z - 1 invztrans(z/((z-a)*(z-b)),z,n); n n a - b --------- a - b \end{verbatim}} %\documentstyle[11pt,reduce]{article} \part{Standard Lisp Report} \setcounter{examplectr}{0} \chapter{The Standard Lisp Report} \label{SL} \typeout{{The Standard Lisp Report}} {\footnotesize \begin{center} Jed Marti \\ A. C. Hearn \\ M. L. Griss \\ C. Griss \end{center} } \ttindex{Standard Lisp Report} %%% Function/method definition. %%% de{fname}{arglist}{type}{text} For short arg lists. %%% DE{fname}{arglist}{type}{text} For long arg lists. \newlength{\argwidth} % Width of argument box. \setlength{\argwidth}{4in} \newlength{\dewidth} \setlength{\dewidth}{4.5in} % Width of text box. \newcommand{\de}[4] {\vspace{.25in} \noindent \begin{minipage}[t]{\textwidth} \index{#1} {\f{#1}}{#2}\hfill{\em #3} \\ \hspace*{.25in}\begin{minipage}[t]{\dewidth} #4 \end{minipage} \end{minipage} } %%% Global/fluid variable description. %%% variable{name}{initial value}{type}{text} \newcommand{\variable}[4] {\vspace{.25in} \noindent \begin{minipage}[t]{\textwidth} \index{#1 (#3)} {\bf #1} = #2 \hfill {\em #3} \\ \hspace*{.25in} \ \begin{minipage}[t]{\dewidth} #4 \end{minipage} \end{minipage}} %%% Command to display an error or warning message in teletype format. Also %%% leaves blank vertical space around it. \newcommand{\errormessage}[1] {\vspace{.1in} \noindent {\tt #1} \\ \vspace{.1in}} %%% \p is a parameter name (or argument). Just do this as bf. \newcommand{\p}[1] {{\bf #1}} %%% \ty is a type - do as italics. \newcommand{\ty}[1] {{\em #1}} %\begin{document} %\maketitle \section{Introduction} Although the programming language LISP was first formulated in 1960~\cite{LISP1.5}, a widely accepted standard has never appeared. As a result, various dialects of LISP were produced~\cite{CDC-LISP,LISP/360,MACLISP,Interlisp,LISPF1,LISP1.6} in some cases several on the same machine! Consequently, a user often faces considerable difficulty in moving programs from one system to another. In addition, it is difficult to write and use programs which depend on the structure of the source code such as translators, editors and cross-reference programs. In 1969, a model for such a standard was produced~\cite{Hearn:69} as part of a general effort to make a large LISP based algebraic manipulation program, REDUCE~\cite{REDUCE3.3}, as portable as possible. 
The goal of this work was to define a uniform subset of LISP 1.5 and its variants so that programs written in this subset could run on any reasonable LISP system. In the intervening years, two deficiencies in the approach taken in Ref.~\cite{Hearn:69} have emerged. First, in order to be as general as possible, the specific semantics and values of several key functions were left undefined. Consequently, programs built on this subset could not make any assumptions about the form of the values of such functions. The second deficiency related to the proposed method of implementation of this language. The model considered in effect two versions of LISP on any given machine, namely Standard LISP and the LISP of the host machine (which we shall refer to as Target LISP). This meant that if any definition was stored in interpretive form, it would vary from implementation to implementation, and consequently one could not write programs in Standard LISP which needed to assume any knowledge about the structure of such forms. This deficiency became apparent during recent work on the development of a portable compiler for LISP~\cite{PLC}. Clearly a compiler has to know precisely the structure of its source code; we concluded that the appropriate source was Standard LISP and not Target LISP. With these thoughts in mind we decided to attempt again a definition of Standard LISP. However, our approach this time is more aggressive. In this document we define a standard for a reasonably large subset of LISP with as precise a statement as possible about the semantics of each function. Secondly, we now require that the target machine interpreter be modified or written to support this standard, rather than mapping Standard LISP onto Target LISP as previously. We have spent countless hours in discussion over many of the definitions given in this report. We have also drawn on the help and advice of a lot of friends whose names are given in the Acknowledgements. Wherever possible, we have used the definition of a function as given in the LISP 1.5 Programmer's Manual~\cite{LISP1.5} and have only deviated where we felt it desirable in the light of LISP programming experience since that time. In particular, we have given considerable thought to the question of variable bindings and the definition of the evaluator functions EVAL and APPLY. We have also abandoned the previous definition of LISP arrays in favor of the more accepted idea of a vector, which most modern LISP systems support. These are the places where we have strayed furthest from the conventional definitions, but we feel that the consistency which results from our approach is worth the redefinition. We have avoided entirely in this report problems which arise from environment passing, such as those represented by the FUNARG problem. We do not necessarily exclude these considerations from our standard, but in this report have decided to avoid the controversy which they create. The semantic differences between compiled and interpreted functions are the topic of another paper~\cite{PLC}. Only functions which affect the compiler in a general way make reference to it. This document is not intended as an introduction to LISP; rather, it is assumed that the reader is already familiar with some version. The document is thus intended as an arbiter of the syntax and semantics of Standard LISP. However, since it is not intended as an implementation description, we deliberately leave unspecified many of the details on which an actual implementation depends.
For example, while we assume the existence of a symbol table for atoms (the "object list" in LISP terminology), we do not specify its structure, since conventional LISP programming does not require this information. Our ultimate goal, however, is to remedy this by defining an interpreter for Standard LISP which is sufficiently complete that its implementation on any given computer will be straightforward and precise. At that time, we shall produce an implementation level specification for Standard LISP which will extend the description of the primitive functions defined herein by introducing a new set of lower level primitive functions in which the structure of the symbol table, heap and so on may be defined. The plan of this chapter is as follows. In Section~\ref{dtypes} we describe the various data types used in Standard LISP. In Section~\ref{slfns}, a description of all Standard LISP functions is presented, organized by type. These functions are defined in an RLISP syntax which is easier to read than LISP S-expressions. Section~\ref{slglobals} describes global variables which control the operation of Standard LISP. \section{Preliminaries} \label{dtypes} \subsection{Primitive Data Types} \label{pdat} \begin{description} \item[integer] Integers are also called "fixed" numbers. The magnitude of an integer is unrestricted. Integers in the LISP input stream are \index{integer ! input} \index{integer ! magnitude} recognized by the grammar: \begin{tabbing} \s{digit} ::= 0$\mid$1$\mid$2$\mid$3$\mid$4$\mid$5$\mid$6$\mid$7$\mid$8$\mid$9 \\ \s{unsigned-integer} ::= \s{digit}$\mid$\s{unsigned-integer}\s{digit} \\ \s{integer} ::= \= \s{unsigned-integer} $\mid$ \\ \> +\s{unsigned-integer} $\mid$ \\ \> ---\s{unsigned-integer} \end{tabbing} \item[floating] - Any floating point number. The precision of floating point \index{floating ! input} numbers is determined solely by the implementation. In BNF floating point numbers are recognized by the grammar: \begin{tabbing} \s{base} ::= \= \s{unsigned-integer}.$\mid$.\s{unsigned-integer}$\mid$ \\ \> \s{unsigned-integer}.\s{unsigned-integer} \\ \> \s{unsigned-floating} ::= \s{base}$\mid$ \\ \> \s{base}E\s{unsigned-integer}$\mid$ \\ \> \s{base}E-\s{unsigned-integer}$\mid$ \\ \> \s{base}E+\s{unsigned-integer} \\ \s{floating} ::= \= \s{unsigned-floating}$\mid$ \\ \> +\s{unsigned-floating}$\mid$-\s{unsigned-floating} \end{tabbing} \item[id] An identifier is a string of characters which may have the \index{id ! input} \index{identifier (see id)} following items associated with it. \begin{description} \item[print name] \index{print name} The characters of the identifier. \item[flags] An identifier may be tagged with a flag. Access is by the FLAG, REMFLAG, and FLAGP functions defined in section~\ref{plist} on page~\pageref{plist}. \index{FLAG} \index{REMFLAG} \index{FLAGP} \item[properties] \index{properties} An identifier may have an indicator-value pair associated with it. Access is by the PUT, GET, and REMPROP functions defined in section~\ref{plist} on page~\pageref{plist}. \index{PUT} \index{GET} \index{REMPROP} \item[values/functions] An identifier may have a value associated with \index{values} \index{functions} it. Access to values is by SET and SETQ defined in \index{SET} \index{SETQ} section~\ref{varsandbinds} on page~\pageref{varsandbinds}. The method by which the value is attached to the identifier is known as the binding type, being one of LOCAL, GLOBAL, or FLUID. Access to the binding type is by the GLOBAL, GLOBALP, FLUID, FLUIDP, and UNFLUID functions. 
\index{GLOBAL} \index{GLOBALP} \index{FLUID} \index{FUIDP} \index{UNFLUID} An identifier may have a function or macro associated with it. Access is by the PUTD, GETD, and REMD functions (see ``Function Definition'', section~\ref{fdef}, on page~\pageref{fdef}). \index{PUTD} \index{GETD} \index{REMD} An identifier may not have both a function and a value associated with it. \item[OBLIST entry] \index{OBLIST entry} An identifier may be entered and removed from a structure called the OBLIST. Its presence on the OBLIST does not directly affect the other properties. Access to the OBLIST is by the INTERN, REMOB, and READ functions. \index{INTERN} \index{REMOB} \index{READ} \end{description} The maximum length of a Standard LISP identifier is 24 characters \index{id ! maximum length} (excluding occurrences of the escape character !) but an \index{id ! escape character} implementation may allow more. Special characters (digits in the first position and punctuation) must be prefixed with an escape character, an ! in Standard LISP. In BNF identifiers are recognized by the grammar: \begin{tabbing} \s{special-character} ::= !\s{any-character} \\ \s{alphabetic} ::= \\ \hspace*{.25in} \= A$\mid$B$\mid$C$\mid$D$\mid$E$\mid$F$\mid$G$\mid$H$ \mid$I$\mid$J$\mid$K$\mid$L$\mid$M$\mid$N$\mid$O$\mid$P$\mid$Q$\mid$R$ \mid$S$\mid$T$\mid$U$\mid$V$\mid$W$\mid$X$\mid$Y$\mid$Z$\mid$ \\ \> a$\mid$b$\mid$c$\mid$d$\mid$e$\mid$f$\mid$g$\mid$h$\mid$i$\mid$j$ \mid$k$\mid$l$\mid$m$\mid$n$\mid$o$\mid$p$\mid$q$\mid$r$\mid$s$\mid$t$ \mid$u$\mid$v$\mid$w$\mid$x$\mid$y$\mid$z \\ \s{lead-character} ::= \s{special-character}$\mid$\s{alphabetic} \\ \s{regular-character} ::= \s{lead-character}$\mid$\s{digit} \\ \s{last-part} ::= \= \s{regular-character} $\mid$ \\ \> \s{last-part}\s{regular-character} \\ \s{id} ::= \s{lead-character}$\mid$\s{lead-character}\s{last-part} \end{tabbing} Note: Using lower case letters in identifiers may cause portability problems. Lower case letters are automatically converted to upper case when the !*RAISE flag is T. \index{*RAISE (global)} \item[string] \index{string} A set of characters enclosed in double quotes as in "THIS IS A STRING". A quote is included by doubling it as in "HE SAID, ""LISP""". The maximum size of strings is 80 characters but an implementation may allow more. Strings are not part of the OBLIST and are considered constants like numbers, vectors, and function-pointers. \item[dotted-pair] A primitive structure which has a left and right part. \index{dotted-pair} \index{dot-notation} A notation called {\em dot-notation} is used for dotted pairs and takes the form: \begin{tabbing} (\s{left-part} . \s{right-part}) \end{tabbing} The \s{left-part} is known as the CAR portion and the \s{right-part} as the CDR portion. The left and right parts may be of any type. Spaces are used to resolve ambiguity with floating point numbers. \item[vector] \index{vector} A primitive uniform structure in which an integer index is used to access random values in the structure. The individual elements of a vector may be of any type. Access to vectors is restricted to functions defined in ``Vectors'' section~\ref{vectors} on page~\pageref{vectors}. 
A notation for vectors, {\em vector-notation}, has the elements of a vector surrounded \index{vector-notation} by square brackets\footnote{Vector elements are not separated by commas as in the published version of this document.} \begin{tabbing} \s{elements} ::= \s{any}$\mid$\s{any} \s{elements} \\ \s{vector} ::= [\s{elements}] \end{tabbing} \item[function-pointer] \index{function-pointer} An implementation may have functions which deal with specific data types other than those listed. The use of these entities is to be avoided with the exception of a restricted use of the function-pointer, an access method to compiled EXPRs and FEXPRs. A particular function-pointer must remain valid \index{EXPR} \index{FEXPR} throughout execution. Systems which change the location of a function must use either an indirect reference or change all occurrences of the associated value. There are two classes of use of function-pointers, those which are supported by Standard LISP but are not well defined, and those which are well defined. \begin{description} \item[Not well defined] Function pointers may be displayed by the print functions or expanded by EXPLODE. \index{EXPLODE} The value appears in the convention of the implementation site. The value is not defined in Standard LISP. Function pointers may be created by COMPRESS \index{COMPRESS} in the format used for printing but the value used is not defined in Standard LISP. Function pointers may be created by functions which deal with compiled function loading. Again, the values created are not well defined in Standard LISP. \item[Well defined] The function pointer associated with an EXPR or FEXPR may be retrieved by GETD \index{GETD} and is valid as long as Standard LISP is in execution. Function pointers may be stored using \index{PUTD} \index{PUT} \index{SETQ} PUTD, PUT, SETQ and the like or by being bound to variables. Function pointers may be checked for equivalence by EQ. \index{EQ ! of function-pointers} The value may be checked for being a function pointer by the CODEP function. \index{CODEP} \end{description} \end{description} \subsection{Classes of Primitive Data Types} \label{pclasses} The classes of primitive types are a notational convenience for describing the properties of functions. \begin{description} \item[boolean] \index{boolean} The set of global variables \{T,NIL\}, or their respective values, \{T, NIL\}. \index{T (global)} \index{NIL (global)} \item[extra-boolean] \index{extra-boolean} Any value in the system. Anything that is not NIL \index{NIL (global)} has the boolean interpretation T. \index{T (global)} \item[ftype] \index{ftype} The class of definable function types. The set of ids \{EXPR, FEXPR, MACRO\}. \index{EXPR} \index{FEXPR} \index{MACRO} \item[number] \index{number} The set of \{integer, floating\}. \item[constant] \index{constant} The set of \{integer, floating, string, vector, function-pointer\}. Constants evaluate to themselves (see the definition of EVAL in ``The Interpreter'', section~\ref{interpreter} on page~\pageref{interpreter}). \index{EVAL ! of constants} \item[any] \index{any} The set of \{integer, floating, string, id, dotted-pair, vector, function-pointer\}. An S-expression is another term for any. All Standard LISP entities have some value unless an ERROR occurs during evaluation or the function causes transfer of control (such as GO and RETURN). \item[atom] \index{atom} The set \{any\}-\{dotted-pair\}. 
\end{description} \subsection{Structures} \index{data structures} \index{structures} Structures are entities created out of the primitive types by the use of dotted-pairs. Lists are structures very commonly required as actual parameters to functions. Where a list of homogeneous entities is required by a function this class will be denoted by \s{{\bf xxx}-list} where {\bf \em xxx} is the name of a class of primitives or structures. Thus a list of ids is an {\em id-list}, a list of integers an {\em integer-list} and so on. \index{id-list} \index{integer-list} \index{-list} \begin{description} \item[list] \index{list} A list is recursively defined as NIL or the \index{list-notation} \index{NIL (global)} dotted-pair (any~.~list). A special notation called {\em list-notation} is used to represent lists. List-notation eliminates extra parentheses and dots. The list (a . (b . (c . NIL))) in list notation is (a b c). \index{dot-notation} List-notation and dot-notation may be mixed as in (a b . c) or (a (b . c) d) which are (a . (b . c)) and (a . ((b . c) . (d . NIL))). In BNF lists are recognized by the grammar: \begin{tabbing} \s{left-part} ::= ( $\mid$ \s{left-part} \s{any} \\ \s{list} ::= \s{left-part}) $\mid$ \s{left-part} . \s{any}) \end{tabbing} Note: () is an alternate input representation of NIL. \index{()} \item[alist] \index{alist} An association list; each element of the list is a dotted-pair, the CAR part being a key associated with the value in the CDR part. \index{association list} \item[cond-form] \index{cond-form} A cond-form is a list of 2 element lists of the form: (\p{ANTECEDENT}:{\em any} \p{CONSEQUENT}:{\em any}) The first element will henceforth be known as the antecedent and \index{antecedent (cond-form)} \index{consequent (cond-form)} the second as the consequent. The antecedent must have a value. The consequent may have a value or an occurrence of GO or RETURN \index{GO} \index{RETURN} as described in the ``Program Feature Functions'', section~\ref{prog} on page~\pageref{prog}. \item[lambda] \index{LAMBDA} A LAMBDA expression which must have the form (in list notation): (LAMBDA parameters body). ``parameters'' is a list of formal parameters for ``body'' an S-expression to be evaluated. The semantics of the evaluation are defined with the EVAL function (see ``The Interpreter'', section~\ref{interpreter} on \index{EVAL ! lambda expressions} page~\pageref{interpreter}). \index{lambda expression} \item[function] \index{function} A LAMBDA expression or a function-pointer to a function. A function is always evaluated as an EVAL, SPREAD form. \index{EVAL ! function} \end{description} \subsection{Function Descriptions} Each function is provided with a prototypical header line. Each formal parameter is given a name and suffixed with its allowed type. Lower case, italic tokens are names of classes and upper case, bold face, tokens are parameter names referred to in the definition. The type of the value returned by the function (if any) is suffixed to the parameter list. If it is not commonly used the parameter type may be a specific set enclosed in brackets \{\ldots\}. \index{\{\ldots\} ! as syntax} For example: \vspace{.1in} \noindent \f{PUTD}(\p{FNAME}:\ty{id}, \p{TYPE}:\ty{ftype}, \p{BODY}:\{\ty{lambda, function-pointer}\}):\ty{id} \vspace{.1in} PUTD is a function with three parameters. The parameter FNAME is an id to be the name of the function being defined. TYPE is the type of the function being defined and BODY is a lambda expression or a function-pointer. 
PUTD returns the name of the function being defined. Functions which accept formal parameter lists of arbitrary length have the type class and parameter enclosed in square brackets indicating that zero or more occurrences of that argument are permitted. \index{[\ldots] syntax} For example: \vspace{.1in} \noindent \f{AND}([\p{U}:\ty{any}]):\ty{extra-boolean} \vspace{.1in} AND is a function which accepts zero or more arguments which may be of any type. \subsection{Function Types} EVAL type functions are those which are invoked with evaluated \index{EVAL ! function type} arguments. NOEVAL functions are invoked with unevaluated arguments. \index{NOEVAL ! function type} SPREAD type functions have their arguments passed in one-to-one \index{SPREAD ! function type} correspondence with their formal parameters. NOSPREAD functions \index{NOSPREAD ! function type} receive their arguments as a single list. EVAL, SPREAD functions are \index{FEXPR} associated with EXPRs and NO\-EVAL, NO\-SPREAD functions with FEXPRs. EVAL, NO\-SPREAD and NOEVAL, SPREAD functions can be simulated using NOEVAL, NO\-SPREAD functions or MACROs. \index{MACRO} EVAL, SPREAD type functions may have a maximum of 15 parameters. \index{formal parameter limit} There is no limit on the number of parameters a NOEVAL, NOSPREAD function or MACRO may have. In the context of the description of an EVAL, SPREAD function, when we speak of the formal parameters we mean their actual values. However, in a NOEVAL, NOSPREAD function we mean the unevaluated actual parameters. A third function type, the MACRO, implements functions which \index{MACRO} create S-expressions based on actual parameters. When a macro invocation is encountered, the body of the macro, a lambda expression, is invoked as a NOEVAL, NOSPREAD function with the macro's invocation bound as a list to the macro's single formal parameter. When the macro has been evaluated the resulting S-expression is reevaluated. The descriptions of the EVAL and EXPAND \index{EVAL ! MACRO functions} functions provide precise details. \subsection{Error and Warning Messages} \index{error messages} Many functions detect errors. The description of such functions will include these error conditions and suggested formats for display \index{ERROR} of the generated error messages. A call on the ERROR function is implied but the error number is not specified by Standard LISP. In some cases a warning message is sufficient. To distinguish between \index{warning messages} \index{***** (error message)} \index{*** (warning message)} errors and warnings, errors are prefixed with five asterisks and warnings with only three. Primitive functions check that arguments required to be of a certain primitive type are indeed of that type, and display an error message if an argument is not correct. The type mismatch error always takes the form: \index{error ! type mismatch error} \errormessage{***** PARAMETER not TYPE for FN} Here PARAMETER is the unacceptable actual parameter, TYPE is the type that PARAMETER was supposed to be, and FN is the name of the function that detected the error. \subsection{Comments} \index{comments} \index{\%} The character \% signals the start of a comment: text to be ignored during parsing. A comment is terminated by the end of the line it \index{READCH} \index{READ} is on. The function READCH must be able to read a comment one character at a time. Comments are transparent to the function READ. \% may occur as a character in identifiers by preceding it with the \index{escape character} escape character !.
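Before the individual functions are described, the following small example may help to make the classification of function types concrete. It is not part of the published report; the names FIRSTITEM and TWICE are purely illustrative, and the defining functions DE and DM together with the functions used in the bodies are described in the sections that follow:
{\small\begin{verbatim}
(DE FIRSTITEM (U) (CAR U))     % an EXPR: EVAL, SPREAD
(FIRSTITEM (QUOTE (A B C)))    % the argument is evaluated; the value is A

(DM TWICE (U) (LIST (QUOTE PLUS2) (CADR U) (CADR U)))
(TWICE 3)                      % U is bound to the whole form (TWICE 3);
                               % the body builds (PLUS2 3 3), which is then
                               % reevaluated, giving 6
\end{verbatim}}
Note that the macro body itself is never the final result: the S-expression it constructs is what is finally evaluated.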
\section{Functions} \label{slfns} \subsection{Elementary Predicates} \label{elpreds} \index{predicate !} \index{T (global)} \index{NIL (global)} Functions in this section return T when the condition defined is met and NIL when it is not. Defined are type checking functions and elementary comparisons. \de{ATOM}{(\p{U}:\ty{any}):{\ty boolean}}{eval, spread} {Returns T if U is not a pair. {\tt \begin{tabbing} EXPR PROCEDURE ATOM(U); \\ \hspace*{1em} NULL PAIRP U; \end{tabbing}}} \de{CODEP}{(\p{U}:\f{any}):{\ty boolean}}{eval, spread} {Returns T if U is a function-pointer.} \de{CONSTANTP}{(\p{U}:\ty{any}):\ty{boolean}}{eval, spread} {Returns T if U is a constant (a number, string, function-pointer, or vector). {\tt \begin{tabbing} EXPR PROCEDURE CONSTANTP(U); \\ \hspace*{1em} NULL OR(PAIRP U, IDP U); \end{tabbing}} } \de{EQ}{(\p{U}:\ty{any}, \p{V}:\ty{any}):\ty{boolean}}{eval, spread} {Returns T if U points to the same object as V. EQ is \underline{not} a reliable comparison between numeric arguments. } \de{EQN}{(\p{U}:\ty{any}, \p{V}:\ty{any}):\ty{boolean}}{eval, spread} {Returns T if U and V are EQ or if U and V are numbers and have the same value and type. } \de{EQUAL}{(\p{U}:\ty{any}, \p{V}:\ty{any}):\ty{boolean}}{eval, spread} {Returns T if U and V are the same. Dotted-pairs are compared recursively to the bottom levels of their trees. Vectors must have identical dimensions and EQUAL values in all positions. Strings must \index{EQ ! of function-pointers} \index{EQN} have identical characters. Function pointers must have EQ values. Other atoms must be EQN equal. } \de{FIXP}{(\p{U}:\ty{any}):\ty{boolean}}{eval, spread} {Returns T if U is an integer (a fixed number).} \de{FLOATP}{(\p{U}:\ty{any}):\ty{boolean}}{eval, spread} {Returns T if U is a floating point number. } \de{IDP}{(\p{U}:\ty{any}):\ty{boolean}}{eval, spread} {Returns T if U is an id.} \de{MINUSP}{(\p{U}:\ty{any}):\ty{boolean}}{eval, spread} {Returns T if U is a number and less than 0. If U is not a number or is a positive number, NIL is returned. {\tt \begin{tabbing} EXPR PROCEDURE MINUSP(U); \\ \hspace*{1em} IF NUMBERP U THEN LESSP(U, 0) ELSE NIL; \end{tabbing}}} \de{NULL}{(\p{U}:\ty{any}):\ty{boolean}}{eval, spread} {Returns T if U is NIL. {\tt \begin{tabbing} EXPR PROCEDURE NULL(U); \\ \hspace*{1em} U EQ NIL; \end{tabbing}}} \de{NUMBERP}{(\p{U}:\ty{any}):\ty{boolean}}{eval, spread} {Returns T if U is a number (integer or floating). {\tt \begin{tabbing} EXPR PROCEDURE NUMBERP(U); \\ \hspace*{1em} IF OR(FIXP U, FLOATP U) THEN T ELSE NIL; \end{tabbing}}} \de{ONEP}{(\p{U}:\ty{any}):\ty{boolean}}{eval, spread.} {Returns T if U is a number and has the value 1 or 1.0. Returns NIL otherwise. \footnote{The definition in the published report is incorrect as it does not return T for \p{U} of 1.0.} {\tt \begin{tabbing} EXPR PROCEDURE ONEP(U); \\ \hspace*{1em} OR(EQN(U, 1), EQN(U, 1.0)); \end{tabbing}}} \de{PAIRP}{(\p{U}:\ty{any}):\ty{boolean}}{eval, spread} {Returns T if U is a dotted-pair. } \de{STRINGP}{(\p{U}:\ty{any}):\ty{boolean}}{eval, spread} {Returns T if U is a string. } \de{VECTORP}{(\p{U}:\ty{any}):\ty{boolean}}{eval, spread} {Returns T if U is a vector. } \de{ZEROP}{(\p{U}:\ty{any}):\ty{boolean}}{eval, spread.} {Returns T if U is a number and has the value 0 or 0.0. 
Returns NIL otherwise.\footnote{The definition in the published report is incorrect as it does not return T for \p{U} of 0.0.} {\tt \begin{tabbing} EXPR PROCEDURE ZEROP(U); \\ \hspace*{1em} OR(EQN(U, 0), EQN(U, 0.0)); \end{tabbing}}} \subsection{Functions on Dotted-Pairs} \index{dotted-pair} The following are elementary functions on dotted-pairs. All functions in this section which require dotted-pairs as parameters detect a type mismatch error if the actual parameter is not a dotted-pair. \de{CAR}{(\p{U}:\ty{dotted-pair}):\ty{any}}{eval, spread} {CAR(CONS(a, b)) $\rightarrow$ a. The left part of U is returned. The type \index{CONS} mismatch error occurs if U is not a dotted-pair.} \de{CDR}{(\p{U}:\ty{dotted-pair}):\ty{any}}{eval, spread} {CDR(CONS(a, b)) $\rightarrow$ b. The right part of U is returned. The type \index{CONS} mismatch error occurs if U is not a dotted-pair.} The composites of CAR and CDR are supported up to 4 levels, namely: \index{CAR ! composite forms} \index{CDR ! composite forms} \hspace*{1in}\begin{tabular}{l l l} CAAAAR & CAAAR & CAAR \\ CAAADR & CAADR & CADR \\ CAADAR & CADAR & CDAR \\ CAADDR & CADDR & CDDR \\ CADAAR & CDAAR & \\ CADADR & CDADR & \\ CADDAR & CDDAR & \\ CADDDR & CDDDR & \\ CDAAAR & & \\ CDAADR & & \\ CDADAR & & \\ CDADDR & & \\ CDDAAR & & \\ CDDADR & & \\ CDDDAR & & \\ CDDDDR & & \end{tabular} \de{CONS}{(\p{U}:\ty{any}, \p{V}:\ty{any}):\ty{dotted-pair}}{eval, spread} {Returns a dotted-pair which is not EQ to anything and has U as its \index{EQ ! of dotted-pairs} \index{dotted-pair} CAR part and V as its CDR part.} \de{LIST}{([\p{U}:\ty{any}]):\ty{list}}{noeval, nospread, or macro} {A list of the evaluation of each element of U is returned. The order of evaluation need not be first to last as the following definition implies.\footnote{The published report's definition implies a specific ordering.} {\tt \begin{tabbing} FEXPR PROCEDURE LIST(U); \\ \hspace*{1em} EVLIS U; \end{tabbing}}} \de{RPLACA}{(\p{U}:\ty{dotted-pair}, \p{V}:\ty{any}):\ty{dotted-pair}}{eval, spread} {The CAR portion of the dotted-pair U is replaced by V. If dotted-pair U is (a . b) then (V . b) is returned. The type mismatch error occurs if U is not a dotted-pair. } \de{RPLACD}{(\p{U}:\ty{dotted-pair}, \p{V}:\ty{any}):\ty{dotted-pair}}{eval, spread} {The CDR portion of the dotted-pair U is replaced by V. If dotted-pair U is (a . b) then (a . V) is returned. The type mismatch error occurs if U is not a dotted-pair.} \subsection{Identifiers} \label{identifiers} The following functions deal with identifiers and the OBLIST, \index{OBLIST} the structure of which is not defined. The function of the OBLIST is to provide a symbol table for identifiers created during input. Identifiers created by READ which have the same characters will \index{READ} \index{EQ ! of identifiers} therefore refer to the same object (see the EQ function in ``Elementary Predicates'', section~\ref{elpreds} on page~\pageref{elpreds}). \de{COMPRESS}{(\p{U}:\ty{id-list}):\{\ty{atom}-\ty{vector}\}}{eval, spread} {U is a list of single character identifiers which is built into a Standard LISP entity and returned. Recognized are numbers, strings, and identifiers with the escape character prefixing special characters. The formats of these items appear in ``Primitive Data Types'' section~\ref{pdat} on page~\pageref{pdat}. Identifiers are not interned on the OBLIST. Function pointers may be compressed but this is an undefined use. 
If an entity cannot be parsed out of U or characters are left over after parsing an error occurs: \errormessage{***** Poorly formed atom in COMPRESS} } \de{EXPLODE}{(\p{U}:\{\ty{atom}\}-\{\ty{vector}\}):\ty{id-list}}{eval, spread} {Returned is a list of interned characters representing the characters to print of the value of U. The primitive data types have these formats: \begin{description} \item[integer] \index{integer ! output} Leading zeroes are suppressed and a minus sign prefixes the digits if the integer is negative. \item[floating] \index{floating ! output} The value appears in the format [-]0.nn...nnE[-]mm if the magnitude of the number is too large or small to display in [-]nnnn.nnnn format. The crossover point is determined by the implementation. \item[id] \index{id ! output} The characters of the print name of the identifier are produced with special characters prefixed with the escape character. \item[string] \index{string ! output} The characters of the string are produced surrounded by double quotes "\ldots". \item[function-pointer] \index{function-pointer ! output} The value of the function-pointer is created as a list of characters conforming to the conventions of the system site. \end{description} The type mismatch error occurs if U is not a number, identifier, string, or function-pointer. } \de{GENSYM}{():\ty{identifier}}{eval, spread} {Creates an identifier which is not interned on the OBLIST and consequently not EQ to anything else. \index{OBLIST entry} \index{EQ ! of GENSYMs}} \de{INTERN}{(\p{U}:\{\ty{id,string}\}):\ty{id}}{eval, spread} {INTERN searches the OBLIST for an identifier with the same print name \index{OBLIST entry} as U and returns the identifier on the OBLIST if a match is found. Any properties and global values associated with U may be lost. If U does not match any entry, a new one is created and returned. If U has more than the maximum number of characters permitted by the implementation (the minimum number is 24) an error occurs: \index{id ! minimum size} \errormessage{***** Too many characters to INTERN} } \de{REMOB}{(\p{U}:\ty{id}):\ty{id}}{eval, spread} {If U is present on the OBLIST it is removed. This does not affect U \index{OBLIST entry} having properties, flags, functions and the like. U is returned.} \subsection{Property List Functions} \label{plist} \index{property list} With each id in the system is a ``property list'', a set of entities which are associated with the id for fast access. These entities are called ``flags'' if their use gives the id a single valued \index{flags} property, and ``properties'' if the id is to have a multivalued \index{properties} attribute: an indicator with a property. Flags and indicators may clash, consequently care should be taken to avoid this occurrence. Flagging X with an id which already is an indicator for X may result in that indicator and associated property being lost. Likewise, adding an indicator which is the same id as a flag may result in the flag being destroyed. \de{FLAG}{(\p{U}:\ty{id-list}, \p{V}:\ty{id}):\ty{NIL}}{eval, spread} {U is a list of ids which are flagged with V. The effect of FLAG is that FLAGP will have the value T for those ids of U which were flagged. Both V and all the elements of U must be identifiers or the type mismatch error occurs.} \de{FLAGP}{(\p{U}:\ty{any}, \p{V}:\ty{any}):\ty{boolean}}{eval, spread} {Returns T if U has been previously flagged with V, else NIL. 
Returns NIL if either U or V is not an id.} \de{GET}{(\p{U}:\ty{any}, \p{IND}:\ty{any}):\ty{any}}{eval, spread} {Returns the property associated with indicator IND from the property list of U. If U does not have indicator IND, NIL is returned. GET cannot be used to access functions (use GETD instead). \index{GET ! not for functions}} \de{PUT}{(\p{U}:\ty{id}, \p{IND}:\ty{id}, \p{PROP}:\ty{any}):\ty{any}}{eval, spread} {The indicator IND with the property PROP is placed on the property list of the id U. If the action of PUT occurs, the value of PROP is returned. If either of U and IND are not ids the type mismatch error will occur and no property will be placed. PUT cannot be used to define functions (use PUTD instead). \index{PUT ! not for functions}} \de{REMFLAG}{(\p{U}:\ty{any-list}, \p{V}:\ty{id}):\ty{NIL}}{eval, spread} {Removes the flag V from the property list of each member of the list U. Both V and all the elements of U must be ids or the type mismatch error will occur.} \de{REMPROP}{(\p{U}:\ty{any}, \p{IND}:\ty{any}):\ty{any}}{eval, spread} {Removes the property with indicator IND from the property list of U. Returns the removed property or NIL if there was no such indicator.} \subsection{Function Definition} \label{fdef} Functions in Standard LISP are global entities. To avoid function-variable naming clashes no variable may have the same name as a function. \index{function ! as GLOBAL} \de{DE}{(\p{FNAME}:\ty{id}, \p{PARAMS}:\ty{id-list}, \p{FN}:\ty{any}):\ty{id}}{noeval, nospread} {The function FN with the formal parameter list PARAMS is added to the set of defined functions with the name FNAME. Any previous definitions of the function are lost. The function created is of type \index{*COMP (fluid)} EXPR. If the !*COMP variable is non-NIL, the EXPR is first \index{EXPR} compiled. The name of the defined function is returned. {\tt \begin{tabbing} FEXPR PROCEDURE DE(U); \\ \hspace*{1em} PUTD(CAR U, 'EXPR, LIST('LAMBDA, CADR U, CADDR U)); \end{tabbing}}} \de{DF}{(\p{FNAME}:\ty{id}, \p{PARAM}:\ty{id-list}, \p{FN}:\ty{any}):\ty{id}}{noeval, nospread} {The function FN with formal parameter PARAM is added to the set of defined functions with the name FNAME. Any previous definitions of the function are lost. The function created is of type FEXPR. \index{*COMP variable} \index{FEXPR} If the !*COMP variable is T the FEXPR is first compiled. The name of the defined function is returned. {\tt \begin{tabbing} FEXPR PROCEDURE DF(U); \\ \hspace*{1em} PUTD(CAR U, 'FEXPR, LIST('LAMBDA, CADR U, CADDR U)); \\ \end{tabbing} }} \de{DM}{(\p{MNAME}:\ty{id}, \p{PARAM}:\ty{id-list}, \p{FN}:\ty{any}):\ty{id}}{noeval, nospread} {The macro FN with the formal parameter PARAM is added to the set of defined functions with the name MNAME. Any previous definitions of the function are overwritten. The function created is of type MACRO. \index{MACRO} The name of the macro is returned. {\tt \begin{tabbing} FEXPR PROCEDURE DM(U); \\ \hspace*{1em} PUTD(CAR U, 'MACRO, LIST('LAMBDA, CADR U, CADDR U)); \end{tabbing} } } \de{GETD}{(\p{FNAME}:\ty{any}):\{NIL, \ty{dotted-pair}\}}{eval, spread} {If FNAME is not the name of a defined function, NIL is returned. If FNAME is a defined function then the dotted-pair \vspace{.15in} (\p{TYPE}:\ty{ftype} . \p{DEF}:\{\ty{function-pointer, lambda}\}) \vspace{.15in} is returned.} \de{PUTD}{(\p{FNAME}:\ty{id}, \p{TYPE}:\ty{ftype}, \p{BODY}:\ty{function}):\ty{id}}{eval, spread} {Creates a function with name FNAME and definition BODY of type TYPE. 
If PUTD succeeds the name of the defined function is returned. The effect of PUTD is that GETD will return a dotted-pair with the functions type and definition. Likewise the GLOBALP predicate will \index{GLOBALP} \index{function ! as global} return T when queried with the function name. If the function FNAME has already been declared as a GLOBAL or FLUID variable the error: \errormessage{***** FNAME is a non-local variable} occurs and the function will not be defined. If function FNAME already exists a warning message will appear: \errormessage{*** FNAME redefined} The function defined by PUTD will be compiled before definition \index{*COMP (fluid)} if the !*COMP global variable is non-NIL.} \de{REMD}{(\p{FNAME}:\ty{id}):\{NIL, \ty{dotted-pair}\}}{eval, spread} {Removes the function named FNAME from the set of defined functions. Returns the (ftype . function) dotted-pair or NIL as does GETD. The global/function attribute of FNAME is removed and the name may be used subsequently as a variable.} \subsection{Variables and Bindings} \label{varsandbinds} \index{variable scope} \index{scope} A variable is a place holder for a Standard LISP entity which is said to be bound to the variable. The scope of a variable is the range over which the variable has a defined value. There are three different binding mechanisms in Standard LISP. \begin{description} \item[Local Binding] \index{local binding} This type of binding occurs \index{scope ! local} only in compiled functions. Local variables occur as formal parameters in lambda expressions and as PROG form variables. The binding occurs when a lambda expression is evaluated or when a PROG form is executed. The scope of a local variable is the body of the function in which it is defined. \item[Global Binding] \index{global binding} Only one binding of a \index{scope ! global} global variable exists at any time allowing direct access to the value bound to the variable. The scope of a global variable is universal. Variables declared GLOBAL may not appear as parameters in lambda expressions or as PROG form variables. A variable must be declared GLOBAL prior to its use as a global variable since the default type for undeclared variables is FLUID. \item[Fluid Binding] \index{fluid binding} \index{fluid binding ! as default} Fluid variables are global in scope but may occur as \index{scope ! fluid} formal parameters or PROG form variables. In interpreted functions all formal parameters and PROG form variables are considered to have fluid binding until changed to local binding by compilation. When fluid variables are used as parameters they are rebound in such a way that the previous binding may be restored. All references to fluid variables are to the currently active binding. \end{description} \de{FLUID}{(\p{IDLIST}:\ty{id-list}):\p{NIL}}{eval, spread} {The ids in IDLIST are declared as FLUID type variables (ids not previously declared are initialized to NIL). Variables in IDLIST already declared FLUID are ignored. Changing a variable's type from GLOBAL to FLUID is not permissible and results in the error: \errormessage{***** ID cannot be changed to FLUID} } \de{FLUIDP}{(\p{U}:\ty{any}):\ty{boolean}}{eval, spread} {If U has been declared FLUID (by declaration only) T is returned, otherwise NIL is returned.} \de{GLOBAL}{(\p{IDLIST}:\ty{id-list}):\p{NIL}}{eval, spread} {The ids of IDLIST are declared global type variables. If an id has not been declared previously it is initialized to NIL. Variables already declared GLOBAL are ignored. 
Changing a variables type from FLUID to GLOBAL is not permissible and results in the error: \errormessage{***** ID cannot be changed to GLOBAL} } \de{GLOBALP}{(\p{U}:\ty{any}):\ty{boolean}}{eval, spread} {If U has been declared GLOBAL or is the name of a defined function, T is returned, else NIL is returned.} \de{SET}{(\p{EXP}:\ty{id}, \p{VALUE}:\ty{any}):\ty{any}}{eval, spread} {EXP must be an identifier or a type mismatch error occurs. The effect of SET is replacement of the item bound to the identifier by VALUE. If the identifier is not a local variable or has not been declared GLOBAL it is automatically declared FLUID with the resulting warning message: \errormessage{*** EXP declared FLUID} EXP must not evaluate to T or NIL or an error occurs: \index{T ! cannot be changed} \index{NIL ! cannot be changed} \errormessage{***** Cannot change T or NIL} } \de{SETQ}{(\p{VARIABLE}:\ty{id}, \p{VALUE}:\ty{any}):\ty{any}}{noeval, nospread} {If VARIABLE is not local or GLOBAL it is by default declared FLUID and the warning message: \errormessage{*** VARIABLE declared FLUID} appears. The value of the current binding of VARIABLE is replaced by the value of VALUE. VARIABLE must not be T or NIL or an error occurs: \index{T ! cannot be changed} \index{NIL ! cannot be changed} \errormessage{***** Cannot change T or NIL} {\tt \begin{tabbing} MACRO PROCEDURE SETQ(X); \\ \hspace*{1em} LIST('SET, LIST('QUOTE, CADR X), CADDR X); \end{tabbing}} } \de{UNFLUID}{(\p{IDLIST}:\ty{id-list}):\ty{NIL}}{eval, spread} {The variables in IDLIST that have been declared as FLUID variables are no longer considered as fluid variables. Others are ignored. This affects only compiled functions as free variables in interpreted functions are automatically considered fluid~\cite{PLC}. \index{scope ! fluid and compiled}} \subsection{Program Feature Functions} \label{prog} These functions provide for explicit control sequencing, and the definition of blocks altering the scope of local variables. \de{GO}{(\p{LABEL}:\ty{id})}{noeval, nospread} {GO alters the normal flow of control within a PROG function. The next statement of a PROG function to be evaluated is immediately preceded by LABEL. A GO may only appear in the following situations: \begin{enumerate} \item At the top level of a PROG referencing a label which also appears at the top level of the same PROG. \item As the consequent of a COND item of a COND appearing on the top level of a PROG. \index{GO ! in COND} \index{RETURN ! in COND} \item As the consequent of a COND item which appears as the consequent of a COND item to any level. \item As the last statement of a PROGN which appears at the top level of a PROG or in a PROGN appearing in the consequent of a COND to any level subject to the restrictions of 2 and 3. \item As the last statement of a PROGN within a PROGN or as the consequent of a COND in a PROGN to any level subject to the restrictions of 2, 3 and 4. \end{enumerate} If LABEL does not appear at the top level of the PROG in which the GO appears, an error occurs: \errormessage{***** LABEL is not a known label} If the GO has been placed in a position not defined by rules 1-5, another error is detected: \errormessage{***** Illegal use of GO to LABEL} } \de{PROG}{(\p{VARS}:\ty{id-list}, [\p{PROGRAM}:\{\ty{id, any}\}]):\ty{any}}{noeval, nospread} {VARS is a list of ids which are considered fluid when the PROG is interpreted and local when compiled (see ``Variables and Bindings'', section~\ref{varsandbinds} on page~\pageref{varsandbinds}). 
The PROGs variables are allocated space when the PROG form is invoked and are deallocated when the PROG is exited. PROG variables are initialized to \index{PROG ! variables} NIL. The PROGRAM is a set of expressions to be evaluated in order of their appearance in the PROG function. Identifiers appearing in the top level of the PROGRAM are labels which can be referenced by GO. The value returned by the PROG function is determined by a RETURN function \index{PROG ! default value} or NIL if the PROG ``falls through''.} \de{PROGN}{([\p{U}:\ty{any}]):\ty{any}}{noeval, nospread} {U is a set of expressions which are executed sequentially. The value returned is the value of the last expression.} \de{PROG2}{(A:any, B:any)\ty{any}}{eval, spread} {Returns the value of B. {\tt \begin{tabbing} EXPR PROCEDURE PROG2(A, B);\\ \hspace*{1em} B; \end{tabbing}}} \de{RETURN}{(\p{U}:\ty{any})}{eval, spread} {Within a PROG, RETURN terminates the evaluation of a PROG and returns U as the value of the PROG. The restrictions on the placement of RETURN are exactly those of GO. Improper placement of RETURN results in the error: \errormessage{***** Illegal use of RETURN} } \subsection{Error Handling} \label{errors} \de{ERROR}{(\p{NUMBER}:\ty{integer}, \p{MESSAGE}:\ty{any})}{eval, spread} {NUMBER and MESSAGE are passed back to a surrounding ERRORSET (the Standard LISP reader has an ERRORSET). MESSAGE is placed in the \index{EMSG* (global)} global variable EMSG!* and the error number becomes the value of the surrounding ERRORSET. FLUID variables and local bindings are unbound \index{fluid ! unbinding by ERROR} to return to the environment of the ERRORSET. Global variables are not affected by the process.} \de{ERRORSET}{(\p{U}:\ty{any}, \p{MSGP}:\ty{boolean}, \p{TR}:\ty{boolean}):\ty{any}}{eval, spread} {If an error occurs during the evaluation of U, the value of NUMBER from the ERROR call is returned as the value of ERRORSET. In addition, if the value of MSGP is non-NIL, the MESSAGE from the ERROR call is displayed upon both the standard output device and the currently selected output device unless the standard output device is not open. The message appears prefixed with 5 asterisks. The MESSAGE \index{***** (error message)} list is displayed without top level parentheses. The MESSAGE from the \index{EMSG* (global)} ERROR call will be available in the global variable EMSG!*. The exact format of error messages generated by Standard LISP functions described in this document are not fixed and should not be relied upon to be in any particular form. Likewise, error numbers generated by Standard LISP functions are implementation dependent. If no error occurs during the evaluation of U, the value of (LIST (EVAL U)) is returned. If an error has been signaled and the value of TR is non-NIL a traceback sequence will be initiated on the selected output device. The traceback will display information such as unbindings of FLUID \index{fluid ! in traceback} variables, argument lists and so on in an implementation dependent format.} \subsection{Vectors} \label{vectors} \index{vector} Vectors are structured entities in which random elements may be accessed with an integer index. A vector has a single dimension. Its maximum size is determined by the implementation and available space. A suggested input ``vector notation'' is defined in ``Classes of Primitive Data Types'', section~\ref{pclasses} on page~\pageref{pclasses} and output with EXPLODE, ``Identifiers'' section~\ref{identifiers} on page~\pageref{identifiers}. 
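As an informal illustration of the vector functions described below (this
example is not part of the published report, and it assumes that V has been
declared FLUID or GLOBAL), a vector might be created and used as follows,
with each comment showing the value returned:
{\small\begin{verbatim}
V := MKVECT(2);   % allocates a vector with elements 0...2, all NIL
PUTV(V, 0, 'A);   % stores A at position 0 and returns A
GETV(V, 0);       % returns A
GETV(V, 2);       % returns NIL, the initial value
UPBV(V);          % returns the upper limit, 2
\end{verbatim}}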
\index{EXPLODE} \de{GETV}{(\p{V}:\ty{vector}, \p{INDEX}:\ty{integer}):\ty{any}}{eval, spread} {Returns the value stored at position INDEX of the vector V. The type mismatch error may occur. An error occurs if the INDEX does not lie within 0\ldots UPBV(V) inclusive: \errormessage{***** INDEX subscript is out of range} } \de{MKVECT}{(\p{UPLIM}:\ty{integer}):\ty{vector}}{eval, spread} {Defines and allocates space for a vector with UPLIM+1 elements accessed as 0\ldots UPLIM. Each element is initialized to NIL. An error will occur if UPLIM is $<$ 0 or there is not enough space for a vector of this size: \errormessage{***** A vector of size UPLIM cannot be allocated} } \de{PUTV}{(\p{V}:\ty{vector}, \p{INDEX}:\ty{integer}, \p{VALUE}:\ty{any}):\ty{any}}{eval, spread} {Stores VALUE into the vector V at position INDEX. VALUE is returned. The type mismatch error may occur. If INDEX does not lie in 0\ldots UPBV(V) an error occurs: \errormessage{***** INDEX subscript is out of range} } \de{UPBV}{(\p{U}:\ty{any}):{NIL,\ty{integer}}}{eval, spread} {Returns the upper limit of U if U is a vector, or NIL if it is not.} \subsection{Boolean Functions and Conditionals} \de{AND}{([\p{U}:\ty{any}]):\ty{extra-boolean}}{noeval, nospread} {AND evaluates each U until a value of NIL is found or the end of the list is encountered. If a non-NIL value is the last value it is returned, or NIL is returned. {\tt \begin{tabbing} FEXPR PROCEDURE AND(U); \\ BEGIN \\ \hspace*{1em} IF NULL U THEN RETURN NIL; \\ LOOP: IF \= NULL CDR U THEN RETURN EVAL CAR U \\ \> ELSE IF NULL EVAL CAR U THEN RETURN NIL; \\ \hspace*{2em} \= U := CDR U; \\ \> GO LOOP \\ END; \end{tabbing} }} \de{COND}{([\p{U}:\ty{cond-form}]):\ty{any}}{noeval, nospread} {The antecedents of all U's are evaluated in order of their appearance until a non-NIL value is encountered. The consequent of the selected U is evaluated and becomes the value of the COND. The consequent may also contain the special functions GO and RETURN subject to the restraints given for these functions in ``Program Feature Functions'', section~\ref{prog} on page~\pageref{prog}. \index{GO ! in COND} \index{RETUNR ! in CODE} In these cases COND does not have a defined value, but rather an effect. If no antecedent is non-NIL the value of COND is NIL. An error is detected if a U is improperly formed: \errormessage{***** Improper cond-form as argument of COND} } \de{NOT}{(\p{U}:\ty{any}):\ty{boolean}}{eval, spread} {If U is NIL, return T else return NIL (same as function NULL). {\tt \begin{tabbing} EXPR PROCEDURE NOT(U); \\ \hspace*{1em} U EQ NIL; \end{tabbing}} } \de{OR}{([\p{U}:\ty{any}]):\ty{extra-boolean}}{noeval, nospread} {U is any number of expressions which are evaluated in order of their appearance. When one is found to be non-NIL it is returned as the value of OR. If all are NIL, NIL is returned. {\tt \begin{tabbing} FEXPR PROCEDURE OR(U); \\ BEGIN SCALAR X; \\ LOOP: IF \= NULL U THEN RETURN NIL \\ \> ELSE IF (X := EVAL CAR U) THEN RETURN X; \\ \hspace*{2em} \= U := CDR U; \\ \> GO LOOP \\ END; \end{tabbing} }} \subsection{Arithmetic Functions} Conversions between numeric types are provided explicitly by the \index{FIX} \index{FLOAT} FIX and FLOAT functions and implicitly by any multi-parameter \index{mixed-mode arithmetic} arithmetic function which receives mixed types of arguments. A conversion from fixed to floating point numbers may result in a loss of precision without a warning message being generated. Since \index{integer ! 
magnitude} integers may have a greater magnitude than that permitted for
floating numbers, an error may be signaled when the attempted conversion
cannot be done. Because the magnitude of integers is unlimited the
conversion of a floating point number to a fixed number is always
possible, the only loss of precision being the digits to the right of the
decimal point which are truncated. If a function receives mixed types of
arguments, the general rule is that the fixed numbers are converted to
floating before arithmetic operations are performed. In all cases an
error occurs if the parameter to an arithmetic function is not a number:
\errormessage{***** XXX parameter to FUNCTION is not a number}
XXX is the value of the parameter at fault and FUNCTION is the name of
the function that detected the error. Exceptions to the rule are noted
where they occur.
\de{ABS}{(\p{U}:\ty{number}):\ty{number}}{eval, spread}
{Returns the absolute value of its argument.
{\tt
\begin{tabbing}
EXPR PROCEDURE ABS(U); \\
\hspace*{1em} IF LESSP(U, 0) THEN MINUS(U) ELSE U;
\end{tabbing}}}
\de{ADD1}{(\p{U}:\ty{number}):\ty{number}}{eval, spread}
{Returns the value of U plus 1 of the same type as U (fixed or floating).
{\tt
\begin{tabbing}
EXPR PROCEDURE ADD1(U); \\
% God knows why, but hspace* isn't accepted here.
\hspace{1em} PLUS2(U, 1);
\end{tabbing}} }
\de{DIFFERENCE}{(\p{U}:\ty{number}, \p{V}:\ty{number}):\ty{number}}{eval, spread}
{The value U - V is returned.}
\de{DIVIDE}{(\p{U}:\ty{number}, \p{V}:\ty{number}):\ty{dotted-pair}}{eval, spread}
{The dotted-pair (quotient . remainder) is returned. The quotient part is
computed the same as by QUOTIENT and the remainder the same as by
REMAINDER. An error occurs if division by zero is attempted:
\index{division by zero}
\errormessage{***** Attempt to divide by 0 in DIVIDE}
{\tt
\begin{tabbing}
EXPR PROCEDURE DIVIDE(U, V); \\
\hspace*{1em} (QUOTIENT(U, V) . REMAINDER(U, V));
\end{tabbing}}}
\de{EXPT}{(\p{U}:\ty{number}, \p{V}:\ty{integer}):\ty{number}}{eval, spread}
{Returns U raised to the V power. A floating point U to an integer power
V does \underline{not} have V changed to a floating number before
exponentiation.}
\de{FIX}{(\p{U}:\ty{number}):\ty{integer}}{eval, spread}
{Returns an integer which corresponds to the truncated value of U. The
result of conversion must retain all significant portions of U. If U is
an integer it is returned unchanged. }
\de{FLOAT}{(\p{U}:\ty{number}):\ty{floating}}{eval, spread}
{The floating point number corresponding to the value of the argument U
is returned. Some of the least significant digits of an integer may be
lost due to the implementation of floating point numbers. FLOAT of a
floating point number returns the number unchanged. If U is too large to
represent in floating point an error occurs:
\errormessage{***** Argument to FLOAT is too large} }
\de{GREATERP}{(\p{U}:\ty{number}, \p{V}:\ty{number}):\ty{boolean}}{eval, spread}
{Returns T if U is strictly greater than V, otherwise returns NIL.}
\de{LESSP}{(\p{U}:\ty{number}, \p{V}:\ty{number}):\ty{boolean}}{eval, spread}
{Returns T if U is strictly less than V, otherwise returns NIL. }
\de{MAX}{([\p{U}:\ty{number}]):\ty{number}}{noeval, nospread, or macro}
{Returns the largest of the values in U. If two or more values are the
same the first is returned.
{\tt
\begin{tabbing}
MACRO PROCEDURE MAX(U); \\
\hspace*{1em} EXPAND(CDR U, 'MAX2);
\end{tabbing}}}
\de{MAX2}{(\p{U}:\ty{number}, \p{V}:\ty{number}):\ty{number}}{eval, spread}
{Returns the larger of U and V.
If U and V are the same value U is returned (U and V might be of different types). {\tt \begin{tabbing} EXPR PROCEDURE MAX2(U, V); \\ \hspace*{1em} IF LESSP(U, V) THEN V ELSE U; \end{tabbing}}} \de{MIN}{([\p{U}:\ty{number}]):\ty{number}}{noeval, nospread, or macro} {Returns the smallest of the values in U. If two or more values are the same the first of these is returned. {\tt \begin{tabbing} MACRO PROCEDURE MIN(U); \\ \hspace*{1em} EXPAND(CDR U, 'MIN2); \end{tabbing}}} \de{MIN2}{(\p{U}:\ty{number}, \p{V}:\ty{number}):\ty{number}}{eval, spread} {Returns the smaller of its arguments. If U and V are the same value, U is returned (U and V might be of different types). {\tt \begin{tabbing} EXPR PROCEDURE MIN2(U, V); \\ \hspace*{1em} IF GREATERP(U, V) THEN V ELSE U; \end{tabbing}}} \de{MINUS}{(\p{U}:\ty{number}):\ty{number}}{eval, spread} {Returns -U. {\tt \begin{tabbing} EXPR PROCEDURE MINUS(U); \\ \hspace*{1em} DIFFERENCE(0, U); \end{tabbing}}} \de{PLUS}{([\p{U}:\ty{number}]):\ty{number}}{noeval, nospread, or macro} {Forms the sum of all its arguments. {\tt \begin{tabbing} MACRO PROCEDURE PLUS(U); \\ \hspace*{1em} EXPAND(CDR U, 'PLUS2); \end{tabbing}}} \de{PLUS2}{(\p{U}:\ty{number}, \p{V}:\ty{number}):\ty{number}}{eval, spread} {Returns the sum of U and V.} \de{QUOTIENT}{(\p{U}:\ty{number}, \p{V}:\ty{number}):\ty{number}}{eval, spread} {The quotient of U divided by V is returned. Division of two positive or two negative integers is conventional. When both U and V are integers and exactly one of them is negative the value returned is the negative truncation of the absolute value of U divided by the absolute value of V. An error occurs if division by zero is attempted: \index{division by zero} \errormessage{***** Attempt to divide by 0 in QUOTIENT} } \de{REMAINDER}{(\p{U}:\ty{number}, \p{V}:\ty{number}):\ty{number}}{eval, spread} {If both U and V are integers the result is the integer remainder of U divided by V. If either parameter is floating point, the result is the difference between U and V*(U/V) all in floating point. If either number is negative the remainder is negative. If both are positive or both are negative the remainder is positive. An error occurs if V is zero: \index{division by zero} \errormessage{***** Attempt to divide by 0 in REMAINDER} {\tt \begin{tabbing} EXPR PROCEDURE REMAINDER(U, V); \\ \hspace*{1em} DIFFERENCE(U, TIMES2(QUOTIENT(U, V), V)); \end{tabbing}}} \de{SUB1}{(\p{U}:\ty{number}):\ty{number}}{eval, spread} {Returns the value of U less 1. If U is a FLOAT type number, the value returned is U less 1.0. {\tt \begin{tabbing} EXPR PROCEDURE SUB1(U); \\ \hspace*{1em} DIFFERENCE(U, 1); \end{tabbing}}} \de{TIMES}{([\p{U}:\ty{number}]):\ty{number}}{noeval, nospread, or macro} {Returns the product of all its arguments. {\tt \begin{tabbing} MACRO PROCEDURE TIMES(U); \\ \hspace*{1em} EXPAND(CDR U, 'TIMES2); \end{tabbing}}} \de{TIMES2}{(\p{U}:\ty{number}, \p{V}:\ty{number}):\ty{number}}{eval, spread} {Returns the product of U and V.} \subsection{MAP Composite Functions} \de{MAP}{(\p{X}:\ty{list}, F\p{N}:\ty{function}):\ty{any}}{eval, spread} {Applies FN to successive CDR segments of X. NIL is returned. {\tt \begin{tabbing} EXPR PROCEDURE MAP(X, FN); \\ \hspace*{1em} WHILE X DO $<<$ FN X; X := CDR X $>>$; \end{tabbing}}} \de{MAPC}{(X:list, FN:function):\ty{any}}{eval, spread} {FN is applied to successive CAR segments of list X. NIL is returned. 
{\tt \begin{tabbing} EXPR PROCEDURE MAPC(X, FN); \\ \hspace*{1em} WHILE X DO $<<$ FN CAR X; X := CDR X $>>$; \end{tabbing}}} \de{MAPCAN}{(X:list, FN:function):\ty{any}}{eval, spread} {A concatenated list of FN applied to successive CAR elements of X is returned. {\tt \begin{tabbing} EXPR PROCEDURE MAPCAN(X, FN); \\ \hspace*{1em} IF\= NULL X THEN NIL \\ \> ELSE NCONC(FN CAR X, MAPCAN(CDR X, FN)); \end{tabbing}}} \de{MAPCAR}{(X:list, FN:function):\ty{any}}{eval, spread} {Returned is a constructed list of FN applied to each CAR of list X. {\tt \begin{tabbing} EXPR PROCEDURE MAPCAR(X, FN); \\ \hspace*{1em} IF\= NULL X THEN NIL \\ \> ELSE FN CAR X . MAPCAR(CDR X, FN); \end{tabbing}}} \de{MAPCON}{(X:list, FN:function):\ty{any}}{eval, spread} {Returned is a concatenated list of FN applied to successive CDR segments of X. {\tt \begin{tabbing} EXPR PROCEDURE MAPCON(X, FN); \\ \hspace*{1em} IF\= NULL X THEN NIL \\ \> ELSE NCONC(FN X, MAPCON(CDR X, FN)); \end{tabbing}}} \de{MAPLIST}{(X:list, FN:function):\ty{any}}{eval, spread} {Returns a constructed list of FN applied to successive CDR segments of X. {\tt \begin{tabbing} EXPR PROCEDURE MAPLIST(X, FN); \\ \hspace*{1em} IF\= NULL X THEN NIL \\ \> ELSE FN X . MAPLIST(CDR X, FN); \end{tabbing}}} \subsection{Composite Functions} \de{APPEND}{(\p{U}:\ty{list}, \p{V}:\ty{list}):\ty{list}}{eval, spread} {Returns a constructed list in which the last element of U is followed by the first element of V. The list U is copied, V is not. {\tt \begin{tabbing} EXPR PROCEDURE APPEND(U, V); \\ \hspace*{1em} IF\= NULL U THEN V \\ \> ELSE CAR U . APPEND(CDR U, V); \end{tabbing}}} \de{ASSOC}{(\p{U}:\ty{any}, \p{V}:\ty{alist}):\{\ty{dotted-pair}, NIL\}}{eval, spread} {If U occurs as the CAR portion of an element of the alist V, the dotted-pair in which U occurred is returned, else NIL is returned. ASSOC might not detect a poorly formed alist so an invalid \index{EQUAL ! in ASSOC} \index{alist ! in ASSOC} construction may be detected by CAR or CDR. {\tt \begin{tabbing} EXPR PROCEDURE ASSOC(U, V); \\ \hspace*{1em} IF \= NULL V THEN NIL \\ \> ELSE \= IF ATOM CAR V THEN \\ \> \> ERROR(000, LIST(V, "is a poorly formed alist")) \\ \> ELSE IF U = CAAR V THEN CAR V \\ \> ELSE ASSOC(U, CDR V); \end{tabbing}} } \de{DEFLIST}{(\p{U}:\ty{dlist}, \p{IND}:\ty{id}):\ty{list}}{eval, spread} {A "dlist" is a list in which each element is a two element list: \index{dlist} (ID:id PROP:any). Each ID in U has the indicator IND with property PROP placed on its property list by the PUT function. The value of DEFLIST is a list of the first elements of each two element list. Like PUT, DEFLIST may not be used to define functions. {\tt \begin{tabbing} EXPR PROCEDURE DEFLIST(U, IND); \\ \hspace*{1em} IF NULL U THEN NIL \\ \hspace*{2em} ELSE $<<$ \= PUT(CAAR U, IND, CADAR U); \\ \> CAAR U $>>$ . DEFLIST(CDR U, IND); \end{tabbing}} } \de{DELETE}{(\p{U}:\ty{any}, \p{V}:\ty{list}):\ty{list}}{eval, spread} {Returns V with the first top level occurrence of U removed from it. \index{EQUAL ! in DELETE} {\tt \begin{tabbing} EXPR PROCEDURE DELETE(U, V); \\ \hspace*{1em} IF NULL V THEN NIL \\ \hspace*{2em} ELSE IF CAR V = U THEN CDR V \\ \hspace*{2em} ELSE CAR V . DELETE(U, CDR V); \end{tabbing}}} \de{DIGIT}{(\p{U}:\ty{any}):\ty{boolean}}{eval, spread} {Returns T if U is a digit, otherwise NIL. 
{\tt \begin{tabbing} EXPR PROCEDURE DIGIT(U); \\ \hspace*{1em} IF MEMQ(U, '(!0 !1 !2 !3 !4 !5 !6 !7 !8 !9)) \\ \hspace*{2em} THEN T ELSE NIL; \end{tabbing}}} \de{LENGTH}{(\p{X}:\ty{any}):\ty{integer}}{eval, spread} {The top level length of the list X is returned. {\tt \begin{tabbing} EXPR PROCEDURE LENGTH(X); \\ \hspace*{1em} IF ATOM X THEN 0 \\ \hspace*{2em} ELSE PLUS(1, LENGTH CDR X); \end{tabbing}}} \de{LITER}{(\p{U}:\ty{any}):\ty{boolean}}{eval, spread} {Returns T if U is a character of the alphabet, NIL otherwise.\footnote{The published report omits escape characters. These are required for both upper and lower case as some systems default to lower.} {\tt \begin{tabbing} EXPR PROCEDURE LITER(U); \\ \hspace*{1em} IF \= MEMQ(U, '(\=!A !B !C !D !E !F !G !H !I !J !K !L !M \\ \> \> !N !O !P !Q !R !S !T !U !V !W !X !Y !Z \\ \> \> !a !b !c !d !e !f !g !h !i !j !k !l !m \\ \> \> !n !o !p !q !r !s !t !u !v !w !x !y !z)) \\ \> THEN T ELSE NIL; \end{tabbing}}} \de{MEMBER}{(\p{A}:\ty{any}, \p{B}:\ty{list}):\ty{extra-boolean}}{eval, spread} {Returns NIL if A is not a member of list B, returns the remainder of B whose first element is A. \index{EQUAL ! in MEMBER} {\tt \begin{tabbing} EXPR PROCEDURE MEMBER(A, B); \\ \hspace*{1em} IF NULL B THEN NIL \\ \hspace*{2em} ELSE IF A = CAR B THEN B \\ \hspace*{2em} ELSE MEMBER(A, CDR B); \end{tabbing}}} \de{MEMQ}{(\p{A}:\ty{any}, \p{B}:\ty{list}):\ty{extra-boolean}}{eval, spread} {Same as MEMBER but an EQ check is used for comparison. \index{EQ ! in MEMQ} {\tt \begin{tabbing} EXPR PROCEDURE MEMQ(A, B); \\ \hspace*{1em} IF \= NULL B THEN NIL \\ \> ELSE IF A EQ CAR B THEN B \\ \> ELSE MEMQ(A, CDR B); \end{tabbing}}} \de{NCONC}{(\p{U}:\ty{list}, \p{V}:\ty{list}):\ty{list}}{eval, spread} {Concatenates V to U without copying U. The last CDR of U is modified to point to V. {\tt \begin{tabbing} EXPR PROCEDURE NCONC(U, V); \\ BEGIN SCALAR W; \\ \hspace*{2em} \= IF NULL U THEN RETURN V; \\ \> W := U; \\ \> WHILE CDR W DO W := CDR W; \\ \> RPLACD(W, V); \\ \> RETURN U \\ END; \end{tabbing}}} \de{PAIR}{(\p{U}:\ty{list}, \p{V}:\ty{list}):\ty{alist}}{eval, spread} {U and V are lists which must have an identical number of elements. If not, an error occurs (the 000 used in the ERROR call is arbitrary and need not be adhered to). Returned is a list where each element is a dotted-pair, the CAR of the pair being from U, and the CDR the corresponding element from V. {\tt \begin{tabbing} EXPR PROCEDURE PAIR(U, V); \\ \hspace*{1em} IF AND(U, V) THEN (CAR U . CAR V) . PAIR(CDR U, CDR V) \\ \hspace*{2em} \= ELSE IF OR(U, V) THEN ERROR(000, \\ \hspace*{4em} "Different length lists in PAIR") \\ \> ELSE NIL; \end{tabbing}}} \de{REVERSE}{(\p{U}:\ty{list}):\ty{list}}{eval, spread} {Returns a copy of the top level of U in reverse order. {\tt \begin{tabbing} EXPR PROCEDURE REVERSE(U); \\ BEGIN SCALAR W; \\ \hspace*{2em} \= WHILE U DO $<<$ \= W := CAR U . W; \\ \> \> U := CDR U $>>$; \\ \> RETURN W \\ END; \end{tabbing}}} \de{SASSOC}{(\p{U}:\ty{any}, \p{V}:\ty{alist}, \p{FN}:\ty{function}):\ty{any}}{eval, spread} {Searches the alist V for an occurrence of U. If U is not in the alist the evaluation of function FN is returned. \index{EQUAL ! in SASSOC} \index{alist ! 
in SASSOC} {\tt \begin{tabbing} EXPR PROCEDURE SASSOC(U, V, FN); \\ \hspace*{1em} IF NULL V THEN FN() \\ \hspace*{2em} \= ELSE IF U = CAAR V THEN CAR V \\ \> ELSE SASSOC(U, CDR V, FN); \end{tabbing}}} \de{SUBLIS}{(\p{X}:\ty{alist}, \p{Y}:\ty{any}):\ty{any}}{eval, spread} {The value returned is the result of substituting the CDR of each element of the alist X for every occurrence of the CAR part of that element in Y. \index{alist ! in SUBLIS} {\tt \begin{tabbing} EXPR PROCEDURE SUBLIS(X, Y); \\ \hspace*{1em}IF NULL X THEN Y \\ \hspace*{2em} ELSE BEGIN \= SCALAR U; \\ \> U := ASSOC(Y, X); \\ \> RETURN \= IF U THEN CDR U \\ \> \> ELSE IF ATOM Y THEN Y \\ \> \> ELSE \= SUBLIS(X, CAR Y) . \\ \> \> \> SUBLIS(X, CDR Y) \\ \> END; \end{tabbing}}} \de{SUBST}{(\p{U}:\ty{any}, \p{V}:\ty{any}, \p{W}:\ty{any}):\ty{any}}{eval, spread} {The value returned is the result of substituting U for all occurrences of V in W. \index{EQUAL ! in SUBST} {\tt \begin{tabbing} EXPR PROCEDURE SUBST(U, V, W); \\ \hspace*{1em} IF NULL W THEN NIL \\ \hspace*{2em} \= ELSE IF V = W THEN U \\ \> ELSE IF ATOM W THEN W \\ \> ELSE SUBST(U, V, CAR W) . SUBST(U, V, CDR W); \end{tabbing}}} \subsection{The Interpreter} \label{interpreter} \de{APPLY}{(\p{FN}:\{\ty{id,function}\}, \p{ARGS}:\ty{any-list}):\ty{any}}{eval, spread} {APPLY returns the value of FN with actual parameters ARGS. The actual parameters in ARGS are already in the form required for binding to the formal parameters of FN. Implementation specific portions described in English are enclosed in boxes. {\tt \begin{tabbing} EXPR PROCEDURE APPLY(FN, ARGS); \\ BEGIN SCALAR DEFN; \\ \hspace*{2em}\= IF CODEP FN THEN RETURN \\ \> \hspace{1em} \framebox[3.25in]{\parbox{3.25in}{Spread the actual parameters in ARGS following the conventions: for calling functions, transfer to the entry point of the function, and return the value returned by the function.}}; \\ \> IF \= IDP FN THEN RETURN \\ \> \> IF \= NULL(DEFN := GETD FN) THEN \\ \> \> \> ERROR(000, LIST(FN, "is an undefined function")) \\ \> \> ELSE IF CAR DEFN EQ 'EXPR THEN \\ \> \> \> APPLY(CDR DEFN, ARGS) \\ \> \> ELSE ERROR(000, \\ \> \> \> LIST(FN, "cannot be evaluated by APPLY")); \\ \> IF OR(ATOM FN, NOT(CAR FN EQ 'LAMBDA)) THEN \\ \> \> ERROR(000, \\ \> \> LIST(FN, "cannot be evaluated by APPLY")); \\ \> RETURN \\ \> \> \framebox[3.25in]{\parbox{3.25in}{Bind the actual parameters in ARGS to the formal parameters of the lambda expression. If the two lists are not of equal length then ERROR(000, "Number of parameters do not match"); The value returned is EVAL CADDR FN.}} \\ END; \end{tabbing}}} \de{EVAL}{(\p{U}:\ty{any}):\ty{any}}{eval, spread} {The value of the expression U is computed. Error numbers are arbitrary. Portions of EVAL involving machine specific coding are expressed in English enclosed in boxes. {\tt \begin{tabbing} EXPR PROCEDURE EVAL(U); \\ BEGIN SCALAR FN; \\ \hspace*{2em} \= IF CONSTANTP U THEN RETURN U; \\ \> IF IDP U THEN RETURN \\ \> \hspace{1em} \framebox[3.25in]{\parbox{3.25in}{U is an id. 
Return the value most currently bound to U or if there is no such binding: ERROR(000, LIST("Unbound:", U));}} \\ \> IF \= PAIRP CAR U THEN RETURN \\ \> \> IF CAAR U EQ 'LAMBDA THEN APPLY(CAR U, EVLIS CDR U) \\ \> \> ELSE ERROR(\= 000, LIST(CAR U, \\ \> \> \> "improperly formed LAMBDA expression")) \\ \> \> ELSE IF CODEP CAR U THEN \\ \> \> \> RETURN APPLY(CAR U, EVLIS CDR U); \\ \> FN := GETD CAR U; \\ \> IF NULL FN THEN \\ \> \> ERROR(000, LIST(CAR U, "is an undefined function")) \\ \> ELSE IF CAR FN EQ 'EXPR THEN \\ \> \> RETURN APPLY(CDR FN, EVLIS CDR U) \\ \> ELSE IF CAR FN EQ 'FEXPR THEN \\ \> \> RETURN APPLY(CDR FN, LIST CDR U) \\ \> ELSE IF CAR FN EQ 'MACRO THEN \\ \> \> RETURN EVAL APPLY(CDR FN, LIST U) \\ END; \end{tabbing}}} \de{EVLIS}{(\p{U}:\ty{any-list}):\ty{any-list}}{eval, spread} {EVLIS returns a list of the evaluation of each element of U. {\tt \begin{tabbing} EXPR PROCEDURE EVLIS(U); \\ \hspace*{1em} IF NULL U THEN NIL \\ \hspace*{2em} ELSE EVAL CAR U . EVLIS CDR U; \end{tabbing}}} \de{EXPAND}{(\p{L}:\ty{list}, \p{FN}:\ty{function}):\ty{list}}{eval, spread} {FN is a defined function of two arguments to be used in the expansion of a MACRO. EXPAND returns a list in the form: \vspace{.15in} (FN L$_0$ (FN L$_1$ \ldots (FN L$_{n-1}$ L$_n$) \ldots )) \vspace{.15in} where $n$ is the number of elements in L, L$_i$ is the $i$th element of L. {\tt \begin{tabbing} EXPR PROCEDURE EXPAND(L,FN); \\ \hspace*{1em} IF NULL CDR L THEN CAR L \\ \hspace*{2em} ELSE LIST(FN, CAR L, EXPAND(CDR L, FN)); \end{tabbing}}} \de{FUNCTION}{(\p{FN}:\ty{function}):\ty{function}}{noeval, nospread} {The function FN is to be passed to another function. If FN is to have side effects its free variables must be fluid or global. FUNCTION is like QUOTE but its argument may be affected by compilation. We do not \index{FUNARGs not supported} consider FUNARGs in this report.} \de{QUOTE}{(U:any):\ty{any}}{noeval, nospread} {Stops evaluation and returns U unevaluated. {\tt \begin{tabbing} FEXPR PROCEDURE QUOTE(U); \\ \hspace*{2em}CAR U; \end{tabbing}}} \subsection{Input and Output} \label{IO} The user normally communicates with Standard LISP through \index{standard devices} ``standard devices''. The default devices are selected in accordance with the conventions of the implementation site. Other input and output devices or files may be selected for reading and writing using the functions described herein. \de{CLOSE}{(\p{FILEHANDLE}:\ty{any}):\ty{any}}{eval, spread} {Closes the file with the internal name FILEHANDLE writing any necessary end of file marks and such. The value of FILEHANDLE is that returned by the corresponding OPEN. \index{OPEN} The value returned is the value of FILEHANDLE. An error occurs if the file can not be \index{file handle} \index{files} closed. \errormessage{ ***** FILEHANDLE could not be closed} } \de{EJECT}{():NIL}{eval, spread} {Skip to the top of the next output page. Automatic EJECTs are executed by the print functions when the length set by the PAGELENGTH \index{PAGELENGTH} function is exceeded.} \de{LINELENGTH}{(\p{LEN}:\{\ty{integer}, NIL\}):\ty{integer}}{eval, spread} {If LEN is an integer the maximum line length to be printed before the print functions initiate an automatic TERPRI is set to the value LEN. \index{TERPRI} No initial Standard LISP line length is assumed. The previous line length is returned except when LEN is NIL. This special case returns the current line length and does not cause it to be reset. 
An error occurs if the requested line length is too large for the currently selected output file or LEN is negative or zero. \errormessage{ ***** LEN is an invalid line length} } \de{LPOSN}{():\ty{integer}}{eval, spread} {Returns the number of lines printed on the current page. At the top of a page, 0 is returned. } \de{OPEN}{(\p{FILE}:\ty{any}, \p{HOW}:\ty{id}):\ty{any}}{eval, spread} {Open the file with the system dependent name FILE for output if HOW is EQ to OUTPUT, or input if HOW is EQ to INPUT. If the file is \index{file handle} \index{files} \index{OUTPUT} \index{INPUT} opened successfully, a value which is internally associated with the file is returned. This value must be saved for use by RDS and WRS. An error occurs if HOW is something other than INPUT or OUTPUT or the file can't be opened. \errormessage{***** HOW is not option for OPEN} \errormessage{***** FILE could not be opened} } \de{PAGELENGTH}{(\p{LEN}:\{\ty{integer}, NIL\}):\ty{integer}}{eval, spread} {Sets the vertical length (in lines) of an output page. Automatic page EJECTs are executed by the print functions when this length is \index{EJECT} reached. The initial vertical length is implementation specific. The previous page length is returned. If LEN is 0, no automatic page ejects will occur. } \de{POSN}{():\ty{integer}}{eval, spread} {Returns the number of characters in the output buffer. When the buffer is empty, 0 is returned.} \de{PRINC}{(\p{U}:\ty{id}):\ty{id}}{eval, spread} {U must be a single character id such as produced by EXPLODE or read by READCH or the value of !\$EOL!\$. The effect is the character U \index{\$EOL\$ (global)} displayed upon the currently selected output device. The value of !\$EOL!\$ causes termination of the current line like a call to TERPRI.} \de{PRINT}{(\p{U}:\ty{any}):\ty{any}}{eval, spread} {Displays U in READ readable format and terminates the print line. The value of U is returned. {\tt \begin{tabbing} EXPR PROCEDURE PRINT(U); \\ \hspace*{2em} $<<$ PRIN1 U; TERPRI(); U $>>$; \end{tabbing}}} \de{PRIN1}{(\p{U}:\ty{any}):\ty{any}}{eval, spread} {U is displayed in a READ readable form. The format of display is the result of EXPLODE expansion; special characters are prefixed with the escape character !, and strings are enclosed in "\ldots ". Lists are displayed in list-notation and vectors in vector-notation. } \de{PRIN2}{(\p{U}:\ty{any}):\ty{any}}{eval, spread} {U is displayed upon the currently selected print device but output is not READ readable. The value of U is returned. Items are displayed as described in the EXPLODE function with the exceptions that the escape character does not prefix special characters and strings are not enclosed in "\ldots ". Lists are displayed in list-notation and vectors in vector-notation. The value of U is returned. } \de{RDS}{(\p{FILEHANDLE}:\ty{any}):\ty{any}}{eval, spread} {Input from the currently selected input file is suspended and further input comes from the file named. FILEHANDLE is a system dependent \index{file handle} internal name which is a value returned by OPEN. If FILEHANDLE is NIL the standard input device is selected. When end of file is reached on a non-standard input device, the standard input device is reselected. When end of file occurs on the standard input device the Standard LISP reader terminates. RDS returns the internal name of the previously selected input file. 
\index{standard input}
\errormessage{***** FILEHANDLE could not be selected for input}
}
\de{READ}{():\ty{any}}{}
{The next expression from the file currently selected for input is
returned. Valid input forms are: vector-notation, dot-notation,
list-notation, numbers, function-pointers, strings, and identifiers with
escape characters. Identifiers are interned on the OBLIST (see
\index{INTERN} \index{OBLIST entry}
the INTERN function in ``Identifiers'', section~\ref{identifiers} on
page~\pageref{identifiers}). READ returns the
\index{\$EOF\$ (global)}
value of !\$EOF!\$ when the end of the currently selected input file is
reached. }
\de{READCH}{():\ty{id}}{}
{Returns the next interned character from the file currently selected for
input. Two special cases occur. If all the characters in an input
\index{\$EOL\$ (global)} \index{\$EOF\$ (global)}
record have been read, the value of !\$EOL!\$ is returned. If the file
selected for input has all been read the value of !\$EOF!\$ is returned.
Comments delimited by \% and end-of-line are not transparent to READCH.
\index{\% ! read by READCH} }
\de{TERPRI}{():\p{NIL}}{}
{The current print line is terminated.}
\de{WRS}{(\p{FILEHANDLE}:\ty{any}):\ty{any}}{eval, spread}
{Output to the currently active output file is suspended and further
output is directed to the file named. FILEHANDLE is an internal name
which is returned by OPEN. The file named must have been opened for
output. If FILEHANDLE is NIL the standard output device is selected.
\index{file handle} \index{standard output}
WRS returns the internal name of the previously selected output file.
\errormessage{***** FILEHANDLE could not be selected for output}
}
\subsection{LISP Reader}
An EVAL read loop has been chosen to drive a Standard LISP system to
provide a continuity in functional syntax. Choices of messages and the
amount of extra information displayed are decisions left to the
implementor. \index{STANDARD-LISP}
{\tt
\begin{tabbing}
EXPR PROCEDURE STANDARD!-LISP(); \\
BEGIN SCALAR VALUE; \\
\hspace*{2em} \= RDS NIL; WRS NIL; \\
\> PRIN2 "Standard LISP"; TERPRI(); \\
\> WHILE T DO \\
\> \hspace*{1em} $<<$ \= PRIN2 "EVAL:"; TERPRI(); \\
\> \> VALUE := ERRORSET(QUOTE EVAL READ(), T, T); \\
\> \> IF NOT ATOM VALUE THEN PRINT CAR VALUE; \\
\> \> TERPRI() $>>$; \\
END;
\end{tabbing}}
\de{QUIT}{()}{}
{Causes termination of the LISP reader and control to be transferred to
the operating system.}
\section{System GLOBAL Variables} \label{slglobals}
These variables provide global control of the LISP system, or implement
values which are constant throughout
execution.\footnote{The published document does not specify that all
these are GLOBAL.}
\variable{*COMP}{NIL}{global}
{The value of !*COMP controls whether or not PUTD compiles the function
defined in its arguments before defining it. If !*COMP is NIL the
function is defined as an EXPR. If !*COMP is something else the function
is first compiled.
Compilation will produce certain changes in the semantics of functions particularly FLUID type access.} \variable{EMSG*}{NIL}{global} {Will contain the MESSAGE generated by the last ERROR call (see \index{ERROR} ``Error Handling'' section~\ref{errors} on page~\pageref{errors}).} \variable{\$EOF\$}{\s{an uninterned identifier}}{global} {The value of !\$EOF!\$ is returned by all input functions when the end \index{end of file} of the currently selected input file is reached.} \variable{\$EOL\$}{\s{an uninterned identifier}}{global} {The value of !\$EOL!\$ is returned by READCH when it reaches the end of \index{READCH} \index{end of line} \index{PRINC} a logical input record. Likewise PRINC will terminate its current line (like a call to TERPRI) when !\$EOL!\$ is its argument.} \variable{*GC}{NIL}{global} {!*GC controls the printing of garbage collector messages. If NIL no \index{garbage collector} indication of garbage collection may occur. If non-NIL various system dependent messages may be displayed.} \variable{NIL}{NIL}{global} {NIL is a special global variable. It is protected from being modified by SET or SETQ. \index{NIL ! cannot be changed}} \variable{*RAISE}{NIL}{global} {If !*RAISE is non-NIL all characters input through Standard LISP input/output functions will be raised to upper case. If !*RAISE is NIL characters will be input as is.} \variable{T}{T}{global} {T is a special global variable. It is protected from being modified by SET or SETQ. \index{T ! cannot be changed}} \section{The Extended Syntax} Whenever it is possible to define Standard LISP functions in LISP the text of the function will appear in an extended syntax. These definitions are supplied as an aid to understanding the behavior of functions and not as a strict implementation guide. A formal scheme for the translation of extended syntax to Standard LISP is presented to eliminate misinterpretation of the definitions. \subsection{Definition} The goal of the transformation scheme is to produce a PUTD invocation which has the function translated from the extended syntax as its actual parameter. A rule has a name in brackets \s{\ldots} by which it is known and is defined by what follows the meta symbol ::=. Each rule of the set consists of one or more ``alternatives'' separated by the $\mid$ meta symbol, being the different ways in which the rule will be matched by source text. Each alternative is composed of a ``recognizer'' and a ``generator'' separated by the $\Longrightarrow$ meta symbol. The recognizer is a concatenation of any of three different forms. 1) Terminals - Upper case lexemes and punctuation which is not part of the meta syntax represent items which must appear as is in the source text for the rule to succeed. 2) Rules - Lower case lexemes enclosed in \s{\ldots} are names of other rules. The source text is matched if the named rule succeeds. 3) Primitives - Lower case singletons not in brackets are names of primitives or primitive classes of Standard LISP. The syntax and semantics of the primitives are given in Part I. The recognizer portion of the following rule matches an extended syntax procedure: \s{function} ::= ftype PROCEDURE id (\s{id list}); \\ \hspace*{2em} \s{statement}; $\Longrightarrow$ A function is recognized as an ``ftype'' (one of the tokens EXPR, FEXPR, etc.) followed by the keyword PROCEDURE, followed by an ``id'' (the name of the function), followed by an \s{id list} (the formal parameter names) enclosed in parentheses. A semicolon terminates the title line. 
The body of the function is a \s{statement} followed by a semicolon. For example: {\small\begin{verbatim} EXPR PROCEDURE NULL(X); EQ(X, NIL); \end{verbatim}} \noindent satisfies the recognizer, causes the generator to be activated and the rule to be matched successfully. The generator is a template into which generated items are substituted. The three syntactic entities have corresponding meanings when they appear in the generator portion. 1) Terminals - These lexemes are copied as is to the generated text. 2) Rules - If a rule has succeeded in the recognizer section then the value of the rule is the result of the generator portion of that rule. 3) Primitives - When primitives are matched the primitive lexeme replaces its occurrence in the generator. If more than one occurrence of an item would cause ambiguity in the generator portion this entity appears with a bracketed subscript. Thus: \begin{tabbing} \s{conditional} ::= \\ \hspace*{2em} IF \s{expression} \= THEN \s{statement$_1$} \\ \> ELSE \s{statement$_2$} \ldots \end{tabbing} \noindent has occurrences of two different \s{statement}s. The generator portion uses the subscripted entities to reference the proper generated value. The \s{function} rule appears in its entirety as: \begin{tabbing} \s{function} ::= ftype PROCEDURE id (\s{id list});\s{statement}; $\Longrightarrow$ \\ \hspace*{2em} \=(PUTD \= (QUOTE id) \\ \> \> (QUOTE ftype) \\ \> \>(QUOTE (LAMBDA (\s{id list}) \s{statement}))) \end{tabbing} If the recognizer succeeds (as it would in the case of the NULL procedure example) the generator returns: {\small\begin{verbatim} (PUTD (QUOTE NULL) (QUOTE EXPR) (QUOTE (LAMBDA (X) (EQ X NIL)))) \end{verbatim}} The identifier in the template is replaced by the procedure name NULL, \s{id list} by the single formal parameter X, the \s{statement} by (EQ X NIL) which is the result of the \s{statement} generator. EXPR replaces ftype, the type of the defined procedure. 
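As a further worked example (not taken from the published report),
consider the definition of ABS shown under ``Arithmetic Functions'':
{\small\begin{verbatim}
EXPR PROCEDURE ABS(U);
   IF LESSP(U, 0) THEN MINUS(U) ELSE U;
\end{verbatim}}
\noindent Applying the conditional and function call rules listed in the
next subsection, the generator produces:
{\small\begin{verbatim}
(PUTD (QUOTE ABS)
      (QUOTE EXPR)
      (QUOTE (LAMBDA (U) (COND ((LESSP U 0) (MINUS U)) (T U)))))
\end{verbatim}}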
\subsection{The Extended Syntax Rules}
\begin{tabbing}
\s{function} ::= ftype \k{PROCEDURE} id (\s{id list}); \s{statement}; $\Longrightarrow$ \\
\hspace*{2em} \= (PUTD \= (QUOTE id) \\
\> \> (QUOTE ftype) \\
\> \> (QUOTE (LAMBDA (\s{id list}) \s{statement}))) \\ \\
\s{id list} ::= id $\Longrightarrow$ id $\mid$ \\
\> id, \s{id list} $\Longrightarrow$ id \s{id list} $\mid$ \\
\> $\Longrightarrow$ NIL \\
\s{statement} ::= \s{expression} $\Longrightarrow$ \s{expression} $\mid$ \\
\> \s{proper statement} $\Longrightarrow$ \s{proper statement} \\ \\
\s{proper statement} ::= \\
\> \s{assignment statement} $\Longrightarrow$ \s{assignment statement} $\mid$ \\
\> \s{conditional statement} $\Longrightarrow$ \s{conditional statement} $\mid$ \\
\> \s{while statement} $\Longrightarrow$ \s{while statement} $\mid$ \\
\> \s{compound statement} $\Longrightarrow$ \s{compound statement} \\ \\
\s{assignment statement} ::= id := \s{expression} $\Longrightarrow$ \\
\> \> (SETQ id \s{expression}) \\ \\
\s{conditional statement} ::= \\
\> \k{IF} \s{expression} \k{THEN} \s{statement$_1$} \k{ELSE} \s{statement$_2$} $\Longrightarrow$ \\
\> \hspace{2em} \= (COND (\s{expression} \s{statement$_1$})(T \s{statement$_2$})) $\mid$ \\
\> \k{IF} \s{expression} \k{THEN} \s{statement} $\Longrightarrow$ \\
\> \> (COND (\s{expression} \s{statement})) \\ \\
\s{while statement} ::= \k{WHILE} \s{expression} \k{DO} \s{statement} $\Longrightarrow$ \\
\> \> (PROG NIL \\
\> \> LBL \= (COND ((NULL \s{expression}) (RETURN NIL))) \\
\> \> \> \s{statement} \\
\> \> \> (GO LBL)) \\ \\
\s{compound statement} ::= \\
\> \k{BEGIN} \k{SCALAR} \s{id list}; \s{program list} \k{END} $\Longrightarrow$ \\
\> \> (PROG (\s{id list}) \s{program list}) $\mid$ \\
\> \k{BEGIN} \s{program list} \k{END} $\Longrightarrow$ \\
\> \> (PROG NIL \s{program list}) $\mid$ \\
\> \k{$<<$} \s{statement list} \k{$>>$} $\Longrightarrow$ (PROGN \s{statement list}) \\ \\
\s{program list} ::= \s{full statement} $\Longrightarrow$ \s{full statement} $\mid$ \\
\> \s{full statement} \s{program list} $\Longrightarrow$ \\
\> \> \s{full statement} \s{program list} \\ \\
\s{full statement} ::= \s{statement} $\Longrightarrow$ \s{statement} $\mid$ id: $\Longrightarrow$ id \\ \\
\s{statement list} ::= \s{statement} $\Longrightarrow$ \s{statement} $\mid$ \\
\> \s{statement}; \s{statement list} $\Longrightarrow$ \\
\> \> \s{statement} \s{statement list} \\ \\
\s{expression} ::= \\
\> \s{expression$_1$} \k{.} \s{expression$_2$} $\Longrightarrow$ \\
\> \> (CONS \s{expression$_1$} \s{expression$_2$}) $\mid$ \\
\> \s{expression$_1$} \k{=} \s{expression$_2$} $\Longrightarrow$ \\
\> \> (EQUAL \s{expression$_1$} \s{expression$_2$}) $\mid$ \\
\> \s{expression$_1$} \k{EQ} \s{expression$_2$} $\Longrightarrow$ \\
\> \> (EQ \s{expression$_1$} \s{expression$_2$}) $\mid$ \\
\> '\s{expression} $\Longrightarrow$ (QUOTE \s{expression}) $\mid$ \\
\> function \s{expression} $\Longrightarrow$ (function \s{expression}) $\mid$ \\
\> function(\s{argument list}) $\Longrightarrow$ (function \s{argument list}) $\mid$ \\
\> number $\Longrightarrow$ number $\mid$ \\
\> id $\Longrightarrow$ id \\ \\
\s{argument list} ::= () $\Longrightarrow$ $\mid$ \\
\> \s{expression} $\Longrightarrow$ \s{expression} $\mid$ \\
\> \s{expression}, \s{argument list} $\Longrightarrow$ \s{expression} \s{argument list}
\end{tabbing}
Notice the three infix operators . EQ and = which are translated into
calls on CONS, EQ, and EQUAL respectively.
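For example (an illustration not in the published report), the expression
{\small\begin{verbatim}
CAR U . APPEND(CDR U, V)
\end{verbatim}}
\noindent from the definition of APPEND is translated into
{\small\begin{verbatim}
(CONS (CAR U) (APPEND (CDR U) V))
\end{verbatim}}
\noindent while the test {\tt U = CAAR V} in ASSOC becomes
{\tt (EQUAL U (CAAR V))} and {\tt A EQ CAR B} in MEMQ becomes
{\tt (EQ A (CAR B))}.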
Note also that a call on a function which has no formal parameters must have () as an argument list. The QUOTE function is abbreviated by '. %\bibliography{sl} %\bibliographystyle{plain} %\end{document} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% EndCodemist \part{Appendix} \appendix \chapter{Reserved Identifiers} We list here all identifiers that are normally reserved in \REDUCE{} including names of commands, operators and switches initially in the system. Excluded are words that are reserved in specific implementations of the system. \vspace{13pt} \begin{list}{}{\renewcommand{\makelabel}[1]{#1\hspace{\fill}}% \settowidth{\labelwidth}{Numerical Operators}% \setlength{\labelsep}{1em}% \settowidth{\leftmargin}{Numerical Operators\hspace*{\labelsep}}% \sloppy} \item[Commands] {\tt ALGEBRAIC} {\tt ANTISYMMETRIC} {\tt ARRAY} {\tt BYE} {\tt CLEAR} \linebreak {\tt CLEARRULES} {\tt COMMENT} {\tt CONT} {\tt DECOMPOSE} {\tt DEFINE} {\tt DEPEND} {\tt DISPLAY} {\tt ED} {\tt EDITDEF} {\tt END} {\tt EVEN} {\tt FACTOR} {\tt FOR} {\tt FORALL} {\tt FOREACH} {\tt GO} {\tt GOTO} {\tt IF} {\tt IN} {\tt INDEX} {\tt INFIX} {\tt INPUT} {\tt INTEGER} {\tt KORDER} {\tt LET} {\tt LINEAR} {\tt LISP} {\tt LISTARGP} {\tt LOAD} {\tt LOAD\_PACKAGE} {\tt MASS} {\tt MATCH} {\tt MATRIX} {\tt MSHELL} {\tt NODEPEND} {\tt NONCOM} {\tt NONZERO} {\tt NOSPUR} {\tt ODD} {\tt OFF} {\tt ON} {\tt OPERATOR} {\tt ORDER} {\tt OUT} {\tt PAUSE} {\tt PRECEDENCE} {\tt PRINT\_PRECISION} {\tt PROCEDURE} {\tt QUIT} {\tt REAL} {\tt REMFAC} {\tt REMIND} {\tt RETRY} {\tt RETURN} {\tt SAVEAS} {\tt SCALAR} {\tt SETMOD} {\tt SHARE} {\tt SHOWTIME} {\tt SHUT} {\tt SPUR} {\tt SYMBOLIC} {\tt SYMMETRIC} {\tt VECDIM} {\tt VECTOR} {\tt WEIGHT} {\tt WRITE} {\tt WTLEVEL} \item[Boolean Operators] {\tt EVENP} {\tt FIXP} {\tt FREEOF} {\tt NUMBERP} {\tt ORDP} {\tt PRIMEP} \item[Infix Operators] \verb|:=| \verb|=| \verb|>=| \verb|>| \verb|<=| \verb|<| \verb|=>| \verb|+| \verb|*| \verb|/| \verb|^| \verb|**| \verb|.| {\tt WHERE} {\tt SETQ} {\tt OR} {\tt AND} {\tt MEMBER} {\tt MEMQ} {\tt EQUAL} {\tt NEQ} {\tt EQ} {\tt GEQ} {\tt GREATERP} {\tt LEQ} {\tt LESSP} {\tt PLUS} {\tt DIFFERENCE} {\tt MINUS} {\tt TIMES} {\tt QUOTIENT} {\tt EXPT} {\tt CONS} \item[Numerical Operators] {\tt ABS} {\tt ACOS} {\tt ACOSH} {\tt ACOT} {\tt ACOTH} {\tt ACSC} {\tt ACSCH} {\tt ASEC} {\tt ASECH} {\tt ASIN} {\tt ASINH} {\tt ATAN} {\tt ATANH} {\tt ATAN2} {\tt COS} {\tt COSH} {\tt COT} {\tt COTH} {\tt CSC} {\tt CSCH} {\tt EXP} {\tt FACTORIAL} {\tt FIX} {\tt FLOOR} {\tt HYPOT} {\tt LN} {\tt LOG} {\tt LOGB} {\tt LOG10} {\tt NEXTPRIME} {\tt ROUND} {\tt SEC} {\tt SECH} {\tt SIN} {\tt SINH} {\tt SQRT} {\tt TAN} {\tt TANH} \item[Prefix Operators] {\tt APPEND} {\tt ARGLENGTH} {\tt CEILING} {\tt COEFF} {\tt COEFFN} {\tt COFACTOR} {\tt CONJ} {\tt DEG} {\tt DEN} {\tt DET} {\tt DF} {\tt DILOG} {\tt EI} {\tt EPS} {\tt ERF} {\tt FACTORIZE} {\tt FIRST} {\tt GCD} {\tt G} {\tt IMPART} {\tt INT} {\tt INTERPOL} {\tt LCM} {\tt LCOF} {\tt LENGTH} {\tt LHS} {\tt LINELENGTH} {\tt LTERM} {\tt MAINVAR} {\tt MAT} {\tt MATEIGEN} {\tt MAX} {\tt MIN} {\tt MKID} {\tt NULLSPACE} {\tt NUM} {\tt PART} {\tt PF} {\tt PRECISION} {\tt RANDOM} {\tt RANDOM\_NEW\_SEED} {\tt RANK} {\tt REDERR} {\tt REDUCT} {\tt REMAINDER} {\tt REPART} {\tt REST} {\tt RESULTANT} {\tt REVERSE} {\tt RHS} {\tt SECOND} {\tt SET} {\tt SHOWRULES} {\tt SIGN} {\tt SOLVE} {\tt STRUCTR} {\tt SUB} {\tt SUM} {\tt THIRD} {\tt TP} {\tt TRACE} {\tt VARNAME} \item[Reserved Variables] {\tt 
CARD\_NO} {\tt E} {\tt EVAL\_MODE} {\tt FORT\_WIDTH} {\tt HIGH\_POW} {\tt I} {\tt INFINITY} {\tt K!*} {\tt LOW\_POW} {\tt NIL} {\tt PI} {\tt ROOT\_MULTIPLICITY} {\tt T} \item[Switches] {\tt ADJPREC} {\tt ALGINT} {\tt ALLBRANCH} {\tt ALLFAC} {\tt BFSPACE} {\tt COMBINEEXPT} {\tt COMBINELOGS} {\tt COMP} {\tt COMPLEX} {\tt CRAMER} {\tt CREF} {\tt DEFN} {\tt DEMO} {\tt DIV} {\tt ECHO} {\tt ERRCONT} {\tt EVALLHSEQP} {\tt EXP} {\tt EXPANDLOGS} {\tt EZGCD} {\tt FACTOR} {\tt FORT} {\tt FULLROOTS} {\tt GCD} {\tt IFACTOR} {\tt INT} {\tt INTSTR} {\tt LCM} {\tt LIST} {\tt LISTARGS} {\tt MCD} {\tt MODULAR} {\tt MSG} {\tt MULTIPLICITIES} {\tt NAT} {\tt NERO} {\tt NOSPLIT} {\tt OUTPUT} {\tt PERIOD} {\tt PRECISE} {\tt PRET} {\tt PRI} {\tt RAT} {\tt RATARG} {\tt RATIONAL} {\tt RATIONALIZE} {\tt RATPRI} {\tt REVPRI} {\tt RLISP88} {\tt ROUNDALL} {\tt ROUNDBF} {\tt ROUNDED} {\tt SAVESTRUCTR} {\tt SOLVESINGULAR} {\tt TIME} {\tt TRA} {\tt TRFAC} {\tt TRIGFORM} {\tt TRINT} \item[Other Reserved Ids] {\tt BEGIN} {\tt DO} {\tt EXPR} {\tt FEXPR} {\tt INPUT} {\tt LAMBDA} {\tt LISP} {\tt MACRO} {\tt PRODUCT} {\tt REPEAT} {\tt SMACRO} {\tt SUM} {\tt UNTIL} {\tt WHEN} {\tt WHILE} {\tt WS} \end{list} \newpage \addcontentsline{toc}{chapter}{Index}{} \appendix \bibliographystyle{plain} \bibliography{bibl,sl} \printindex \end{document}