numDeriv/0000755000176000001440000000000012036454517012102 5ustar ripleyusers
numDeriv/MD50000644000176000001440000000165612036454517012422 0ustar ripleyusers
8a95d472873e05799403efce9fd004f9 *DESCRIPTION
68a5918eb427271dd79f07d62fce33a7 *NAMESPACE
d661e6355d6260f6927f859ef4be4b64 *NEWS
3ab4a6d24edf3f62fe63725fab3f8b15 *R/numDeriv.R
65ba7a8040ac6c9475a6ec23e9c5e3cd *inst/doc/Guide.Stex
638275da9909da4a0800d4a08b8b2f6f *inst/doc/Guide.pdf
6f429cff9fd52e47bf6ff11bb78de727 *man/00.numDeriv.Intro.Rd
81ca6e89676a8c1a148e28401954a5e2 *man/genD.Rd
54669c2c3f5e81fdddcb3c1981f439c5 *man/grad.Rd
65a89fd686e5ecee8b946ce5a8526aa3 *man/hessian.Rd
9519f5a285a3235449b6f8ad2bbcafe3 *man/jacobian.Rd
d8a40f6fcb06290212e91b33d3187719 *man/numDeriv-package.Rd
338ece2354dd67caa573d43821ffe28c *tests/BWeg.R
d9b261b7989677a5ba6491a3fcbf7945 *tests/CSD.R
1c14632bb7692efc750c890044afe3b7 *tests/grad01.R
6932a6ef2283f55bd98fd58d44440bf2 *tests/hessian01.R
6280a2be34665543907c6e84d4490fd7 *tests/jacobian01.R
0d4b05477704df019b1e3f5a0810fde0 *tests/trig01.R
65ba7a8040ac6c9475a6ec23e9c5e3cd *vignettes/Guide.Stex

numDeriv/NEWS0000644000176000001440000000373612032061045012574 0ustar ripleyusers
Known BUGS

    o   the hessian function in numDeriv does not accept method="simple".

    o   When method="Richardson", grad does not work when r=1, because of
        subscripting issues. This should be fixed so that r=1 does a central
        difference approximation, without any extrapolation.

Changes in numDeriv version 2012.9-1

    o   added complex step derivatives (from Ravi Varadhan) and related tests.

    o   changed method.args to an empty list in the default methods, as the
        real defaults depend on the approximation, and are documented in the
        details sections of the help pages.

Changes in numDeriv version 2012.3-1

    o   no real changes, but bumping version for new CRAN suitability check.

Changes in numDeriv version 2011.11-2

    o   fixed genD documentation error for the denominator in f'' (d^2 rather
        than 2*d, noticed by Yilun Wang)

Changes in numDeriv version 2011.11-1

    o   updated maintainer email address.

Changes in numDeriv version 2010.11-1

    o   Added a warning in the documentation about trying to pass arguments
        in ... with the same names as numDeriv function arguments.

Changes in numDeriv version 2010.2-1

    o   Added more graceful failure in the case of NA returned by a function
        (thanks to Adam Kramer).

Changes in numDeriv version 2009.2-2

    o   Standardized NEWS format for the new function news().

Changes in numDeriv version 2009.2-1

    o   argument zero.tol was added to grad, jacobian and genD, and is used to
        test if parameters are zero in order to determine if eps should be
        used in place of d. Previous tests using == did not work for very
        small values.

    o   the default for argument d to grad was 0.0001, but the specification
        made it appear to be 0.1. The specification was changed to make the
        default clear.

    o   unnecessary hessian.default argument setting was removed (the
        arguments are just passed to genD, which duplicated the setting).

    o   Some documentation links to [stats]numericDeriv, mistakenly called
        numericalDeriv, were fixed.

Changes in numDeriv version 2006.4-1

    o   First released version.
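The complex step method noted in the 2012.9-1 entry requires the user's function to accept a complex argument and return a complex value. A minimal sketch of what this enables (the function and evaluation point below are illustrative, not taken from the package):

# sketch: a complex step gradient agrees with the analytic derivative
library(numDeriv)
f  <- function(x) exp(2*x)           # handles complex input, returns a complex value
x0 <- 1
g  <- grad(f, x0, method="complex")  # internally uses Im(f(x + i*eps))/eps
c(numerical=g, analytic=2*exp(2*x0)) # agreement to machine precision, per ?grad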
numDeriv/man/0000755000176000001440000000000012032321533012640 5ustar ripleyusers
numDeriv/man/grad.Rd0000644000176000001440000001552412032113564014056 0ustar ripleyusers
\name{grad}
\alias{grad}
\alias{grad.default}
\title{Numerical Gradient of a Function}
\description{Calculate the gradient of a function by numerical approximation.}
\usage{
grad(func, x, method="Richardson", method.args=list(), ...)

\method{grad}{default}(func, x, method="Richardson", method.args=list(), ...)
}
\arguments{
    \item{func}{a function with a scalar real result (see details).}
    \item{x}{a real scalar or vector argument to func, indicating the point(s)
       at which the gradient is to be calculated.}
    \item{method}{one of \code{"Richardson"}, \code{"simple"}, or
       \code{"complex"} indicating the method to use for the approximation.}
    \item{method.args}{arguments passed to method. Arguments not specified
       remain at their default values, as specified in the details.}
    \item{...}{any additional arguments passed to \code{func}. WARNING: None
       of these should have names matching other arguments of this function.}
}
\value{A real scalar or vector of the approximated gradient(s).}
\details{
The function \code{grad} calculates a numerical approximation of the first
derivative of \code{func} at the point \code{x}. Any additional arguments in
\dots are also passed to \code{func}, but the gradient is not calculated with
respect to these additional arguments.

It is assumed \code{func} is a scalar valued function. If a vector \code{x}
produces a scalar result then \code{grad} returns the numerical approximation
of the gradient at the point \code{x} (which has the same length as \code{x}).
If a vector \code{x} produces a vector result then the result must have the
same length as \code{x}, and it is assumed that this corresponds to applying
the function to each of its arguments (for example, \code{sin(x)}). In this
case \code{grad} returns the gradient at each of the points in \code{x} (which
also has the same length as \code{x} -- so be careful). An alternative for
vector valued functions is provided by \code{\link{jacobian}}.

If method is "simple", the calculation is done using a simple epsilon
difference. For method "simple" \code{method.args=list(eps=1e-4)} is the
default. Only \code{eps} is used by this method.

If method is "complex", the calculation is done using the complex step
derivative approach described in Lyness and Moler. This method requires that
the function be able to handle complex valued arguments and return the
appropriate complex valued result, even though the user may only be interested
in the real-valued derivatives. For cases where it can be used, it is faster
than Richardson's extrapolation, and it also provides gradients that are
correct to machine precision (16 digits). For method "complex",
\code{method.args} is ignored. The algorithm uses an \code{eps} of
\code{.Machine$double.eps}, which cannot (and should not) be modified.

If method is "Richardson", the calculation is done by Richardson's
extrapolation (see e.g. Linfield and Penny, 1989, or Fornberg and Sloan,
1994). This method should be used if accuracy, as opposed to speed, is
important (but see method "complex" above). For this method
\code{method.args=list(eps=1e-4, d=0.0001,
zero.tol=sqrt(.Machine$double.eps/7e-7), r=4, v=2, show.details=FALSE)} is set
as the default. \code{d} gives the fraction of \code{x} to use for the initial
numerical approximation. The default means the initial approximation uses
\code{0.0001 * x}. \code{eps} is used instead of \code{d} for elements of
\code{x} which are zero (absolute value less than \code{zero.tol}).
\code{zero.tol} is the tolerance used for deciding which elements of \code{x}
are zero. \code{r} gives the number of Richardson improvement iterations
(repetitions with successively smaller \code{d}).
The default \code{4} generally provides good results, but this can be
increased to \code{6} for improved accuracy at the cost of more evaluations.
\code{v} gives the reduction factor. \code{show.details} is a logical
indicating if detailed calculations should be shown.

The general approach in the Richardson method is to iterate for \code{r}
iterations from initial values for the interval \code{d}, using reduction
factor \code{v}. The first order approximation to the derivative with respect
to \eqn{x_{i}}{x_i} is

\deqn{f'_{i}(x) = <f(x_{1},\dots,x_{i}+d,\dots,x_{n}) -
   f(x_{1},\dots,x_{i}-d,\dots,x_{n})>/(2*d)}{%
   f'(x_i) = <f(x_1,\dots,x_i+d,\dots,x_n) - f(x_1,\dots,x_i-d,\dots,x_n)>/(2*d)}

This is repeated \code{r} times with successively smaller \code{d}, and then
Richardson extrapolation is applied.

If elements of \code{x} are near zero the multiplicative interval calculation
using \code{d} does not work, and for these elements an additive calculation
using \code{eps} is done instead. The argument \code{zero.tol} is used to
determine if an element should be considered too close to zero. In the
iterations, the interval is successively reduced to eventually be
\code{d/v^r}, and the square of this value is used in second derivative
calculations (see \code{\link{genD}}), so the default
\code{zero.tol=sqrt(.Machine$double.eps/7e-7)} is set to ensure the interval
is bigger than \code{.Machine$double.eps} with the default \code{d}, \code{r},
and \code{v}.
}
\references{
   Linfield, G. R. and Penny, J. E. T. (1989) \emph{Microcomputers in
   Numerical Analysis}. New York: Halsted Press.

   Fornberg, B. and Sloan, D. M. (1994) ``A review of pseudospectral methods
   for solving partial differential equations.'' \emph{Acta Numerica}, 3,
   203-267.

   Lyness, J. N. and Moler, C. B. (1967) ``Numerical Differentiation of
   Analytic Functions.'' \emph{SIAM Journal for Numerical Analysis}, 4(2),
   202-210.
}
\seealso{
  \code{\link{jacobian}},
  \code{\link{hessian}},
  \code{\link{genD}},
  \code{\link[stats]{numericDeriv}}
}
\examples{
  grad(sin, pi)
  grad(sin, (0:10)*2*pi/10)
  func0 <- function(x){ sum(sin(x)) }
  grad(func0 , (0:10)*2*pi/10)
  func1 <- function(x){ sin(10*x) - exp(-x) }
  curve(func1,from=0,to=5)
  x <- 2.04
  numd1 <- grad(func1, x)
  exact <- 10*cos(10*x) + exp(-x)
  c(numd1, exact, (numd1 - exact)/exact)
  x <- c(1:10)
  numd1 <- grad(func1, x)
  numd2 <- grad(func1, x, "complex")
  exact <- 10*cos(10*x) + exp(-x)
  cbind(numd1, numd2, exact, (numd1 - exact)/exact, (numd2 - exact)/exact)
  sc2.f <- function(x){
    n <- length(x)
    sum((1:n) * (exp(x) - x)) / n
  }
  sc2.g <- function(x){
    n <- length(x)
    (1:n) * (exp(x) - 1) / n
  }
  x0 <- rnorm(100)
  exact <- sc2.g(x0)
  g <- grad(func=sc2.f, x=x0)
  max(abs(exact - g)/(1 + abs(exact)))
  gc <- grad(func=sc2.f, x=x0, method="complex")
  max(abs(exact - gc)/(1 + abs(exact)))
}
\keyword{multivariate}
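The iteration and extrapolation described in the details above can be sketched in standalone R for a scalar function of one (nonzero) variable. This is an illustration of the scheme only, assuming v = 2 as the extrapolation formula requires; it is not the package implementation:

# sketch: central differences with shrinking interval, then Richardson extrapolation
richardson.sketch <- function(func, x, d=1e-4, r=4) {
   h <- d*abs(x)                        # initial interval; x assumed away from zero
   a <- numeric(r)
   for(k in 1:r) {                      # r first order approximations
      a[k] <- (func(x + h) - func(x - h))/(2*h)
      h <- h/2                          # v = 2
   }
   for(m in 1:(r - 1))                  # B(i) = (A(i+1)*4^m - A(i))/(4^m - 1)
      a <- (a[2:(r+1-m)]*4^m - a[1:(r-m)])/(4^m - 1)
   a
}
richardson.sketch(sin, 1) - cos(1)      # the error should be very small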
numDeriv/man/00.numDeriv.Intro.Rd0000644000176000001440000000052310413326044016233 0ustar ripleyusers
\name{00.numDeriv.Intro}
\alias{00.numDeriv.Intro}
\docType{package}
\title{Accurate Numerical Derivatives}
\description{Calculate (accurate) numerical approximations to derivatives.}
\details{
See \code{\link{numDeriv-package}} (in the help system use package?numDeriv or
?"numDeriv-package") for an overview.
}
\keyword{package}

numDeriv/man/jacobian.Rd0000644000176000001440000000553712032113564014712 0ustar ripleyusers
\name{jacobian}
\alias{jacobian}
\alias{jacobian.default}
\title{Gradient of a Vector Valued Function}
\description{
Calculate the m by n numerical approximation of the gradient of a real
m-vector valued function with n-vector argument.
}
\usage{
jacobian(func, x, method="Richardson", method.args=list(), ...)

\method{jacobian}{default}(func, x, method="Richardson", method.args=list(), ...)
}
\arguments{
    \item{func}{a function with a real (vector) result.}
    \item{x}{a real scalar or vector argument to func, indicating the point
       at which the gradient is to be calculated.}
    \item{method}{one of \code{"Richardson"}, \code{"simple"}, or
       \code{"complex"} indicating the method to use for the approximation.}
    \item{method.args}{arguments passed to method. See \code{\link{grad}}.
       (Arguments not specified remain at their default values.)}
    \item{...}{any additional arguments passed to \code{func}. WARNING: None
       of these should have names matching other arguments of this function.}
}
\value{A real m by n matrix.}
\details{
For \eqn{f:R^n -> R^m}{f:R^n -> R^m} calculate the \eqn{m x n}{m x n} Jacobian
\eqn{dy/dx}{dy/dx}. The function \code{jacobian} calculates a numerical
approximation of the first derivative of \code{func} at the point \code{x}.
Any additional arguments in \dots are also passed to \code{func}, but the
gradient is not calculated with respect to these additional arguments.

If method is "Richardson", the calculation is done by Richardson's
extrapolation. See \code{\link{grad}} for more details. For this method
\code{method.args=list(eps=1e-4, d=0.0001,
zero.tol=sqrt(.Machine$double.eps/7e-7), r=4, v=2, show.details=FALSE)} is set
as the default.

If method is "simple", the calculation is done using a simple epsilon
difference. For method "simple" \code{method.args=list(eps=1e-4)} is the
default. Only \code{eps} is used by this method.

If method is "complex", the calculation is done using the complex step
derivative approach described in Lyness and Moler. This method requires that
the function be able to handle complex valued arguments and return the
appropriate complex valued result, even though the user may only be interested
in the real case. For cases where it can be used, it is faster than
Richardson's extrapolation, and it also provides gradients that are correct to
machine precision (16 digits). For method "complex", \code{method.args} is
ignored. The algorithm uses an \code{eps} of \code{.Machine$double.eps}, which
cannot (and should not) be modified.
}
\seealso{
  \code{\link{grad}},
  \code{\link{hessian}},
  \code{\link[stats]{numericDeriv}}
}
\examples{
  func2 <- function(x) c(sin(x), cos(x))
  x <- (0:1)*2*pi
  jacobian(func2, x)
  jacobian(func2, x, "complex")
}
\keyword{multivariate}
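The complex step Jacobian described in the details perturbs one coordinate at a time along the imaginary axis. A minimal standalone sketch of the idea (the package implementation adds error checking and passing of extra arguments):

# sketch: complex step Jacobian, one imaginary perturbation per column
cs.jacobian <- function(func, x) {
   eps <- .Machine$double.eps
   n <- length(x)
   sapply(1:n, function(i) {
      h0 <- rep(0, n)
      h0[i] <- eps * 1i                 # perturb coordinate i only
      Im(func(x + h0)) / eps            # column i of the Jacobian
   })
}
func2 <- function(x) c(sin(x), cos(x))  # the example function from above
cs.jacobian(func2, (0:1)*2*pi)          # compare with jacobian(func2, x, "complex")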
numDeriv/man/numDeriv-package.Rd0000644000176000001440000000252312032113564016316 0ustar ripleyusers
\name{numDeriv-package}
\alias{numDeriv-package}
\alias{numDeriv.Intro}
\docType{package}
\title{Accurate Numerical Derivatives}
\description{Calculate (accurate) numerical approximations to derivatives.}
\details{
The main functions are
\preformatted{
grad      to calculate the gradient (first derivative) of a scalar
          real valued function (possibly applied to all elements
          of a vector argument).

jacobian  to calculate the gradient of a real m-vector valued
          function with real n-vector argument.

hessian   to calculate the Hessian (second derivative) of a scalar
          real valued function with real n-vector argument.

genD      to calculate the gradient and second derivative of a
          real m-vector valued function with real n-vector argument.
}
}
\author{Paul Gilbert, based on work by Xingqiao Liu, and Ravi Varadhan (who
   wrote the complex-step derivative codes)}
\references{
   Linfield, G. R. and Penny, J. E. T. (1989) \emph{Microcomputers in
   Numerical Analysis}. New York: Halsted Press.

   Fornberg, B. and Sloan, D. M. (1994) ``A review of pseudospectral methods
   for solving partial differential equations.'' \emph{Acta Numerica}, 3,
   203-267.

   Lyness, J. N. and Moler, C. B. (1967) ``Numerical Differentiation of
   Analytic Functions.'' \emph{SIAM Journal for Numerical Analysis}, 4(2),
   202-210.
}
\keyword{package}

numDeriv/man/genD.Rd0000644000176000001440000001025612031452061014010 0ustar ripleyusers
\name{genD}
\alias{genD}
\alias{genD.default}
\title{Generate Bates and Watts D Matrix}
\description{Generate a matrix of function derivative information.}
\usage{
genD(func, x, method="Richardson", method.args=list(), ...)

\method{genD}{default}(func, x, method="Richardson", method.args=list(), ...)
}
\arguments{
    \item{func}{a function for which the first (vector) argument is used as a
       parameter vector.}
    \item{x}{The parameter vector first argument to \code{func}.}
    \item{method}{one of \code{"Richardson"} or \code{"simple"} indicating the
       method to use for the approximation.}
    \item{method.args}{arguments passed to method. See \code{\link{grad}}.
       (Arguments not specified remain at their default values.)}
    \item{...}{any additional arguments passed to \code{func}. WARNING: None
       of these should have names matching other arguments of this function.}
}
\value{
A list with elements as follows: \code{D} is a matrix of first and second
order partial derivatives organized in the same manner as Bates and Watts:
the number of rows is equal to the length of the result of \code{func}, the
first p columns are the Jacobian, and the next p(p+1)/2 columns are the lower
triangle of the second derivative (which is the Hessian for a scalar valued
\code{func}). \code{p} is the length of \code{x} (dimension of the parameter
space). \code{f0} is the function value at the point where the matrix \code{D}
was calculated. The \code{genD} arguments \code{func}, \code{x}, \code{d},
\code{method}, and \code{method.args} also are returned in the list.
}
\details{
The derivatives are calculated numerically using Richardson improvement.
Methods "simple" and "complex" are not supported in this function. The
"Richardson" method calculates a numerical approximation of the first and
second derivatives of \code{func} at the point \code{x}. For a scalar valued
function these are the gradient vector and Hessian matrix. (See
\code{\link{grad}} and \code{\link{hessian}}.) For a vector valued function
the first derivative is the Jacobian matrix (see \code{\link{jacobian}}).

For the Richardson method \code{method.args=list(eps=1e-4, d=0.0001,
zero.tol=sqrt(.Machine$double.eps/7e-7), r=4, v=2)} is set as the default.
See \code{\link{grad}} for more details on the Richardson's extrapolation
parameters.

The first order derivative with respect to \eqn{x_i}{x_i} is

\deqn{f'_{i}(x) = <f(x_{1},\dots,x_{i}+d,\dots,x_{n}) -
   f(x_{1},\dots,x_{i}-d,\dots,x_{n})>/(2*d)}{%
   f'(x_i) = <f(x_1,\dots,x_i+d,\dots,x_n) - f(x_1,\dots,x_i-d,\dots,x_n)>/(2*d)}

The second order derivative with respect to \eqn{x_i}{x_i} is

\deqn{f''_{i}(x) = <f(x_{1},\dots,x_{i}+d,\dots,x_{n}) -
   2*f(x_{1},\dots,x_{n}) +
   f(x_{1},\dots,x_{i}-d,\dots,x_{n})>/(d^2)}{%
   f''(x_i) = <f(x_1,\dots,x_i+d,\dots,x_n) - 2*f(x_1,\dots,x_n) +
   f(x_1,\dots,x_i-d,\dots,x_n)>/(d^2)}

The second order derivative with respect to \eqn{x_i, x_j}{x_i, x_j} is

\deqn{f''_{i,j}(x) = <f(x_{1},\dots,x_{i}+d,\dots,x_{j}+d,\dots,x_{n}) -
   2*f(x_{1},\dots,x_{n}) +
   f(x_{1},\dots,x_{i}-d,\dots,x_{j}-d,\dots,x_{n})>/(2*d^2) -
   (f''_{i}(x) + f''_{j}(x))/2}{%
   f''(x_i,x_j) = <f(x_1,\dots,x_i+d,\dots,x_j+d,\dots,x_n) -
   2*f(x_1,\dots,x_n) +
   f(x_1,\dots,x_i-d,\dots,x_j-d,\dots,x_n)>/(2*d^2) -
   (f''(x_i) + f''(x_j))/2}
}
\references{
   Linfield, G. R. and Penny, J. E. T. (1989) \emph{Microcomputers in
   Numerical Analysis}. Halsted Press.

   Bates, D. M. and Watts, D. (1980) ``Relative Curvature Measures of
   Nonlinearity.'' \emph{J. Royal Statistical Soc. Series B}, 42, 1-25.

   Bates, D. M. and Watts, D. (1988) \emph{Nonlinear Regression Analysis and
   Its Applications}. Wiley.
}
\seealso{
  \code{\link{hessian}},
  \code{\link{grad}}
}
\examples{
  func <- function(x){c(x[1], x[1], x[2]^2)}
  z <- genD(func, c(2,2,5))
}
\keyword{multivariate}
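To make the D layout concrete, the lower-triangle block can be unpacked into a full Hessian for a scalar valued func. This is an illustrative sketch, not package code; it assumes the row-wise lower-triangle ordering (1,1), (2,1), (2,2), ... implied by the Bates and Watts organization:

# sketch: unpack the second derivative block of genD output into a Hessian
library(numDeriv)
f  <- function(x) sum(exp(2*x))   # scalar valued, so the block is the Hessian
x0 <- c(1, 2)
z  <- genD(f, x0)
p  <- z$p
H  <- matrix(0, p, p)
k  <- p + 1                       # columns 1:p of z$D hold the Jacobian (here, the gradient)
for(i in 1:p) for(j in 1:i) {     # assumed order: (1,1), (2,1), (2,2), ...
   H[i,j] <- H[j,i] <- z$D[1,k]
   k <- k + 1
}
all.equal(H, diag(4*exp(2*x0)), tolerance=1e-4)  # compare with the analytic Hessian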
numDeriv/man/hessian.Rd0000644000176000001440000000601012032113564014561 0ustar ripleyusers
\name{hessian}
\alias{hessian}
\alias{hessian.default}
\title{Calculate Hessian Matrix}
\description{Calculate a numerical approximation to the Hessian matrix of a
function at a parameter value.}
\usage{
hessian(func, x, method="Richardson", method.args=list(), ...)

\method{hessian}{default}(func, x, method="Richardson", method.args=list(), ...)
}
\arguments{
    \item{func}{a function for which the first (vector) argument is used as a
       parameter vector.}
    \item{x}{the parameter vector first argument to func.}
    \item{method}{one of \code{"Richardson"} or \code{"complex"} indicating
       the method to use for the approximation.}
    \item{method.args}{arguments passed to method. See \code{\link{grad}}.
       (Arguments not specified remain at their default values.)}
    \item{...}{any additional arguments passed to \code{func}. WARNING: None
       of these should have names matching other arguments of this function.}
}
\value{An n by n matrix of the Hessian of the function calculated at the point
\code{x}.}
\details{
The function \code{hessian} calculates a numerical approximation to the n x n
second derivative of a scalar real valued function with n-vector argument.

The argument \code{method} can be \code{"Richardson"} or \code{"complex"}.
Method \code{"simple"} is not supported.

For method \code{"complex"} the Hessian matrix is calculated as the Jacobian
of the gradient. The function \code{grad} with method "complex" is used, and
\code{method.args} is ignored for this step (an \code{eps} of
\code{.Machine$double.eps} is used -- see \code{\link{grad}} for more
details). However, \code{jacobian} is used in the second step, with method
\code{"Richardson"}, and argument \code{method.args} is used for that step.
The default is \code{method.args=list(eps=1e-4, d=0.1,
zero.tol=sqrt(.Machine$double.eps/7e-7), r=4, v=2, show.details=FALSE)}.
(These are the defaults for \code{hessian} with method \code{"Richardson"},
which are slightly different from the defaults for \code{jacobian} with method
\code{"Richardson"}.)

Method \code{"Richardson"} uses \code{\link{genD}} and extracts the second
derivative. For this method \code{method.args=list(eps=1e-4, d=0.1,
zero.tol=sqrt(.Machine$double.eps/7e-7), r=4, v=2, show.details=FALSE)} is set
as the default.
}
\seealso{
  \code{\link{jacobian}},
  \code{\link{grad}},
  \code{\link{genD}}
}
\examples{
  sc2.f <- function(x){
    n <- length(x)
    sum((1:n) * (exp(x) - x)) / n
  }
  sc2.g <- function(x){
    n <- length(x)
    (1:n) * (exp(x) - 1) / n
  }
  x0 <- rnorm(5)
  hess <- hessian(func=sc2.f, x=x0)
  hessc <- hessian(func=sc2.f, x=x0, "complex")
  all.equal(hess, hessc, tolerance = .Machine$double.eps)

  #  Hessian = Jacobian of the gradient
  jac  <- jacobian(func=sc2.g, x=x0)
  jacc <- jacobian(func=sc2.g, x=x0, "complex")
  all.equal(hess, jac, tolerance = .Machine$double.eps)
  all.equal(hessc, jacc, tolerance = .Machine$double.eps)
}
\keyword{multivariate}
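Since the default d differs between hessian (0.1) and jacobian or genD (0.0001), it can be worth experimenting with method.args when a function changes rapidly. A small sketch along the lines of the package's own tests (the value d=0.01 is just an illustration):

# sketch: the effect of the method.args d setting on hessian accuracy
library(numDeriv)
f  <- function(x) sum(exp(2*x))
x0 <- c(1, 3, 5)
exact <- diag(4*exp(2*x0))        # analytic Hessian
h.default <- hessian(f, x0)                            # default d = 0.1
h.tuned   <- hessian(f, x0, method.args=list(d=0.01))  # smaller interval
c(default=max(abs(h.default - exact)), d0.01=max(abs(h.tuned - exact)))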
numDeriv/DESCRIPTION0000644000176000001440000000210212036454517013603 0ustar ripleyusers
Package: numDeriv
Version: 2012.9-1
Title: Accurate Numerical Derivatives
Description: This package provides methods for calculating (usually) accurate
        numerical first and second order derivatives. Accurate calculations
        are done using Richardson's extrapolation or, when applicable, a
        complex step derivative. A simple difference method is also provided.
        Simple difference is (usually) less accurate but is much quicker than
        Richardson's extrapolation and provides a useful cross-check. Methods
        are provided for real scalar and vector valued functions.
Depends: R (>= 1.8.1)
LazyLoad: yes
License: GPL-2
Copyright: 2006-2011, Bank of Canada. 2012, Paul Gilbert
Author: Paul Gilbert and Ravi Varadhan
Maintainer: Paul Gilbert
URL: http://optimizer.r-forge.r-project.org/
Repository: CRAN
Repository/R-Forge/Project: optimizer
Repository/R-Forge/Revision: 679
Repository/R-Forge/DateTimeStamp: 2012-09-30 23:59:38
Date/Publication: 2012-10-14 06:18:55
Packaged: 2012-10-01 14:21:40 UTC; rforge

numDeriv/vignettes/0000755000176000001440000000000012032323364014101 5ustar ripleyusers
numDeriv/vignettes/Guide.Stex0000644000176000001440000000307712012503635016011 0ustar ripleyusers
\documentclass[english]{article}
\begin{document}

%\VignetteIndexEntry{numDeriv Guide}
\SweaveOpts{eval=TRUE,echo=TRUE,results=hide,fig=FALSE}
\begin{Scode}{echo=FALSE,results=hide}
 options(continue="  ")
\end{Scode}

\section{Functions to calculate Numerical Derivatives and Hessian Matrix}

In R, the functions in this package are made available with

\begin{Scode}
library("numDeriv")
\end{Scode}

The code from the vignette that generates this guide can be loaded into an
editor with \emph{edit(vignette("Guide", package="numDeriv"))}. This uses the
default editor, which can be changed using \emph{options()}.

Here are some examples of grad.
\begin{Scode}
grad(sin, pi)
grad(sin, (0:10)*2*pi/10)
func0 <- function(x){ sum(sin(x)) }
grad(func0 , (0:10)*2*pi/10)
func1 <- function(x){ sin(10*x) - exp(-x) }
curve(func1,from=0,to=5)
x <- 2.04
numd1 <- grad(func1, x)
exact <- 10*cos(10*x) + exp(-x)
c(numd1, exact, (numd1 - exact)/exact)
x <- c(1:10)
numd1 <- grad(func1, x)
exact <- 10*cos(10*x) + exp(-x)
cbind(numd1, exact, (numd1 - exact)/exact)
\end{Scode}

Here are some examples of jacobian.
\begin{Scode}
func2 <- function(x) c(sin(x), cos(x))
x <- (0:1)*2*pi
jacobian(func2, x)
\end{Scode}

Here are some examples of hessian.
\begin{Scode}
x <- 0.25 * pi
hessian(sin, x)
fun1e <- function(x) sum(exp(2*x))
x <- c(1, 3, 5)
hessian(fun1e, x, method.args=list(d=0.01))
\end{Scode}

Here are some examples of genD.
\begin{Scode}
func <- function(x){c(x[1], x[1], x[2]^2)}
z <- genD(func, c(2,2,5))
z
\end{Scode}
\end{document}
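The vignette above uses the default Richardson method throughout. The other two methods accepted by grad can be compared on the same kind of example; a brief sketch (not part of the published vignette):

# sketch: compare the error of the three grad methods at a single point
library(numDeriv)
x <- 2
exact <- cos(x)
c(Richardson = grad(sin, x) - exact,
  simple     = grad(sin, x, method="simple") - exact,
  complex    = grad(sin, x, method="complex") - exact)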
numDeriv/inst/0000755000176000001440000000000012032321533013042 5ustar ripleyusers
numDeriv/inst/doc/0000755000176000001440000000000012032323364013613 5ustar ripleyusers
numDeriv/inst/doc/Guide.Stex0000644000176000001440000000307712032323364015524 0ustar ripleyusers
\documentclass[english]{article}
\begin{document}

%\VignetteIndexEntry{numDeriv Guide}
\SweaveOpts{eval=TRUE,echo=TRUE,results=hide,fig=FALSE}
\begin{Scode}{echo=FALSE,results=hide}
 options(continue="  ")
\end{Scode}

\section{Functions to calculate Numerical Derivatives and Hessian Matrix}

In R, the functions in this package are made available with

\begin{Scode}
library("numDeriv")
\end{Scode}

The code from the vignette that generates this guide can be loaded into an
editor with \emph{edit(vignette("Guide", package="numDeriv"))}. This uses the
default editor, which can be changed using \emph{options()}.

Here are some examples of grad.
\begin{Scode}
grad(sin, pi)
grad(sin, (0:10)*2*pi/10)
func0 <- function(x){ sum(sin(x)) }
grad(func0 , (0:10)*2*pi/10)
func1 <- function(x){ sin(10*x) - exp(-x) }
curve(func1,from=0,to=5)
x <- 2.04
numd1 <- grad(func1, x)
exact <- 10*cos(10*x) + exp(-x)
c(numd1, exact, (numd1 - exact)/exact)
x <- c(1:10)
numd1 <- grad(func1, x)
exact <- 10*cos(10*x) + exp(-x)
cbind(numd1, exact, (numd1 - exact)/exact)
\end{Scode}

Here are some examples of jacobian.
\begin{Scode}
func2 <- function(x) c(sin(x), cos(x))
x <- (0:1)*2*pi
jacobian(func2, x)
\end{Scode}

Here are some examples of hessian.
\begin{Scode}
x <- 0.25 * pi
hessian(sin, x)
fun1e <- function(x) sum(exp(2*x))
x <- c(1, 3, 5)
hessian(fun1e, x, method.args=list(d=0.01))
\end{Scode}

Here are some examples of genD.
\begin{Scode}
func <- function(x){c(x[1], x[1], x[2]^2)}
z <- genD(func, c(2,2,5))
z
\end{Scode}
\end{document}

numDeriv/inst/doc/Guide.pdf0000644000176000001440000015427712032323364015353 0ustar ripleyusers
[binary content of Guide.pdf omitted: compressed PDF streams, not recoverable as text]
numDeriv/tests/0000755000176000001440000000000012032321533013227 5ustar ripleyusers
numDeriv/tests/CSD.R0000644000176000001440000001216112032113564013767 0ustar ripleyusers
require("numDeriv")

#####  Example 0
set.seed(123)
f <- function(x) {
   n <- length(x)
   f <- rep(NA, n)
   vec <- 1:(n-1)
   f[vec] <- x[vec]^2 + (-1)^vec * x[vec]*exp(x[vec+1])
   f[n] <- x[n]*exp(x[n])
   f
}

x0 <- runif(5)
ans1 <- jacobian(func=f, x=x0, method="complex")
print(ans1, digits=18)
#max.diff1: 3.571277e-11
ans2 <- jacobian(func=f, x=x0)
err <- max(abs(ans1 - ans2))
cat("max.diff1: ", err, "\n")
if (1e-10 < err ) stop("Example 0 jacobian test failed.")

######  Example 1
broydt <- function(x, h=0.5) {
   n <- length(x)
   f <- numeric(n)
   f[1] <- ((3 - h*x[1]) * x[1]) - 2*x[2] + 1
   tnm1 <- 2:(n-1)
   f[tnm1] <- ((3 - h*x[tnm1])*x[tnm1]) - x[tnm1-1] - 2*x[tnm1+1] + 1
   f[n] <- ((3 - h*x[n]) * x[n]) - x[n-1] + 1
   sum(f*f)
}

set.seed(123)
p0 <- runif(10)
ans1 <- grad(func=broydt, x=p0, method="complex")
#print(ans1, digits=18)
ans2 <- grad(func=broydt, x=p0)
err <- max(abs(ans1 - ans2))
cat("max.diff1: ", err, "\n")
#max.diff1:  4.977583e-10
##max.diff1:  9.386859e-09
if (1e-8 < err ) stop("broydt gradient test failed.")

h1 <- hessian(func=broydt, x=p0, method="complex")
#print(h1, digits=18)
h2 <- hessian(func=broydt, x=p0)
#print(h2, digits=18)
err <- max(abs(h1 - h2))
#print(err, digits=18)
cat("max.diff1: ", err , "\n")
#max.diff1:  9.386859e-09
##max.diff1:  8.897979e-08
if (1e-7 < err ) stop("broydt hessian test failed.")

######  Example 2
sc2.f <- function(x){
   n <- length(x)
   vec <- 1:n
   sum(vec * (exp(x) - x)) / n
}

sc2.g <- function(x){
   n <- length(x)
   vec <- 1:n
   vec * (exp(x) - 1) / n
}

sc2.h <- function(x){
   n <- length(x)
   hess <- matrix(0, n, n)
   vec <- 1:n
   diag(hess) <- vec*exp(x)/n
   hess
}

set.seed(123)
#x0 <- rexp(10, rate=0.1)
x0 <- rnorm(100)
exact <- sc2.g(x0)
ans1 <- grad(func=sc2.f, x=x0, method="complex")
#print(ans1, digits=18)
err <- max(abs(exact - ans1)/(1 + abs(exact)))
err
#[1] 0
if (1e-14 < err ) stop("sc2 grad complex test failed.")

ans2 <- grad(func=sc2.f, x=x0)
err <- max(abs(exact - ans2)/(1 + abs(exact)))
err
# [1] 9.968372e-08
##[1] 9.968372e-08
if (1e-7 < err ) stop("sc2 grad Richardson test failed.")

exact <- sc2.h(x0)
system.time(ah1 <- hessian(func=sc2.f, x=x0, method="complex"))
#elapsed 4.14
err <- max(abs(exact - ah1)/(1 + abs(exact)))
err
# [1] 1.13183e-13
## [1] 1.13183e-13
if (1e-12 < err ) stop("sc2 hessian complex test failed.")

system.time(ah2 <- hessian(func=sc2.f, x=x0))
#elapsed 2.537
err <- max(abs(exact - ah2)/(1 + abs(exact)))
err
# [1] 3.415308e-06
##[1] 6.969096e-08
if (1e-5 < err ) stop("sc2 hessian Richardson test failed.")

######  Example 3
rosbkext.f <- function(p, cons=10){
   n <- length(p)
   j <- 1: (n/2)
   tjm1 <- 2*j - 1
   tj <- 2*j
   sum (cons^2*(p[tjm1]^2 - p[tj])^2 + (p[tj] - 1)^2)
}

rosbkext.g <- function(p, cons=10){
   n <- length(p)
   g <- rep(NA, n)
   j <- 1: (n/2)
   tjm1 <- 2*j - 1
   tj <- 2*j
   g[tjm1] <- 4*cons^2 * p[tjm1] * (p[tjm1]^2 - p[tj])
   g[tj] <- -2*cons^2 * (p[tjm1]^2 - p[tj]) + 2 * (p[tj] - 1)
   g
}

set.seed(123)
p0 <- runif(10)
exact <- rosbkext.g(p0, cons=10)
numd1 <- grad(func=rosbkext.f, x=p0, cons=10, method="complex") # not as good
#print(numd1, digits=18)
err <- max(abs(exact - numd1)/(1 + abs(exact)))
err
# [1] 1.203382e-16
##[1] 1.691132e-16
if (1e-15 < err ) stop("rosbkext grad complex test failed.")

numd2 <- grad(func=rosbkext.f, x=p0, cons=10)
err <- max(abs(exact - numd2)/(1 + abs(exact)))
err
# [1] 5.825746e-11
##[1] 4.020598e-10
if (1e-9 < err ) stop("rosbkext grad Richardson test failed.")

######  Example 4
genrose.f <- function(x, gs=100){ # objective function
   ## One generalization of the Rosenbrock banana valley function (n parameters)
   n <- length(x)
   1.0 + sum (gs*(x[1:(n-1)]^2 - x[2:n])^2 + (x[2:n] - 1)^2)
}

genrose.g <- function(x, gs=100){
   # vectorized gradient for genrose.f
   # Ravi Varadhan 2009-04-03
   n <- length(x)
   gg <- as.vector(rep(0, n))
   tn <- 2:n
   tn1 <- tn - 1
   z1 <- x[tn] - x[tn1]^2
   z2 <- 1 - x[tn]
   gg[tn] <- 2 * (gs * z1 - z2)
   gg[tn1] <- gg[tn1] - 4 * gs * x[tn1] * z1
   return(gg)
}

#set.seed(123)
#p0 <- runif(10)
p0 <- rep(pi, 1000)
exact <- genrose.g(p0, gs=100)
numd1 <- grad(func=genrose.f, x=p0, gs=100, method="complex")
err <- max(abs(exact - numd1)/(1 + abs(exact)))
err
# [1] 2.556789e-16
##[1] 2.556789e-16
if (1e-15 < err ) stop("genrose grad complex test failed.")

numd2 <- grad(func=genrose.f, x=p0, gs=100)
err <- max(abs(exact - numd2)/(1 + abs(exact)))
err
# [1] 1.847244e-09
##[1] 1.847244e-09
if (1e-8 < err ) stop("genrose grad Richardson test failed.")

#####  Example 5
# function of single variable
fchirp <- function(x, b, k) exp(-b*x) * sin(k*x^4)
dchirp <- function(x, b, k) exp(-b*x) * (4 * k * x^3 * cos(k*x^4) - b * sin(k*x^4))

x <- seq(-3, 3, length=500)
y <- dchirp(x, b=1, k=4)
#plot(x, y, type="l")
y1 <- grad(func=fchirp, x=x, b=1, k=4, method="complex")
#lines(x, y1, col=2, lty=2)
err <- max(abs(y-y1))
err
# [1] 4.048388e-10
##[1] 4.048388e-10
if (1e-9 < err ) stop("chirp grad complex test failed.")

y2 <- grad(func=fchirp, x=x, b=1, k=4)
#lines(x, y2, col=3, lty=2)
err <- max(abs(y-y2))
err
# [1] 5.219681e-08
##[1] 5.219681e-08
if (1e-7 < err ) stop("chirp grad Richardson test failed.")

numDeriv/tests/hessian01.R0000644000176000001440000000425010412646041015152 0ustar ripleyusers
# check hessian

if(!require("numDeriv"))stop("this test requires numDeriv.")

####################################################################
#  sin tests
####################################################################

x <- 0.25 * pi
print(calc.h <- hessian(sin, x) )
print(anal.h <- sin(x+pi))
cat("error: ", err <- max(abs(calc.h - anal.h)),"\n")
if( err > 1e-4) stop("hessian test 1 FAILED") # 1e-8 with d=0.01

func1 <- function(x) sum(sin(x))
x <- (0:2)*2*pi/2
#x <- (0:10)*2*pi/10
print(anal.h <- matrix(0, length(x), length(x)))
print(calc.h <- hessian(func1, x) )
cat("error: ", err <- max(abs(anal.h - calc.h)),"\n")
if( err > 1e-10) stop("hessian test 2 FAILED")

funcD1 <- function(x) grad(sin,x)
print(calc.j <- jacobian(funcD1, x) )
cat("error: ", err <- max(abs(calc.h - calc.j)),"\n")
if( err > 1e-5) stop("hessian test 3 FAILED") # 1e-8 with d=0.01

####################################################################
#  exp tests
####################################################################

fun1e <- function(x) exp(2*x)
funD1e <- function(x) 2*exp(2*x)
x <- 1
print(anal.h <- 4*exp(2*x) )
print(calc.h <- hessian(fun1e, x) )
cat("\nerror: ", err <- max(abs(calc.h - anal.h)),"\n")
if( err > 1e-3) stop("hessian test 5 FAILED") # 1e-7 with d=0.01

print(calc.j <- jacobian(funD1e, x) )
cat("\nerror: ", err <- max(abs(calc.j - anal.h)),"\n")
if( err > 1e-9) stop("hessian test 6 FAILED") # 1e-10 with d=0.01

fun1e <- function(x) sum(exp(2*x))
funD1e <- function(x) 2*exp(2*x)
x <- c(1,3,5)
print(anal.h <- diag(4*exp(2*x)) )

cat("\n************ d=0.01 works better here.*********\n")
print(calc.h <- hessian(fun1e, x, method.args=list(d=0.01)) )
cat("\n relative error: \n")
print( err <- (calc.h - anal.h) /(anal.h+1e-4))
cat("\n max relative error: ", err <- max(abs(err)),"\n")
# above is 901.4512 with d=0.0001
cat("\n error: \n")
print( err <- calc.h - anal.h)
cat("\n max error: ", err <- max(abs(err)),"\n")
# above is 0.1670381 with d=0.0001
if( err > 1e-5) stop("hessian test 7 FAILED")

print(calc.j <- jacobian(funD1e, x) )
cat("error: ", err <- max(abs(calc.j - anal.h)),"\n")
if( err > 1e-5) stop("hessian test 8 FAILED") # 1e-6 with d=0.01

numDeriv/tests/BWeg.R0000644000176000001440000000617110412646041014207 0ustar ripleyusers
if(!require("numDeriv"))stop("this test requires numDeriv.")
Sys.info()

#######################################################################
# Test gradient and hessian calculation in genD using data for calculating
# curvatures in Bates and Watts.
# model A p329, data set 3 (table A1.3, p269) Bates & Watts (Puromycin example)
#######################################################################

puromycin <- function(th){
   x <- c(0.02,0.02,0.06,0.06,0.11,0.11,0.22,0.22,0.56,0.56,1.10,1.10)
   y <- c(76,47,97,107,123,139,159,152,191,201,207,200)
   ( (th[1] * x)/(th[2] + x) ) - y
}

D.anal <- function(th){
   # analytic derivatives. Note numerical approximation gives a very good
   # estimate of these, but neither give D below exactly. The results are very
   # sensitive to th, so rounding error in the reported value of th could explain
   # the difference. But more likely th is correct and D has been rounded for
   # publication - and the analytic D with published th seems to work best.
   # th = c(212.70188549 , 0.06410027) is the nls est of th for BW published D.
   x <- c(0.02,0.02,0.06,0.06,0.11,0.11,0.22,0.22,0.56,0.56,1.10,1.10)
   y <- c(76,47,97,107,123,139,159,152,191,201,207,200)
   cbind(x/(th[2]+x), -th[1]*x/(th[2]+x)^2, 0, -x/(th[2]+x)^2, 2*th[1]*x/(th[2]+x)^3)
}

# D matrix from p235. This may be useful for rough comparisons, but rounding
# used for publication introduces substantial errors.
# check D.anal1 - D.BW

D.BW <- t(matrix(c(
   0.237812, -601.458, 0, -2.82773, 14303.4,
   0.237812, -601.458, 0, -2.82773, 14303.4,
   0.483481, -828.658, 0, -3.89590, 13354.7,
   0.483481, -828.658, 0, -3.89590, 13354.7,
   0.631821, -771.903, 0, -3.62907,  8867.4,
   0.631821, -771.903, 0, -3.62907,  8867.4,
   0.774375, -579.759, 0, -2.72571,  4081.4,
   0.774375, -579.759, 0, -2.72571,  4081.4,
   0.897292, -305.807, 0, -1.43774,   980.0,
   0.897292, -305.807, 0, -1.43774,   980.0,
   0.944936, -172.655, 0, -0.81173,   296.6,
   0.944936, -172.655, 0, -0.81173,   296.6), 5,12))

cat("\nanalytic D:\n")
print( D.anal <- D.anal(c(212.7000, 0.0641)), digits=16)

cat("\n********** note the results here are better with d=0.01 ********\n")
cat("\n********** in both relative and absolute terms.         ********\n")

cat("\nnumerical D:\n")
print( D.calc <- genD(puromycin,c(212.7000, 0.0641), method.args=list(d=0.01)), digits=16)
# increasing r does not always help
#D.calc <- genD(puromycin,c(212.7000, 0.0641), r=10) # compares to 0.01 below
#D.calc <- genD(puromycin,c(212.7000, 0.0641), d=0.001)

cat("\ndiff. between analytic and numerical D:\n")
print( D.calc$D - D.anal, digits=16)

cat("\nmax. abs. diff. between analytic and numerical D:\n")
print( max(abs(D.calc$D - D.anal)), digits=16)

# These are better tests except for 0 column, so add an epsilon
cat("\nrelative diff. between numerical D and analytic D (plus epsilon):\n")
print(z <- (D.calc$D - D.anal) / (D.anal + 1e-4), digits=16)
# d=0.0001 [12,] 1.184044172787111e-04 7.451545953037876e-03
# d=0.01   [12,] 1.593395089728741e-08 2.814629092064831e-07

cat("\nmax. abs. relative diff. between analytic and numerical D:")
print( max(abs(z)), digits=16)

if(max(abs(z)) > 1e-6) stop("BW test FAILED")

numDeriv/tests/jacobian01.R0000644000176000001440000000250110412646041015263 0ustar ripleyusers
# check jacobian

if(!require("numDeriv"))stop("this test requires numDeriv.")

x <- pi
print(j.calc <- jacobian(sin, x))
cat("error: ", err <- max(abs(j.calc - cos(x))),"\n")
if( err > 1e-11) stop("jacobian matrix test 1 FAILED") # 1e-13 with d=0.01

x <- (1:2)*2*pi/2
print(j.calc <- jacobian(sin, x))
cat("error: ", err <- max(abs(j.calc - diag(cos(x)))),"\n")
if( err > 1e-11) stop("jacobian matrix test 2 FAILED") # 1e-13 with d=0.01

func2 <- function(x) c(sin(x), cos(x))
x <- (1:2)*2*pi/2
print(j.calc <- jacobian(func2, x))
cat("error: ", err <- max(abs(j.calc - rbind(diag(cos(x)), diag(-sin(x))))),"\n")
if( err > 1e-11) stop("jacobian matrix test 3 FAILED") # 1e-13 with d=0.01

x <- (0:1)*2*pi
print(j.calc <- jacobian(func2, x))
cat("error: ", err <- max(abs(j.calc - rbind(diag(cos(x)), diag(-sin(x))))),"\n")
if( err > 1e-11) stop("jacobian matrix test 4 FAILED") # 1e-13 with d=0.01

x <- (0:10)*2*pi/10
print(j.calc <- jacobian(func2, x))
cat("error: ", err <- max(abs(j.calc - rbind(diag(cos(x)), diag(-sin(x))))),"\n")
if( err > 1e-10) stop("jacobian matrix test 5 FAILED") # 1e-12 with d=0.01

func3 <- function(x) sum(sin(x))  # R^n -> R
x <- (1:2)*2*pi/2
print(j.calc <- jacobian(func3, x))
cat("error: ", err <- max(abs(j.calc - cos(x))),"\n")
if( err > 1e-11) stop("jacobian matrix test 6 FAILED") # 1e-13 with d=0.01

numDeriv/tests/trig01.R0000644000176000001440000000330010412605401014463 0ustar ripleyusers
if(!require("numDeriv"))stop("this test requires numDeriv.")

###################################################################
# 3 test functions to test the accuracy of numerical derivatives
# in the "numDeriv" package written by Paul Gilbert
# Author: Ravi Varadhan
# March 27, 2006
###################################################################

options(digits=12)

###################################################################
# asin test
###################################################################
func1 <- function(x){asin(x)}
x <- c(0.9,0.99,0.999)
exact <- 1/sqrt(1 - x^2)

# With d = 0.0001
print(g.calcS <- grad(func1, x, method.args=list(d=0.0001)))
rel.err <- g.calcS/exact - 1
cbind(x, g.calcS, exact, rel.err)
if(any(rel.err > 1e-10)) stop("trig01 test 1 FAILED")

###################################################################
# sin test
###################################################################
func2 <- function(x){sin(1/x)}
x <- c(0.1,0.01,0.001,0.0001)
exact <- cos(1/x) * (-1/x^2)

# With d = 0.0001
print(g.calcS <- grad(func2, x, method.args=list(d=0.0001)))
rel.err <- g.calcS/exact - 1
cbind(x, g.calcS, exact, rel.err)
if(any(rel.err > 1e-10)) stop("trig02 test 1 FAILED")

###################################################################
# power test
###################################################################
func3 <- function(x){(x-100)^2 + 1.e-06 * (x - 300)^3}
x <- c(100.001,300.001)
exact <- 2*(x-100) + 3.e-06*(x-300)^2

# With d = 0.0001
print(g.calcS <- grad(func3, x, method.args=list(d=0.0001)))
rel.err <- g.calcS/exact - 1
cbind(x, g.calcS, exact, rel.err)
if(any(rel.err > 1e-10)) stop("trig03 test 1 FAILED")

numDeriv/tests/grad01.R0000644000176000001440000000327710412646041014445 0ustar ripleyusers
if(!require("numDeriv"))stop("this test requires numDeriv.")

###################################################################
# sin test.  scalar valued function with scalar arg
###################################################################
print(g.anal <- cos(pi))
print(g.calcR <- grad(sin, pi, method="Richardson"))
cat("error: ", err <- max(abs(g.calcR - g.anal)),"\n")
if(err > 1e-11) stop("grad01 test 1 FAILED") # 1e-13 with d=0.01

print(g.calcS <- grad(sin, pi, method="simple"))
cat("error: ", err <- max(abs(g.calcS - g.anal)),"\n")
if(err > 1e-8) stop("grad01 test 2 FAILED")

###################################################################
# sin test.  vector argument, scalar result
###################################################################
func2a <- function(x) sum(sin(x))
x <- (0:10)*2*pi/10
print(g.anal <- cos(x))
print(g.calcR <- grad(func2a, x, method="Richardson"))
cat("error: ", err <- max(abs(g.calcR - g.anal)),"\n")
if(err > 1e-10) stop("grad01 test 3 FAILED")

print(g.calcS <- grad(func2a, x, method="simple"))
cat("error: ", err <- max(abs(g.calcS - g.anal)),"\n")
if(err > 1e-4) stop("grad01 test 4 FAILED")

###################################################################
# sin test.
###################################################################
x <- (0:10)*2*pi/10
print(g.anal <- cos(x))

print(g.calcR <- grad(sin, x, method="Richardson"))
cat("error: ", err <- max(abs(g.calcR - g.anal)),"\n")
if(err > 1e-10) stop("grad01 test 5 FAILED") # 1e-12 with d=0.01

print(g.calcS <- grad(sin, x, method="simple"))
cat("error: ", err <- max(abs(g.calcS - g.anal)),"\n")
if(err > 1e-4) stop("grad01 test 6 FAILED")
numDeriv/NAMESPACE0000644000176000001440000000027510406167726013327 0ustar ripleyusersexport("grad")
S3method("grad", "default")
export("jacobian")
S3method("jacobian", "default")
export("hessian")
S3method("hessian", "default")
export("genD")
S3method("genD", "default")
numDeriv/R/0000755000176000001440000000000012032321533012266 5ustar ripleyusersnumDeriv/R/numDeriv.R0000644000176000001440000002671512032156352014222 0ustar ripleyusers############################################################################
# functions for gradient calculation
############################################################################

grad <- function (func, x, method="Richardson", method.args=list(), ...)
   UseMethod("grad")

grad.default <- function(func, x, method="Richardson", method.args=list(), ...){
   # modified by Paul Gilbert from code by Xingqiao Liu.
   # case 1/ scalar arg, scalar result (case 2/ or 3/ code should work)
   # case 2/ vector arg, scalar result (same as special case jacobian)
   # case 3/ vector arg, vector result (of same length, really 1/ applied
   #         multiple times)
   f <- func(x, ...)
   n <- length(x)  # number of variables in argument
   case1or3 <- n == length(f)
   if((1 != length(f)) && !case1or3)
      stop("grad assumes a scalar valued function.")
   if(method=="simple"){
      # very simple numerical approximation
      args <- list(eps=1e-4) # default
      args[names(method.args)] <- method.args
      eps <- args$eps
      if(case1or3) return((func(x+eps, ...)-f)/eps)
      # now case 2
      df <- rep(NA, n)
      for (i in 1:n) {
         dx <- x
         dx[i] <- dx[i] + eps
         df[i] <- (func(dx, ...)-f)/eps
      }
      return(df)
   }
   else if(method=="complex"){ # Complex step gradient
      eps <- .Machine$double.eps
      v <- try(func(x + eps * 1i, ...))
      if(inherits(v, "try-error"))
         stop("function does not accept complex argument as required by method 'complex'.")
      if(!is.complex(v))
         stop("function does not return a complex value as required by method 'complex'.")
      if(case1or3) return(Im(v)/eps)
      # now case 2
      h0 <- rep(0, n)
      g  <- rep(NA, n)
      for (i in 1:n) {
         h0[i] <- eps * 1i
         g[i]  <- Im(func(x+h0, ...))/eps
         h0[i] <- 0
      }
      return(g)
   }
   else if(method=="Richardson"){
      args <- list(eps=1e-4, d=0.0001,
                   zero.tol=sqrt(.Machine$double.eps/7e-7),
                   r=4, v=2, show.details=FALSE) # default
      args[names(method.args)] <- method.args
      eps <- args$eps
      d <- args$d
      r <- args$r
      v <- args$v
      show.details <- args$show.details
      a <- matrix(NA, r, n)
      #b <- matrix(NA, (r - 1), n)

      # first order derivatives are stored in the matrix a[k,i],
      # where the indexing variables k for rows(1 to r), i for columns
      # (1 to n), r is the number of iterations, and n is the number
      # of variables.

      h <- abs(d*x) + eps*(abs(x) < args$zero.tol)
      for(k in 1:r)  { # successively reduce h
         if(case1or3) a[k,] <- (func(x + h, ...) - func(x - h, ...))/(2*h)
         else for(i in 1:n)  {
            if((k != 1) && (abs(a[(k-1),i]) < 1e-20)) a[k,i] <- 0 # some func are unstable near zero
            else a[k,i] <- (func(x + h*(i==seq(n)), ...) -
                            func(x - h*(i==seq(n)), ...))/(2*h[i])
         }
         if (any(is.na(a[k,]))) stop("function returns NA at ", h," distance from x.")
         h <- h/v # Reduced h by 1/v.
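         # After pass k, row k of 'a' holds the central-difference estimate
         # computed with step size h0/v^(k-1); the rows are combined below
         # by Richardson extrapolation.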
      }
      if(show.details)  {
         cat("\n","first order approximations", "\n")
         print(a, 12)
      }

      #------------------------------------------------------------------------
      # 1 Applying Richardson Extrapolation to improve the accuracy of
      #   the first order derivatives. The algorithm is as follows:
      #
      #   -- For each column of the derivative matrix a,
      #      say, A1, A2, ..., Ar, by Richardson Extrapolation, calculate a
      #      new sequence of approximations B1, B2, ..., Br using the formula
      #
      #         B(i) =( A(i+1)*4^m - A(i) ) / (4^m - 1) ,  i=1,2,...,r-m
      #
      #      N.B. This formula assumes v=2.
      #
      #   -- Initially m is taken as 1 and then the process is repeated
      #      restarting with the latest improved values and increasing the
      #      value of m by one each time until m equals r-1.
      #
      # 2 Display the improved derivatives for each
      #   m from 1 to r-1 if the argument show.details=T.
      #
      # 3 Return the final improved derivative vector.
      #-------------------------------------------------------------------------

      for(m in 1:(r - 1)) {
         a <- (a[2:(r+1-m),,drop=FALSE]*(4^m)-a[1:(r-m),,drop=FALSE])/(4^m-1)
         if(show.details && m!=(r-1) )  {
            cat("\n","Richardson improvement group No. ", m, "\n")
            print(a[1:(r-m),,drop=FALSE], 12)
         }
      }
      return(c(a))
   }
   else stop("indicated method ", method, " not supported.")
}

jacobian <- function (func, x, method="Richardson", method.args=list(), ...)
   UseMethod("jacobian")

jacobian.default <- function(func, x, method="Richardson", method.args=list(), ...){
   f <- func(x, ...)
   n <- length(x)  # number of variables.
   if(method=="simple"){
      # very simple numerical approximation
      args <- list(eps=1e-4) # default
      args[names(method.args)] <- method.args
      eps <- args$eps
      df <- matrix(NA, length(f), n)
      for (i in 1:n) {
         dx <- x
         dx[i] <- dx[i] + eps
         df[,i] <- (func(dx, ...)-f)/eps
      }
      return(df)
   }
   else if(method=="complex"){ # Complex step Jacobian
      eps <- .Machine$double.eps
      h0 <- rep(0, n)
      h0[1] <- eps * 1i
      v <- try(func(x+h0, ...))
      if(inherits(v, "try-error"))
         stop("function does not accept complex argument as required by method 'complex'.")
      if(!is.complex(v))
         stop("function does not return a complex value as required by method 'complex'.")
      h0[1] <- 0
      jac <- matrix(NA, length(v), n)
      jac[, 1] <- Im(v)/eps
      if (n == 1) return(jac)
      for (i in 2:n) {
         h0[i] <- eps * 1i
         jac[, i] <- Im(func(x+h0, ...))/eps
         h0[i] <- 0
      }
      return(jac)
   }
   else if(method=="Richardson"){
      args <- list(eps=1e-4, d=0.0001,
                   zero.tol=sqrt(.Machine$double.eps/7e-7),
                   r=4, v=2, show.details=FALSE) # default
      args[names(method.args)] <- method.args
      eps <- args$eps
      d <- args$d
      r <- args$r
      v <- args$v
      a <- array(NA, c(length(f), r, n))
      h <- abs(d*x) + eps*(abs(x) < args$zero.tol)
      for(k in 1:r)  { # successively reduce h
         for(i in 1:n)  {
            a[,k,i] <- (func(x + h*(i==seq(n)), ...) -
                        func(x - h*(i==seq(n)), ...))/(2*h[i])
            #if((k != 1)) a[,(abs(a[,(k-1),i]) < 1e-20)] <- 0 #some func are unstable near zero
         }
         h <- h/v # Reduced h by 1/v.
      }
      for(m in 1:(r - 1)) {
         a <- (a[,2:(r+1-m),,drop=FALSE]*(4^m)-a[,1:(r-m),,drop=FALSE])/(4^m-1)
      }
      # drop second dim of a, which is now 1 (but not other dims, even if they are 1)
      return(array(a, dim(a)[c(1,3)]))
   }
   else stop("indicated method ", method, " not supported.")
}

hessian <- function (func, x, method="Richardson", method.args=list(), ...)
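# S3 generic. The default method below requires a scalar valued function;
# for method "Richardson" the Hessian is assembled from the lower-triangle
# columns of the D matrix returned by genD.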
   UseMethod("hessian")

hessian.default <- function(func, x, method="Richardson", method.args=list(), ...){
   if(1 != length(func(x, ...)))
      stop("hessian assumes a scalar valued function.")
   if(method=="complex"){ # Complex step hessian
      args <- list(eps=1e-4, d=0.1,
                   zero.tol=sqrt(.Machine$double.eps/7e-7), r=4, v=2)
      args[names(method.args)] <- method.args
      # the CSD part of this uses eps=.Machine$double.eps
      # but the jacobian is Richardson and uses method.args
      return(jacobian(func=function(fn, x, ...){
                 grad(func=fn, x=x, method="complex",
                      method.args=list(eps=.Machine$double.eps), ...)},
             x=x, fn=func, method.args=args, ...))
   }
   else if(method != "Richardson") stop("method not implemented.")
   args <- list(eps=1e-4, d=0.1,
                zero.tol=sqrt(.Machine$double.eps/7e-7),
                r=4, v=2, show.details=FALSE) # default
   args[names(method.args)] <- method.args
   D <- genD(func, x, method=method, method.args=args, ...)$D
   if(1 != nrow(D)) stop("BUG! should not get here.")
   H <- diag(NA, length(x))
   u <- length(x)
   for(i in 1:length(x))  {
      for(j in 1:i)  {
         u <- u + 1
         H[i,j] <- D[,u]
      }
   }
   H <- H + t(H)
   diag(H) <- diag(H)/2
   H
}

#######################################################################
#   Bates & Watts   D matrix calculation
#######################################################################

genD <- function(func, x, method="Richardson", method.args=list(), ...)
   UseMethod("genD")

genD.default <- function(func, x, method="Richardson", method.args=list(), ...){
   # additional cleanup by Paul Gilbert (March, 2006)
   # modified substantially by Paul Gilbert (May, 1992)
   # from original code by Xingqiao Liu, May, 1991.
   # This function is not optimized for S speed, but is organized in
   # the same way it could be (was) implemented in C, to facilitate checking.
   # v  reduction factor for Richardson iterations. This could
   #    be a parameter but the way the formula is coded it is assumed to be 2.
   if(method != "Richardson") stop("method not implemented.")
   args <- list(eps=1e-4, d=0.0001,
                zero.tol=sqrt(.Machine$double.eps/7e-7), r=4, v=2) # default
   args[names(method.args)] <- method.args
   eps <- args$eps
   d <- args$d
   r <- args$r
   v <- args$v
   if (v != 2) stop("The current code assumes v is 2 (the default).")
   #func.args <- list(...)
   #f0 <- do.call("func",append(list(x), func.args))
   f0 <- func(x, ...)  # f0 is the value of the function at x.
   p <- length(x)  # number of parameters (theta)
   h0 <- abs(d*x) + eps*(abs(x) < args$zero.tol)
   D <- matrix(0, length(f0), (p*(p + 3))/2)
   # length(f0) is the dim of the sample space
   # (p*(p + 3))/2 is the number of columns of matrix D
   # (first der. & lower triangle of Hessian)
   Daprox <- matrix(0, length(f0), r)
   Hdiag  <- matrix(0, length(f0), p)
   Haprox <- matrix(0, length(f0), r)
   for(i in 1:p)  { # each parameter - first deriv. & hessian diagonal
      h <- h0
      for(k in 1:r)  { # successively reduce h
         f1 <- func(x+(i==(1:p))*h, ...)
         f2 <- func(x-(i==(1:p))*h, ...)
         #f1 <- do.call("func",append(list(x+(i==(1:p))*h), func.args))
         #f2 <- do.call("func",append(list(x-(i==(1:p))*h), func.args))
         Daprox[,k] <- (f1 - f2)  / (2*h[i])    # F'(i)
         Haprox[,k] <- (f1-2*f0+f2)/ h[i]^2     # F''(i,i) hessian diagonal
         h <- h/v # Reduced h by 1/v.
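         # Daprox[,k] and Haprox[,k] now hold the first- and second-derivative
         # estimates for parameter i at step size h0/v^(k-1); both sequences
         # are Richardson-extrapolated below.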
         NULL
      }
      for(m in 1:(r - 1))  {
         for ( k in 1:(r-m))  {
            Daprox[,k] <- (Daprox[,k+1]*(4^m)-Daprox[,k])/(4^m-1)
            Haprox[,k] <- (Haprox[,k+1]*(4^m)-Haprox[,k])/(4^m-1)
            NULL
         }
      }
      D[,i] <- Daprox[,1]
      Hdiag[,i] <- Haprox[,1]
      NULL
   }

   u <- p
   for(i in 1:p)  { # 2nd derivative - do lower half of hessian only
      for(j in 1:i)  {
         u <- u + 1
         if (i==j) {
            D[,u] <- Hdiag[,i]
            NULL
         }
         else {
            h <- h0
            for(k in 1:r)  { # successively reduce h
               f1 <- func(x+(i==(1:p))*h + (j==(1:p))*h, ...)
               f2 <- func(x-(i==(1:p))*h - (j==(1:p))*h, ...)
               #f1 <- do.call("func", append(
               #        list(x+(i==(1:p))*h + (j==(1:p))*h), func.args))
               #f2 <- do.call("func",append(
               #        list(x-(i==(1:p))*h - (j==(1:p))*h), func.args))
               Daprox[,k] <- (f1 - 2*f0 + f2 - Hdiag[,i]*h[i]^2 -
                              Hdiag[,j]*h[j]^2)/(2*h[i]*h[j])  # F''(i,j)
               h <- h/v # Reduced h by 1/v.
            }
            for(m in 1:(r - 1))  {
               for ( k in 1:(r-m))  {
                  Daprox[,k] <- (Daprox[,k+1]*(4^m)-Daprox[,k])/(4^m-1)
                  NULL
               }
            }
            D[,u] <- Daprox[,1]
            NULL
         }
      }
   }
   D <- list(D=D, p=length(x), f0=f0, func=func, x=x, d=d,
             method=method, method.args=args) # Darray constructor (genD.default)
   class(D) <- "Darray"
   invisible(D)
}
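
#######################################################################
# A minimal usage sketch (illustrative only, not part of the package
# API; the function name, test function, and the point x0 are arbitrary
# assumptions). It applies grad, jacobian, hessian, and genD to
# sum(sin(x)), whose derivatives are known in closed form, so the
# returned values can be checked by eye: the gradient is cos(x0), the
# Jacobian of sin is diag(cos(x0)), and the Hessian is diag(-sin(x0)).
#######################################################################
sketchNumDeriv <- function(x0 = c(0.5, 1.0)) {
   f <- function(x) sum(sin(x))
   list(grad     = grad(f, x0),        # approximates cos(x0)
        jacobian = jacobian(sin, x0),  # approximates diag(cos(x0))
        hessian  = hessian(f, x0),     # approximates diag(-sin(x0))
        D        = genD(f, x0)$D)      # first derivs, then lower
                                       # triangle of the hessian
}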