dfoptim/0000755000176200001440000000000013743611342011720 5ustar liggesusersdfoptim/NAMESPACE0000644000176200001440000000021413743153630013135 0ustar liggesusers## ## NAMESPACE ## # Public routines export(hjk, hjkb) export(nmk, nmkb) export(mads) importFrom("stats", "runif") # S4 Methods dfoptim/demo/0000755000176200001440000000000013742122451012641 5ustar liggesusersdfoptim/demo/00Index0000644000176200001440000000004613216472314013775 0ustar liggesusersdfoptim derivative-free optimization dfoptim/demo/dfoptim.R0000644000176200001440000001124313743163304014432 0ustar liggesusers ####################################################################################################### rosbkext <- function(x){ # Extended Rosenbrock function n <- length(x) sum (100*(x[1:(n-1)]^2 - x[2:n])^2 + (x[1:(n-1)] - 1)^2) } np <- 10 set.seed(123) p.0 <- rnorm(np) xm1 <- nmk(fn=rosbkext, par=p.0) # maximum `fevals' is not sufficient to find correct minimum xm2 <- nmk(fn=rosbkext, par=p.0, control=list(maxfeval=5000)) # finds the correct minimum ans.optim <- optim(fn=rosbkext, par=p.0, method="Nelder-Mead", control=list(maxit=5000)) # terminates with inferior estimates ans.hj <- hjk(fn=rosbkext, par=p.0) # Hooke-Jeeves algorithm xmb <- nmkb(fn=rosbkext, par=p.0, lower=-2, upper=2) ####################################################################################################### ### A non-smooth problem nsf <- function(x) { f1 <- x[1]^2 + x[2]^2 f2 <- x[1]^2 + x[2]^2 + 10 * (-4*x[1] - x[2] + 4) f3 <- x[1]^2 + x[2]^2 + 10 * (-x[1] - 2*x[2] + 6) max(f1, f2, f3) } p0 <- rnorm(3) xm3 <- nmk(fn=nsf, par=p0) xm3.hj <- hjk(fn=nsf, par=p0) ans3 <- mads(p0, nsf, control=list(trace=FALSE)) c(xm3$value, xm3.hj$value, ans3$val) ####################################################################################################### ### Another non-smooth problem rosen <- function(x) { # Rosen JB & Suzuki S (1965), Construction of non-linear programming test problems, Comm. ACM, 8, p. 113 f1 <- x[1]^2 + x[2]^2 + 2*x[3]^2 + x[4]^2 - 5*x[1] - 5*x[2] - 21*x[3] + 7*x[4] f2 <- f1 + 10 * (sum(x^2) + x[1] - x[2] + x[3] - x[4] - 8) f3 <- f1 + 10 * (sum(x^2) + x[2]^2 + x[4]^2 - x[1] - x[4] - 10) f4 <- f1 + 10 * (sum(x^2) + x[1]^2 - x[4]^2 + 2*x[1] - x[2] - x[4] - 5) max(f1, f2, f3, f4) } # Global minimum value is -44 @ (0, 1, 2, -1) p0 <- rnorm(4) xm4 <- nmk(fn=rosen, par=p0) xm4.hj <- hjk(fn=rosen, par=p0) xm4b <- nmkb(fn=rosen, par=p0, lower=-2, upper=3) ans3 <- mads(p0, rosen, control=list(trace=FALSE)) ####################################################################################################### ### Non-smooth problem #3 hald <- function(x) { #Hald J & Madsen K (1981), Combined LP and quasi-Newton methods for minimax optimization, Mathematical Programming, 20, p.42-62. 
i <- 1:21
t <- -1 + (i - 1)/10
f <- (x[1] + x[2] * t) / ( 1 + x[3]*t + x[4]*t^2 + x[5]*t^3) - exp(t)
max(abs(f))
}
# Correct solution: x* = (
# Minimum value = 0.002
p0 <- runif(5)
xm5 <- nmk(fn=hald, par=p0)
xm5.hj <- hjk(fn=hald, par=p0)
xm5b <- nmkb(fn=hald, par=p0, lower=c(0,0,0,0,-2), upper=4)
ans3 <- mads(p0, hald, control=list(trace=FALSE))
#################################
## Rosenbrock Banana function
#
fr <- function(x) {
n <- length(x)
x1 <- x[2:n]
x2 <- x[1:(n-1)]
sum(100 * (x2 - x1 * x1)^2 + (1 - x1)^2)
}
n <- 10
p0 <- runif(n, 0, 2)
ans1 <- nmk(p0, fr, control=list(maxfeval=20000))
ans2 <- hjk(p0, fr, control=list(maxfeval=20000))
ans3 <- mads(p0, fr, control=list(trace=FALSE))
c(ans1$value, ans2$value, ans3$val)
################################################
# EVD52
evd52 <- function(x){
f <- rep(NA, 6)
f[1] <- sum(x[1:3]^2) - 1
f[2] <- sum(x[1:2]^2) + (x[3] - 2)^2
f[3] <- sum(x[1:3]) - 1
f[4] <- x[1] + x[2] - x[3] + 1
f[5] <- 2*x[1]^3 + 6*x[2]^2 + 2*(5*x[3] - x[1] + 1)^2
f[6] <- x[1]^2 - 9*x[3]
return(max(f))
}
# True minimum = 3.5997193
p0 <- runif(6)
ans1 <- nmk(p0, evd52, control=list(maxfeval=20000))
ans2 <- hjk(p0, evd52, control=list(maxfeval=20000))
ans3 <- mads(p0, evd52, control=list(trace=FALSE))
c(ans1$value, ans2$value, ans3$val)
###################################################
hs78 <- function(x){
f <- rep(NA, 3)
f[1] <- sum(x^2) - 10
f[2] <- x[2]*x[3] - 5*x[4]*x[5]
f[3] <- x[1]^3 + x[2]^3 + 1
F <- prod(x) + 10*sum(abs(f))
return(F)
}
# True minimum = -2.9197004
p0 <- c(-2,1.5,2,-1,-1) + runif(5)
ans1 <- nmk(p0, hs78, control=list(maxfeval=20000))
ans2 <- hjk(p0, hs78, control=list(maxfeval=20000))
ans3 <- mads(p0, hs78, control=list(trace=FALSE))
c(ans1$value, ans2$value, ans3$val)
###################################################
elattar <- function(x){
i <- 1:51
ti <- 0.1*(i-1)
yi <- 0.5*exp(-ti) - exp(-2*ti) + 0.5*exp(-3*ti) + 1.5*exp(-1.5*ti)*sin(7*ti) + exp(-2.5*ti)*sin(5*ti)
F <- sum(abs(x[1]*exp(-x[2]*ti)*cos(x[3]*ti + x[4]) + x[5]*exp(-x[6]*ti) - yi))
return(F)
}
# True minimum = 0.5598131
p0 <- c(2,2,7,0,-2,1) + runif(6)
ans1 <- nmk(p0, elattar, control=list(maxfeval=20000, regsimp=TRUE))
ans2 <- hjk(p0, elattar, control=list(maxfeval=20000))
ans3 <- mads(p0, elattar, control=list(trace=FALSE))
c(ans1$value, ans2$value, ans3$val)
dfoptim/man/0000755000176200001440000000000013742122451012470 5ustar liggesusersdfoptim/man/nmkb.Rd0000644000176200001440000001122613743157060013715 0ustar liggesusers\name{nmk}
\alias{nmk}
\alias{nmkb}
\title{
Nelder-Mead optimization algorithm for derivative-free optimization
}
\description{
An implementation of the Nelder-Mead algorithm for derivative-free optimization.  This allows bounds to be placed on parameters.  Bounds are enforced by means of a parameter transformation.}
\usage{
nmk(par, fn, control = list(), ...)
nmkb(par, fn, lower=-Inf, upper=Inf, control = list(), ...)
}
\arguments{
\item{par}{A starting vector of parameter values. Must be feasible, i.e. lie strictly between lower and upper bounds.}
\item{fn}{
Nonlinear objective function that is to be optimized.
A scalar function that takes a real vector as argument and
returns a scalar that is the value of the function at that point
(see details).}
\item{lower}{Lower bounds on the parameters.  A vector of the same length as the parameters.  If a single value is specified, it is assumed that the same lower bound applies to all parameters.}
\item{upper}{Upper bounds on the parameters.  A vector of the same length as the parameters.
If a single value is specified, it is assumed that the same upper bound applies to all parameters.}
\item{control}{A list of control parameters.  See *Details* for more information.
}
\item{\dots}{Additional arguments passed to \code{fn}
}
}
\details{
Argument \code{control} is a list specifying any changes to default values of algorithm control parameters for the outer loop.  Note that the names of these must be specified completely.  Partial matching will not work.
The list items are as follows:

\code{tol} Convergence tolerance.  Iteration is terminated when the absolute difference in function value between successive iterations is below \code{tol}.  Default is 1.e-06.

\code{maxfeval}: Maximum number of objective function evaluations allowed.  Default is min(5000, max(1500, 20*length(par)^2)).

\code{regsimp} A logical variable indicating whether the starting parameter configuration is a regular simplex.  Default is TRUE.

\code{maximize} A logical variable indicating whether the objective function should be maximized.  Default is FALSE.

\code{restarts.max} Maximum number of times the algorithm should be restarted before declaring failure. Default is 3.

\code{trace} A logical variable indicating whether information on the progress of the iteration should be printed on the console.  Default is FALSE.
}
\value{
A list with the following components:
\item{par}{Best estimate of the parameter vector found by the algorithm.}
\item{value}{The value of the objective function at termination.}
\item{feval}{The number of times the objective \code{fn} was evaluated.
}
\item{restarts}{The number of times the algorithm had to be restarted when it stagnated.
}
\item{convergence}{An integer code indicating type of convergence.  \code{0} indicates successful convergence.  Positive integer codes indicate failure to converge.
}
\item{message}{Text message indicating the type of convergence or failure.
}
}
\references{
C.T. Kelley (1999), Iterative Methods for Optimization, SIAM.
}
\author{
Ravi Varadhan, Johns Hopkins University
URL:http://www.jhsph.edu/agingandhealth/People/Faculty_personal_pages/Varadhan.html
}
\note{
This algorithm is based on the Matlab code of Prof. C.T. Kelley, given in his book "Iterative methods for optimization".
It is implemented here with the permission of Prof. Kelley and SIAM.  However, there are some non-trivial modifications of the algorithm.
}
\seealso{
\code{\link{optim}}, \code{\link{hjk}}, \code{\link{mads}}
}
\examples{
rosbkext <- function(x){
# Extended Rosenbrock function
n <- length(x)
sum (100*(x[1:(n-1)]^2 - x[2:n])^2 + (x[1:(n-1)] - 1)^2)
}

np <- 10
set.seed(123)

p0 <- rnorm(np)
xm1 <- nmk(fn=rosbkext, par=p0) # maximum `fevals' is not sufficient to find correct minimum
xm1b <- nmkb(fn=rosbkext, par=p0, lower=-2, upper=2)

### A non-smooth problem
hald <- function(x) {
#Hald J & Madsen K (1981), Combined LP and quasi-Newton methods
#for minimax optimization, Mathematical Programming, 20, p.42-62.
i <- 1:21
t <- -1 + (i - 1)/10
f <- (x[1] + x[2] * t) / ( 1 + x[3]*t + x[4]*t^2 + x[5]*t^3) - exp(t)
max(abs(f))
}

p0 <- runif(5)
xm2 <- nmk(fn=hald, par=p0)
xm2b <- nmkb(fn=hald, par=p0, lower=c(0,0,0,0,-2), upper=4)

## Another non-smooth function
nsf <- function(x) {
	f1 <- x[1]^2 + x[2]^2
	f2 <- x[1]^2 + x[2]^2 + 10 * (-4*x[1] - x[2] + 4)
	f3 <- x[1]^2 + x[2]^2 + 10 * (-x[1] - 2*x[2] + 6)
	max(f1, f2, f3)
}
par0 <- c(1, 1)                                 # true min 7.2 at (1.2, 2.4)
nmk(par0, nsf) # fmin=8 at xmin=(2,2)
}
\keyword{optimize}
dfoptim/man/dfoptim-package.Rd0000644000176200001440000000256713743073620016026 0ustar liggesusers\name{dfoptim}
\alias{dfoptim}
\docType{package}
\title{
Derivative-Free Optimization
}
\description{
Derivative-Free optimization algorithms. These algorithms do not require gradient information.
More importantly, they can be used to solve non-smooth optimization problems.
They can also handle box constraints on parameters.
}
\details{
\tabular{ll}{
Package: \tab dfoptim\cr
Type: \tab Package\cr
Version: \tab 2020.10-1\cr
Date: \tab 2020-10-19\cr
License: \tab GPL-2 or greater\cr
LazyLoad: \tab yes\cr
}
Derivative-Free optimization algorithms. These algorithms do not require gradient information.
More importantly, they can be used to solve non-smooth optimization problems.
These algorithms were translated from the Matlab code of Prof. C.T. Kelley, given in his book "Iterative methods for optimization".
However, there are some non-trivial modifications of the algorithm. \cr
Currently, the Nelder-Mead, Hooke-Jeeves, and MADS algorithms are implemented.  In the future, more derivative-free algorithms may be added.
}
\author{
Ravi Varadhan, Johns Hopkins University \cr
URL: http://www.jhsph.edu/agingandhealth/People/Faculty_personal_pages/Varadhan.html \cr
Hans W. Borchers, ABB Corporate Research \cr
Vincent Bechard, HEC Montreal (Montreal University) \cr
Maintainer: Ravi Varadhan
}
\references{
C.T. Kelley (1999), Iterative Methods for Optimization, SIAM.
}
\keyword{optimize}
dfoptim/man/hookejeeves.Rd0000644000176200001440000001050713743073522015276 0ustar liggesusers\name{hjk}
\alias{hjk}
\alias{hjkb}
\title{
  Hooke-Jeeves derivative-free minimization algorithm
}
\description{
  An implementation of the Hooke-Jeeves algorithm for derivative-free
  optimization.  A bounded and an unbounded version are provided.
}
\usage{
hjk(par, fn, control = list(), ...)

hjkb(par, fn, lower = -Inf, upper = Inf, control = list(), ...)
}
\arguments{
  \item{par}{Starting vector of parameter values.
    The initial vector may lie on the boundary. If \code{lower[i]=upper[i]}
    for some \code{i}, the \code{i}-th component of the solution vector will
    simply be kept fixed.}
  \item{fn}{Nonlinear objective function that is to be optimized.
            A scalar function that takes a real vector as argument and
            returns a scalar that is the value of the function at that point.}
  \item{lower, upper}{Lower and upper bounds on the parameters.
    A vector of the same length as the parameters.
    If a single value is specified, it is assumed that the same bound applies
    to all parameters.
    The starting parameter values must lie within the bounds.}
  \item{control}{A list of control parameters.
    See \bold{Details} for more information.}
  \item{\dots}{Additional arguments passed to \code{fn}.}
}
\details{
Argument \code{control} is a list specifying changes to default values of
algorithm control parameters.
Note that parameter names may be abbreviated as long as they are unique.

The list items are as follows:
\describe{
\item{\code{tol}}{Convergence tolerance.
Iteration is terminated when the step length of the main loop becomes smaller than \code{tol}. This does \emph{not} imply that the optimum is found with the same accuracy. Default is 1.e-06.} \item{\code{maxfeval}}{Maximum number of objective function evaluations allowed. Default is Inf, that is no restriction at all.} \item{\code{maximize}}{A logical indicating whether the objective function is to be maximized (TRUE) or minimized (FALSE). Default is FALSE.} \item{\code{target}}{A real number restricting the absolute function value. The procedure stops if this value is exceeded. Default is Inf, that is no restriction.} \item{\code{info}}{A logical variable indicating whether the step number, number of function calls, best function value, and the first component of the solution vector will be printed to the console. Default is FALSE.} } If the minimization process threatens to go into an infinite loop, set either \code{maxfeval} or \code{target}. } \value{ A list with the following components: \item{par}{Best estimate of the parameter vector found by the algorithm.} \item{value}{value of the objective function at termination.} \item{convergence}{indicates convergence (\code{=0}) or not (\code{=1}).} \item{feval}{number of times the objective \code{fn} was evaluated.} \item{niter}{number of iterations in the main loop.} } \references{ C.T. Kelley (1999), Iterative Methods for Optimization, SIAM. Quarteroni, Sacco, and Saleri (2007), Numerical Mathematics, Springer. } \author{ Hans W Borchers } \note{ This algorithm is based on the Matlab code of Prof. C. T. Kelley, given in his book ``Iterative methods for optimization". It is implemented here with the permission of Prof. Kelley. This version does not (yet) implement a cache for storing function values that have already been computed as searching the cache makes it slower. } \seealso{ \code{\link{optim}}, \code{\link{nmk}} } \examples{ ## Hooke-Jeeves solves high-dim. Rosenbrock function rosenbrock <- function(x){ n <- length(x) sum (100*(x[1:(n-1)]^2 - x[2:n])^2 + (x[1:(n-1)] - 1)^2) } par0 <- rep(0, 10) hjk(par0, rosenbrock) hjkb(c(0, 0, 0), rosenbrock, upper = 0.5) # $par # [1] 0.50000000 0.25742722 0.06626892 ## Hooke-Jeeves does not work well on non-smooth functions nsf <- function(x) { f1 <- x[1]^2 + x[2]^2 f2 <- x[1]^2 + x[2]^2 + 10 * (-4*x[1] - x[2] + 4) f3 <- x[1]^2 + x[2]^2 + 10 * (-x[1] - 2*x[2] + 6) max(f1, f2, f3) } par0 <- c(1, 1) # true min 7.2 at (1.2, 2.4) hjk(par0, nsf) # fmin=8 at xmin=(2,2) } \keyword{ optimize } dfoptim/man/mads.Rd0000644000176200001440000001266113743156772013727 0ustar liggesusers\name{mads} \alias{mads} \alias{mads} \title{ Mesh Adaptive Direct Searches (MADS) algorithm for derivative-free and black-box optimization } \description{ An implementation of the Mesh Adaptive Direct Searches (MADS) algorithm for derivative-free and black-box optimization. It uses a series of variable size meshes to search the space and to converge to (local) minima with mathematical proof of convergence. It is usable on unbounded and bounded unconstrained problems. The objective function can return ``NA'' if out-of-bound or violating constraints (strict barrier approach for constraints), or a penalty can be added to the objective function.} \usage{ mads(par, fn, lower=-Inf, upper=Inf, scale=1, control = list(), ...) } \arguments{ \item{par}{A starting vector of parameter values. Must be feasible, i.e. 
lie strictly between lower and upper bounds.}
\item{fn}{
Noisy, non-differentiable, non-convex, piecewise or nonlinear objective function that is to be optimized. It takes a real vector as argument and returns a scalar or ``NA'' that is the value of the function at that point (see details).}
\item{lower}{Lower bounds on the parameters.  A vector of the same length as the parameters.  If a single value is specified, it is assumed that the same lower bound applies to all parameters. If all lower bounds are -Inf and all upper bounds are Inf, then the problem is treated as unbounded.}
\item{upper}{Upper bounds on the parameters.  A vector of the same length as the parameters.  If a single value is specified, it is assumed that the same upper bound applies to all parameters. If all lower bounds are -Inf and all upper bounds are Inf, then the problem is treated as unbounded.}
\item{scale}{Optional scaling, default is 1.  A vector of the same length as the parameters.  If a single value is specified, it is assumed that the same scale factor applies to all parameters. This scale factor can be customized for each parameter allowing non-proportional moves in the space (normally used for unbounded problems).
}
\item{control}{A list of control parameters.  See *Details* for more information.
}
\item{\dots}{Additional arguments passed to \code{fn}
}
}
\details{
Argument \code{control} is a list specifying any changes to default values of algorithm control parameters for the outer loop.
The list items are as follows:

\code{tol} Convergence tolerance.  Iteration is terminated when the absolute difference in function value between successive iterations is below \code{tol}.  Default is 1.e-06.

\code{maxfeval}: Maximum number of objective function evaluations allowed.  Default is 10000.

\code{trace} A logical variable indicating whether information is printed on the console during execution.  Default is TRUE.

\code{maximize} A logical variable indicating whether the objective function should be maximized.  Default is FALSE (hence default is minimization).

\code{pollStyle} A string variable indicating the density of the poll set, i.e., the number of vectors in the positive basis.  Choices are: ``lite'' (n+1 points) or ``full'' (2n points).  Default is ``lite''.

\code{deltaInit} A numerical value specifying the initial mesh size, between ``tol'' and 1 (mesh size is limited to 1).  Default is 0.01.

\code{expand} A numerical value >1 specifying the expansion (if success) and contraction (if no success) factor of the mesh at the end of an iteration.  Default is 4.

\code{lineSearch} An integer value indicating the maximum number of search steps to consider.  Line search is performed at the end of a successful poll set evaluation, along the line going from the last to the new ``best'' solution.  Stepsize will be automatically increased according to the Fibonacci series.  Default is 20.  Set to -1 to disable the feature.

\code{seed} Seed value for the internal pseudo-random number generator.  Default is 1138.
}
\value{
A list with the following components:
\item{par}{Best estimate of the parameter vector found by the algorithm.}
\item{value}{The value of the objective function at termination.}
\item{feval}{The number of times the objective \code{fn} was evaluated.
}
\item{convergence}{Final mesh size; at or below \code{tol} if the algorithm converged before reaching \code{maxfeval}.
}
\item{iterlog}{A data frame recording, for each iteration, the number of function evaluations, the mesh size, the line-search success, and the best objective value.  Returned only when \code{trace=TRUE}; otherwise \code{NA}.
}
}
\author{
Vincent Bechard, HEC Montreal (Montreal University)
URL:https://www.linkedin.com/in/vincentbechard
}
\note{
This algorithm is based on the Lower Triangular method described in the reference.
} \seealso{ \code{\link{optim}}, \code{\link{hjk}}, \code{\link{nmk}} } \examples{ rosbkext <- function(x){ # Extended Rosenbrock function n <- length(x) sum (100*(x[1:(n-1)]^2 - x[2:n])^2 + (x[1:(n-1)] - 1)^2) } np <- 10 p0 <- rnorm(np) ans1 <- mads(fn=rosbkext, par=p0, lower=-10, upper=10, scale=1, control=list(trace=FALSE)) ### A non-smooth problem from Hock & Schittkowski #78 hs78 <- function(x){ f <- rep(NA, 3) f[1] <- sum(x^2) - 10 f[2] <- x[2]*x[3] - 5*x[4]*x[5] f[3] <- x[1]^3 + x[2]^3 + 1 F <- prod(x) + 10*sum(abs(f)) return(F) } p0 <- c(-2,1.5,2,-1,-1) ans2 <- mads(p0, hs78, control=list(trace=FALSE)) #minimum value around -2.81 } \keyword{optimize} dfoptim/DESCRIPTION0000644000176200001440000000137613743611342013435 0ustar liggesusersPackage: dfoptim Type: Package Title: Derivative-Free Optimization Description: Derivative-Free optimization algorithms. These algorithms do not require gradient information. More importantly, they can be used to solve non-smooth optimization problems. Depends: R (>= 2.10.1) Version: 2020.10-1 Date: 2020-10-19 Author: Ravi Varadhan[aut, cre], Johns Hopkins University, Hans W. Borchers[aut], ABB Corporate Research, and Vincent Bechard[aut], HEC Montreal (Montreal University) Maintainer: Ravi Varadhan URL: https://coah.jhu.edu/people/Faculty_personal_Pages/Varadhan.html License: GPL (>= 2) LazyLoad: yes Repository: CRAN NeedsCompilation: no Packaged: 2020-10-19 21:43:04 UTC; rvaradhan Date/Publication: 2020-10-20 16:40:02 UTC dfoptim/NEWS0000644000176200001440000000270613743403740012425 0ustar liggesusers------------------------------------------------------------------------------ dfoptim NEWS ------------------------------------------------------------------------------ Changes in version 2020.10-1 (2020-10-19) o Added a new derivative free optimizer called MADS: mesh-adaptive direct solver (contributed by Vincent Bechard of Montreal University) o In Nelder-Mead, changed the `sgrad' computation back to the original cross-product rather than the (correct) inverse, since this seems to provide better performance Changes in version 2018.2-1 (2018-4-01) o Set oshrink=1 to enable "restarting" of Nelder-Mead due to stagnation (thanks to Simon Wessing) Changes in version 2017.12-1 (2017-12-20) o fixed a bug in the code, which impacts the "restarting" of Nelder-Mead due to stagnation (thanks to Simon Wessing) Changes in version 2016.7-1 (2011-07-08) o Used a slightly modified code for hjk() and hjkb() Changes in version 2011.8-1 (2011-08-12) o Bounds constrained Hooke-Jeeves hjkb() Changes in version 2011.7-2 (2011-07-26) o Bounds constrained Nelder-Mead nmkb(). Changes in version 2011.7-1 o Hooke-Jeeves minimization routine hjk(). Changes in version 2011.5-1 o Fixed minor bug in the re-definition of objective function inside for maximization. Initial version o Nelder-Mead minimization routine nmk(). dfoptim/R/0000755000176200001440000000000013742122451012116 5ustar liggesusersdfoptim/R/nmk.R0000644000176200001440000001077313743403474013046 0ustar liggesusersnmk <- function(par, fn, control=list(), ...) 
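# Editorial note -- a small worked example of the default evaluation budget set
# in `ctrl' just below (a sketch added for clarity, not part of the original
# source): maxfeval = min(5000, max(1500, 20*length(par)^2)), so a 10-parameter
# problem gets min(5000, max(1500, 2000)) = 2000 evaluations, and the hard cap
# of 5000 only kicks in once length(par) >= 16 (since 20*16^2 = 5120 > 5000).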
{ ctrl <- list(tol=1.e-06, maxfeval = min(5000, max(1500, 20*length(par)^2)), regsimp=TRUE, maximize=FALSE, restarts.max=3, trace=FALSE) namc <- match.arg(names(control), choices=names(ctrl), several.ok=TRUE) if (!all(namc %in% names(ctrl))) stop("unknown names in control: ", namc[!(namc %in% names(ctrl))]) if (!is.null(names(control))) ctrl[namc] <- control ftol <- ctrl$tol maxfeval <- ctrl$maxfeval regsimp <- ctrl$regsimp restarts.max <- ctrl$restarts.max maximize <- ctrl$maximize trace <- ctrl$trace if (maximize) fnm <- function(par, ...) -fn(par, ...) else fnm <- function(par, ...) fn(par, ...) x0 <- par n <- length(par) if (n == 1) stop(call. = FALSE, "Use `optimize' for univariate optimization") if (n > 30) warning("Nelder-Mead should not be used for high-dimensional optimization") V <- cbind(rep(0, n), diag(n)) f <- rep(0, n+1) f[1] <- fnm(x0, ...) V[, 1] <- x0 scale <- max(1, sqrt(sum(x0^2))) if (regsimp) { alpha <- scale / (n * sqrt(2)) * c(sqrt(n+1) + n - 1, sqrt(n+1) -1) V[, -1] <- (x0 + alpha[2]) diag(V[, -1]) <- x0[1:n] + alpha[1] for (j in 2:ncol(V)) f[j] <- fnm(V[,j], ...) } else { V[, -1] <- x0 + scale * V[, -1] for (j in 2:ncol(V)) f[j] <- fnm(V[,j], ...) } f[is.nan(f)] <- Inf nf <- n + 1 ord <- order(f) f <- f[ord] V <- V[, ord] rho <- 1 gamma <- 0.5 chi <- 2 sigma <- 0.5 conv <- 1 oshrink <- 1 restarts <- 0 orth <- 0 dist <- f[n+1] - f[1] v <- V[, -1] - V[, 1] delf <- f[-1] - f[1] diam <- sqrt(colSums(v^2)) # sgrad <- c(solve(t(v), delf)) sgrad <- c(crossprod(t(v), delf)) alpha <- 1.e-04 * max(diam) / sqrt(sum(sgrad^2)) simplex.size <- sum(abs(V[, -1] - V[, 1])) / max(1, sum(abs(V[, 1]))) itc <- 0 conv <- 0 message <- "Succesful convergence" while (nf < maxfeval & restarts < restarts.max & dist > ftol & simplex.size > 1.e-06) { fbc <- mean(f) happy <- 0 itc <- itc + 1 xbar <- rowMeans(V[, 1:n]) xr <- (1 + rho) * xbar - rho * V[, n+1] fr <- fnm(xr, ...) nf <- nf + 1 if(is.nan(fr)) fr <- Inf if (fr >= f[1] & fr < f[n]) { happy <- 1 xnew <- xr fnew <- fr } else if (fr < f[1]) { xe <- (1 + rho * chi) * xbar - rho * chi * V[, n+1] fe <- fnm(xe, ...) if(is.nan(fe)) fe <- Inf nf <- nf + 1 if (fe < fr) { xnew <- xe fnew <- fe happy <- 1 } else { xnew <- xr fnew <- fr happy <- 1 } } else if (fr >= f[n] & fr < f[n+1]) { xc <- (1 + rho * gamma) * xbar - rho * gamma * V[, n+1] fc <- fnm(xc, ...) if(is.nan(fc)) fc <- Inf nf <- nf + 1 if (fc <= fr) { xnew <- xc fnew <- fc happy <- 1 } } else if (fr >= f[n+1]) { xc <- (1 - gamma) * xbar + gamma * V[, n+1] fc <- fnm(xc, ...) if(is.nan(fc)) fc <- Inf nf <- nf + 1 if (fc < f[n+1]) { xnew <- xc fnew <- fc happy <- 1 } } if (happy == 1 & oshrink == 1) { fbt <- mean(c(f[1:n], fnew)) delfb <- fbt - fbc armtst <- alpha * sum(sgrad^2) if (delfb > - armtst/n) { if (trace) cat("Trouble - restarting: \n") restarts <- restarts + 1 orth <- 1 diams <- min(diam) sx <- sign(0.5 * sign(sgrad)) happy <- 0 V[, -1] <- V[, 1] diag(V[, -1]) <- diag(V[, -1]) - diams * sx[1:n] } } if (happy == 1) { V[, n+1] <- xnew f[n+1] <- fnew ord <- order(f) V <- V[, ord] f <- f[ord] } else if (happy == 0 & restarts < restarts.max) { if (orth == 0) orth <- 1 V[, -1] <- V[, 1] - sigma * (V [, -1] - V[, 1]) for (j in 2:ncol(V)) f[j] <- fnm(V[,j], ...) 
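        # Editorial comment (interpretation added for clarity, not original):
        # reaching this branch means no trial point improved (`happy == 0'), so
        # each non-best vertex was just replaced by V[,1] - sigma*(V[,j] - V[,1]),
        # i.e. reflected about the best vertex and contracted by sigma = 0.5,
        # and re-evaluated; the evaluation counter is therefore advanced by n below.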
## kmm change nf <- nf + n ord <- order(f) V <- V[, ord] f <- f[ord] } v <- V[, -1] - V[, 1] delf <- f[-1] - f[1] diam <- sqrt(colSums(v^2)) simplex.size <- sum(abs(v)) / max(1, sum(abs(V[, 1]))) f[is.nan(f)] <- Inf dist <- f[n+1] - f[1] # sgrad <- c(solve(t(v), delf)) sgrad <- c(crossprod(t(v), delf)) if (trace & !(itc %% 2)) cat("iter: ", itc, "\n", "value: ", f[1], "\n") } if (dist <= ftol | simplex.size <= 1.e-06) { conv <- 0 message <- "Successful convergence" } else if (nf >= maxfeval) { conv <- 1 message <- "Maximum number of fevals exceeded" } else if (restarts >= restarts.max) { conv <- 2 message <- "Stagnation in Nelder-Mead" } return(list(par = V[, 1], value=f[1]*(-1)^maximize, feval=nf, restarts=restarts, convergence=conv, message=message)) } dfoptim/R/mads.R0000644000176200001440000002174713743154102013177 0ustar liggesusers mads <- (function(par, fn, lower = -Inf, upper = Inf, scale = 1.0, control = list(), ...){ # par : initial solution, where to start from, x0 # fn : name of the function to optimize, f(x) # lower, upper: optional bounds; can be single value (same for each component in x) or vectors same length of x # scaling: multiples of delta for unbalanced variables (usually for unbounded problems); one value for all or one value per component # control: see descriptions in mads.control() mads.unbounded <- function(par, fn, scale = 1.0, control = list(), ...){ return(mads(par, fn, -Inf, Inf, scale, control)) } mads.bounded <- function(par, fn, lower = -Inf, upper = Inf, scale = 1.0, control = list(), ...){ return(mads(par, fn, lower, upper, scale, control)) } #-- Check initial solution and bounds if (!is.numeric(par)) stop("Argument 'par' must be a numeric vector.", call. = FALSE) nvar <- length(par) if (nvar == 1) stop("For univariate functions use some different method.", call. = FALSE) #-- Handle bounds if(!is.numeric(lower) || !is.numeric(upper)) stop("Lower and upper limits must be numeric.", call. = FALSE) if (length(lower) == 1) lower <- rep(lower, nvar) if (length(upper) == 1) upper <- rep(upper, nvar) if (!all(lower <= upper)) stop("All lower limits must be smaller than upper limits.", call. = FALSE) if (!all(lower <= par) || !all(par <= upper)) stop("Infeasible starting values -- check limits.", call. = FALSE) if(!is.numeric(scale)) stop("Scaling factor must be numeric, put 1 if you don't know what to do", call. = FALSE) if (length(scale) == 1) scale <- rep(scale, nvar) isBounded = TRUE offset = rep(0, nvar) if(sum(is.finite(lower))==0 & sum(is.finite(upper))==0) isBounded=FALSE else if(sum(is.finite(lower))param$tol)) stop("Scaling factor smaller than tolerance, please revise.", call. = FALSE) #-- Prepare black-box ---------- set.seed(param$seed) fun <- match.fun(fn) if(isBounded){ blackbox <- (function(x, lb=offset, span=scaling) {ifelse(param$maximize,-1,1) * fun(lb+(x+1)*span/2, ...)}) best_x = 2*(par-offset)/scaling-1 # solver will work in [-1, 1]^n space } else { blackbox <- (function(x, lb=offset, span=scaling) {ifelse(param$maximize,-1,1) * fun(lb+x*span, ...)}) best_x = (par-offset)/scaling # solver will work in [-1, 1]^n space } #-- Prepare the solver and the inputs delta = param$deltaInit # current mesh size pollSize = ifelse(param$pollStyle=="lite", nvar+1, 2*nvar) n = 1 # number of objective calls fibSeq <- NULL # fibonacci sequence for exploding exploration step sizes (feel lucky? go faster!) 
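  # Editorial note (illustration only; numbers assume the default lineSearch = 20
  # documented in mads.Rd): the loop below fills fibSeq with Fibonacci numbers,
  # e.g. lineSearch = 6 gives c(1, 2, 3, 5, 8, 13), so after a successful poll
  # the line search tries points at 1, 2, 3, 5, ... times (mesh size * scale)
  # along the direction of the last improvement, letting the step grow quickly
  # while that direction keeps paying off.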
zoom = param$expand if(param$lineSearch>2){ fibSeq <- c(1,2) for(k in 3:param$lineSearch) fibSeq <- c(fibSeq, fibSeq[k-1]+fibSeq[k-2]) } #best_x = (par-offset)/scaling # solver will work in [-1, 1]^n space best_f = blackbox(best_x) # let's start by this! # Trigger iterations goAhead = TRUE output <- list(par=best_x, value=best_f, feval=1, convergence=param$deltaInit, iterlog=NULL) iterlog <- data.frame(n=1, delta=param$deltaInit, searchSuccess=0, f=best_f) while(goAhead){ foundBetter = FALSE searchSuccess = 0 current_x <- best_x # get a pollset and evaluate it pollX = .pollSet(best_x, pollSize, delta, scale) pollY = apply(pollX, 1, (function(x){ ifelse(all(lower<=x & x<=upper), blackbox(x), NA) })) n = n + sum(!is.na(pollY)) pollY.min = which.min(pollY) if(length(pollY.min)>0) {if(pollY[pollY.min] < best_f){ foundBetter = TRUE best_f = pollY[pollY.min] best_x = pollX[pollY.min,] }} # possibly run the line search (stop when decrease stops) if(foundBetter & !is.null(fibSeq)){ lsearchX = .linesearchSet(best_x, best_x - current_x, delta, fibSeq, scale) for(k in 1:length(fibSeq)) { lsearchY = ifelse(all(lower<=lsearchX[k,] & lsearchX[k,]<=upper), blackbox(lsearchX[k,]), NA) if(is.numeric(lsearchY)) { n = n+1 if(lsearchY1) { delta = 1 } #prevent useless iteration because delta is too large } else{ delta = delta / sqrt(zoom) } if(delta<=param$tol | n>=param$maxfeval) { goAhead = FALSE } iterlog <- rbind(iterlog, c(n, delta, searchSuccess, best_f)) if(param$trace) { message(paste("n=", n, " lsearch=", searchSuccess, " f=", best_f, " delta=", delta)) } } # Terminate solver if(isBounded) output$par = offset+(best_x+1)*scaling/2 else output$par = offset+best_x*scaling output$value = best_f output$feval = n output$convergence = delta output$iterlog= if (param$trace) iterlog else NA return(output) }) # # --------Internal functions, don't call, don't touch------------------------------------------------------------------- # .pollSet <- (function(center, npoints, meshSize, customScale){ # Build positive basis and finalize the pollset # - center is where to build around in [-1,1]^n # - npoints is the size of the basis (nvar+1 or 2*nvar) # - delta is the zoom factor (all points within +-delta) n = npoints m = length(center) # lower triangular, then shuffle all, then squeeze in [-1, 1]^m (original LT-MADS implementation) tempx = matrix(0, nrow=m, ncol=m) stepsize = ifelse(meshSize>1, 1, floor(m/(meshSize))) for(i in 1:m) { # dominant terms on the diagonal tempx[i,i] = stepsize * sign(1-2*runif(1)) # lesser influent terms on lower triangle for(j in 1:i-1) { tempx[i,j] = floor(stepsize*(1-2*runif(1))) } } tempx = apply(tempx,1,function(x) {sample(x)}) #shuffle rows tempx = apply(tempx,1,function(x) {sample(x)}) #shuffle columns, remember sample() returns transpose! # complete to positive basis if(n == m+1) { # lite poll set, usually works fine, to use in high dimensions tempx = meshSize * tempx avrg = colMeans(tempx) tempx = rbind(tempx, -avrg/sqrt(avrg*avrg)) } else { # exhaustive poll set tempx = meshSize * rbind(tempx, -tempx) } # return centered poll set return(t(apply(tempx,1,function(x, offset=center, zoom=meshSize*customScale) {zoom*x + offset}))) }) .linesearchSet <- (function(center, slope, delta, increments, customScale){ # Build series of points along "slope" # - center is where to start from # - slope is the search direction # - delta is the zoom factor # - increments is an integer vector of increasing step counts, ex. 
the Fibonacci sequence tempx = matrix(0, nrow=length(increments), ncol=length(center)) for(k in 1:nrow(tempx)) { for(j in 1:ncol(tempx)){ tempx[k,j] = center[j] + delta*customScale[j]*increments[k]*slope[j] } } return((tempx)) }) dfoptim/R/hjk.R0000644000176200001440000001063213216472314013021 0ustar liggesusers## ## h o o k e j e e v e s . R Hooke-Jeeves Minimization Algorithm ## hjk <- function(par, fn, control = list(), ...) { if (!is.numeric(par)) stop("Argument 'par' must be a numeric vector.", call. = FALSE) n <- length(par) if (n == 1) stop("For univariate functions use some different method.", call. = FALSE) #-- Control list handling ---------- cntrl <- list(tol = 1.e-06, maxfeval = Inf, # set to Inf if no limit wanted maximize = FALSE, # set to TRUE for maximization target = Inf, # set to Inf for no restriction info = FALSE) # for printing interim information nmsCo <- match.arg(names(control), choices = names(cntrl), several.ok = TRUE) if (!is.null(names(control))) cntrl[nmsCo] <- control tol <- cntrl$tol; maxfeval <- cntrl$maxfeval maximize <- cntrl$maximize target <- cntrl$target info <- cntrl$info scale <- if (maximize) -1 else 1 fun <- match.fun(fn) f <- function(x) scale * fun(x, ...) #-- Setting steps and stepsize ----- nsteps <- floor(log2(1/tol)) # number of steps steps <- 2^c(-(0:(nsteps-1))) # decreasing step size dir <- diag(1, n, n) # orthogonal directions x <- par # start point fx <- fbest <- f(x) # smallest value so far fcount <- 1 # counts number of function calls if (info) cat("step\tnofc\tfmin\txpar\n") # info header #-- Start the main loop ------------ ns <- 0 while (ns < nsteps && fcount < maxfeval && abs(fx) < target) { ns <- ns + 1 hjs <- .hjsearch(x, f, steps[ns], dir, fcount, maxfeval, target) x <- hjs$x fx <- hjs$fx sf <- hjs$sf fcount <- fcount + hjs$finc if (info) cat(ns, "\t", fcount, "\t", fx/scale, "\t", x[1], "...\n") } if (fcount > maxfeval) { warning("Function evaluation limit exceeded -- may not converge.") conv <- 1 } else if (abs(fx) > target) { warning("Function exceeds min/max value -- may not converge.") conv <- 1 } else { conv <- 0 } fx <- fx / scale # undo scaling return(list(par = x, value = fx, convergence = conv, feval = fcount, niter = ns)) } ## Search with a single scale ----------------------------- .hjsearch <- function(xb, f, h, dir, fcount, maxfeval, target) { x <- xb xc <- x sf <- 0 finc <- 0 hje <- .hjexplore(xb, xc, f, h, dir) x <- hje$x fx <- hje$fx sf <- hje$sf finc <- finc + hje$numf # Pattern move while (sf == 1) { d <- x-xb xb <- x xc <- x+d fb <- fx hje <- .hjexplore(xb, xc, f, h, dir, fb) x <- hje$x fx <- hje$fx sf <- hje$sf finc <- finc + hje$numf if (sf == 0) { # pattern move failed hje <- .hjexplore(xb, xb, f, h, dir, fb) x <- hje$x fx <- hje$fx sf <- hje$sf finc <- finc + hje$numf } if (fcount + finc > maxfeval || abs(fx) > target) break } return(list(x = x, fx = fx, sf = sf, finc = finc)) } ## Exploratory move --------------------------------------- .hjexplore <- function(xb, xc, f, h, dir, fbold) { n <- length(xb) x <- xb if (missing(fbold)) { fb <- f(x) numf <- 1 } else { fb <- fbold numf <- 0 } fx <- fb xt <- xc sf <- 0 # do we find a better point ? 
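    # Editorial comment (clarifying sketch, not part of the original source):
    # the loop below probes xt + h*e_k and xt - h*e_k along every coordinate
    # direction, taken in random order; e.g. in two dimensions with h = 0.5 and
    # xt = c(0, 0) the first probes are (0.5, 0) and (-0.5, 0).  Whenever one of
    # the two probes improves on fb, xt moves there and the remaining
    # coordinates are explored from the updated point.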
dirh <- h * dir fbold <- fx for (k in sample.int(n, n)) { # resample orthogonal directions p1 <- xt + dirh[, k] ft1 <- f(p1) numf <- numf + 1 p2 <- xt - dirh[, k] ft2 <- f(p2) numf <- numf + 1 if (min(ft1, ft2) < fb) { sf <- 1 if (ft1 < ft2) { xt <- p1 fb <- ft1 } else { xt <- p2 fb <- ft2 } } } if (sf == 1) { x <- xt fx <- fb } return(list(x = x, fx = fx, sf = sf, numf = numf)) } dfoptim/R/hjkb.R0000644000176200001440000001231513216472314013163 0ustar liggesusers## ## h o o k e j e e v e s . R Hooke-Jeeves Minimization Algorithm ## hjkb <- function(par, fn, lower = -Inf, upper = Inf, control = list(), ...) { if (!is.numeric(par)) stop("Argument 'par' must be a numeric vector.", call. = FALSE) n <- length(par) if (n == 1) stop("For univariate functions use some different method.", call. = FALSE) if(!is.numeric(lower) || !is.numeric(upper)) stop("Lower and upper limits must be numeric.", call. = FALSE) if (length(lower) == 1) lower <- rep(lower, n) if (length(upper) == 1) upper <- rep(upper, n) if (!all(lower <= upper)) stop("All lower limits must be smaller than upper limits.", call. = FALSE) if (!all(lower <= par) || !all(par <= upper)) stop("Infeasible starting values -- check limits.", call. = FALSE) #-- Control list handling ---------- cntrl <- list(tol = 1.e-06, maxfeval = Inf, # set to Inf if no limit wanted maximize = FALSE, # set to TRUE for maximization target = Inf, # set to Inf for no restriction info = FALSE) # for printing interim information nmsCo <- match.arg(names(control), choices = names(cntrl), several.ok = TRUE) if (!is.null(names(control))) cntrl[nmsCo] <- control tol <- cntrl$tol; maxfeval <- cntrl$maxfeval maximize <- cntrl$maximize target <- cntrl$target info <- cntrl$info scale <- if (maximize) -1 else 1 fun <- match.fun(fn) f <- function(x) scale * fun(x, ...) 
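    # Editorial note (worked example of the step schedule set up below, assuming
    # the default tol = 1e-06): nsteps = floor(log2(1/1e-06)) = 19, so the
    # pattern search works through the decreasing step sizes 2^0, 2^-1, ...,
    # 2^-18 (about 3.8e-06 at the last stage); the step is halved each time the
    # search with the current step size can no longer improve the point.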
#-- Setting steps and stepsize ----- nsteps <- floor(log2(1/tol)) # number of steps steps <- 2^c(-(0:(nsteps-1))) # decreasing step size dir <- diag(1, n, n) # orthogonal directions x <- par # start point fx <- fbest <- f(x) # smallest value so far fcount <- 1 # counts number of function calls if (info) cat("step\tnofc\tfmin\txpar\n") # info header #-- Start the main loop ------------ ns <- 0 while (ns < nsteps && fcount < maxfeval && abs(fx) < target) { ns <- ns + 1 hjs <- .hjbsearch(x, f, lower, upper, steps[ns], dir, fcount, maxfeval, target) x <- hjs$x fx <- hjs$fx sf <- hjs$sf fcount <- fcount + hjs$finc if (info) cat(ns, "\t", fcount, "\t", fx/scale, "\t", x[1], "...\n") } if (fcount > maxfeval) { warning("Function evaluation limit exceeded -- may not converge.") conv <- 1 } else if (abs(fx) > target) { warning("Function exceeds min/max value -- may not converge.") conv <- 1 } else { conv <- 0 } fx <- fx / scale # undo scaling return(list(par = x, value = fx, convergence = conv, feval = fcount, niter = ns)) } ## Search with a single scale ----------------------------- .hjbsearch <- function(xb, f, lo, up, h, dir, fcount, maxfeval, target) { x <- xb xc <- x sf <- 0 finc <- 0 hje <- .hjbexplore(xb, xc, f, lo, up, h, dir) x <- hje$x fx <- hje$fx sf <- hje$sf finc <- finc + hje$numf # Pattern move while (sf == 1) { d <- x-xb xb <- x xc <- x+d xc <- pmax(pmin(xc, up), lo) fb <- fx hje <- .hjbexplore(xb, xc, f, lo, up, h, dir, fb) x <- hje$x fx <- hje$fx sf <- hje$sf finc <- finc + hje$numf if (sf == 0) { # pattern move failed hje <- .hjbexplore(xb, xb, f, lo, up, h, dir, fb) x <- hje$x fx <- hje$fx sf <- hje$sf finc <- finc + hje$numf } if (fcount + finc > maxfeval || abs(fx) > target) break } return(list(x = x, fx = fx, sf = sf, finc = finc)) } ## Exploratory move --------------------------------------- .hjbexplore <- function(xb, xc, f, lo, up, h, dir, fbold) { n <- length(xb) x <- xb if (missing(fbold)) { fb <- f(x) numf <- 1 } else { fb <- fbold numf <- 0 } fx <- fb xt <- xc sf <- 0 # do we find a better point ? dirh <- h * dir fbold <- fx for (k in sample.int(n, n)) { # resample orthogonal directions p1 <- xt + dirh[, k] if ( p1[k] <= up[k] ) { ft1 <- f(p1) numf <- numf + 1 } else { ft1 <- fb } p2 <- xt - dirh[, k] if ( lo[k] <= p2[k] ) { ft2 <- f(p2) numf <- numf + 1 } else { ft2 <- fb } if (min(ft1, ft2) < fb) { sf <- 1 if (ft1 < ft2) { xt <- p1 fb <- ft1 } else { xt <- p2 fb <- ft2 } } } if (sf == 1) { x <- xt fx <- fb } return(list(x = x, fx = fx, sf = sf, numf = numf)) } dfoptim/R/nmkb.R0000644000176200001440000001544513743403530013202 0ustar liggesusers# Nelder-Mead with Box constraints nmkb <- function (par, fn, lower = -Inf, upper = Inf, control = list(), ...) 
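# Editor's note -- a numeric sketch of the bound-enforcing transformation
# implemented by g()/ginv() in the body below (illustration only; the numbers
# are not from the original source).  For a parameter with lower = -2, upper = 2:
#   g(0.5)       = atanh(2*(0.5 - (-2))/(2 - (-2)) - 1) = atanh(0.25), about 0.2554
#   ginv(0.2554) = -2 + (2 - (-2))/2 * (1 + tanh(0.2554)), about 0.5 again
# so nmkb() searches an unconstrained space while every candidate mapped back
# through ginv() stays strictly inside (lower, upper).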
{ ctrl <- list(tol = 1e-06, maxfeval = min(5000, max(1500, 20 * length(par)^2)), regsimp = TRUE, maximize = FALSE, restarts.max = 3, trace = FALSE) namc <- match.arg(names(control), choices = names(ctrl), several.ok = TRUE) if (!all(namc %in% names(ctrl))) stop("unknown names in control: ", namc[!(namc %in% names(ctrl))]) if (!is.null(names(control))) ctrl[namc] <- control ftol <- ctrl$tol maxfeval <- ctrl$maxfeval regsimp <- ctrl$regsimp restarts.max <- ctrl$restarts.max maximize <- ctrl$maximize trace <- ctrl$trace n <- length(par) g <- function(x) { gx <- x gx[c1] <- atanh(2 * (x[c1] - lower[c1]) / (upper[c1] - lower[c1]) - 1) gx[c3] <- log(x[c3] - lower[c3]) gx[c4] <- log(upper[c4] - x[c4]) gx } ginv <- function(x) { gix <- x gix[c1] <- lower[c1] + (upper[c1] - lower[c1])/2 * (1 + tanh(x[c1])) gix[c3] <- lower[c3] + exp(x[c3]) gix[c4] <- upper[c4] - exp(x[c4]) gix } if (length(lower) == 1) lower <- rep(lower, n) if (length(upper) == 1) upper <- rep(upper, n) if (any(c(par < lower, upper < par))) stop("Infeasible starting values!", call.=FALSE) low.finite <- is.finite(lower) upp.finite <- is.finite(upper) c1 <- low.finite & upp.finite # both lower and upper bounds are finite c2 <- !(low.finite | upp.finite) # both lower and upper bounds are infinite c3 <- !(c1 | c2) & low.finite # finite lower bound, but infinite upper bound c4 <- !(c1 | c2) & upp.finite # finite upper bound, but infinite lower bound if (all(c2)) stop("Use `nmk()' for unconstrained optimization!", call.=FALSE) if (maximize) fnmb <- function(par) -fn(ginv(par), ...) else fnmb <- function(par) fn(ginv(par), ...) x0 <- g(par) if (n == 1) stop(call. = FALSE, "Use `optimize' for univariate optimization") if (n > 30) warning("Nelder-Mead should not be used for high-dimensional optimization") V <- cbind(rep(0, n), diag(n)) f <- rep(0, n + 1) f[1] <- fnmb(x0) V[, 1] <- x0 scale <- max(1, sqrt(sum(x0^2))) if (regsimp) { alpha <- scale/(n * sqrt(2)) * c(sqrt(n + 1) + n - 1, sqrt(n + 1) - 1) V[, -1] <- (x0 + alpha[2]) diag(V[, -1]) <- x0[1:n] + alpha[1] for (j in 2:ncol(V)) f[j] <- fnmb(V[, j]) } else { V[, -1] <- x0 + scale * V[, -1] for (j in 2:ncol(V)) f[j] <- fnmb(V[, j]) } f[is.nan(f)] <- Inf nf <- n + 1 ord <- order(f) f <- f[ord] V <- V[, ord] rho <- 1 gamma <- 0.5 chi <- 2 sigma <- 0.5 conv <- 1 oshrink <- 1 restarts <- 0 orth <- 0 dist <- f[n + 1] - f[1] v <- V[, -1] - V[, 1] delf <- f[-1] - f[1] diam <- sqrt(colSums(v^2)) # sgrad <- c(solve(t(v), delf)) sgrad <- c(crossprod(t(v), delf)) alpha <- 1e-04 * max(diam)/sqrt(sum(sgrad^2)) simplex.size <- sum(abs(V[, -1] - V[, 1]))/max(1, sum(abs(V[, 1]))) itc <- 0 conv <- 0 message <- "Succesful convergence" while (nf < maxfeval & restarts < restarts.max & dist > ftol & simplex.size > 1e-06) { fbc <- mean(f) happy <- 0 itc <- itc + 1 xbar <- rowMeans(V[, 1:n]) xr <- (1 + rho) * xbar - rho * V[, n + 1] fr <- fnmb(xr) nf <- nf + 1 if (is.nan(fr)) fr <- Inf if (fr >= f[1] & fr < f[n]) { happy <- 1 xnew <- xr fnew <- fr } else if (fr < f[1]) { xe <- (1 + rho * chi) * xbar - rho * chi * V[, n + 1] fe <- fnmb(xe) if (is.nan(fe)) fe <- Inf nf <- nf + 1 if (fe < fr) { xnew <- xe fnew <- fe happy <- 1 } else { xnew <- xr fnew <- fr happy <- 1 } } else if (fr >= f[n] & fr < f[n + 1]) { xc <- (1 + rho * gamma) * xbar - rho * gamma * V[, n + 1] fc <- fnmb(xc) if (is.nan(fc)) fc <- Inf nf <- nf + 1 if (fc <= fr) { xnew <- xc fnew <- fc happy <- 1 } } else if (fr >= f[n + 1]) { xc <- (1 - gamma) * xbar + gamma * V[, n + 1] fc <- fnmb(xc) if (is.nan(fc)) fc <- Inf nf <- nf + 1 if (fc < f[n + 
1]) { xnew <- xc fnew <- fc happy <- 1 } } if (happy == 1 & oshrink == 1) { fbt <- mean(c(f[1:n], fnew)) delfb <- fbt - fbc armtst <- alpha * sum(sgrad^2) if (delfb > -armtst/n) { if (trace) cat("Trouble - restarting: \n") restarts <- restarts + 1 orth <- 1 diams <- min(diam) sx <- sign(0.5 * sign(sgrad)) happy <- 0 V[, -1] <- V[, 1] diag(V[, -1]) <- diag(V[, -1]) - diams * sx[1:n] } } if (happy == 1) { V[, n + 1] <- xnew f[n + 1] <- fnew ord <- order(f) V <- V[, ord] f <- f[ord] } else if (happy == 0 & restarts < restarts.max) { if (orth == 0) orth <- 1 V[, -1] <- V[, 1] - sigma * (V[, -1] - V[, 1]) for (j in 2:ncol(V)) f[j] <- fnmb(V[, j]) nf <- nf + n ord <- order(f) V <- V[, ord] f <- f[ord] } v <- V[, -1] - V[, 1] delf <- f[-1] - f[1] diam <- sqrt(colSums(v^2)) simplex.size <- sum(abs(v))/max(1, sum(abs(V[, 1]))) f[is.nan(f)] <- Inf dist <- f[n + 1] - f[1] # sgrad <- c(solve(t(v), delf)) sgrad <- c(crossprod(t(v), delf)) if (trace & !(itc%%2)) cat("iter: ", itc, "\n", "value: ", f[1], "\n") } if (dist <= ftol | simplex.size <= 1e-06) { conv <- 0 message <- "Successful convergence" } else if (nf >= maxfeval) { conv <- 1 message <- "Maximum number of fevals exceeded" } else if (restarts >= restarts.max) { conv <- 2 message <- "Stagnation in Nelder-Mead" } return(list(par = ginv(V[, 1]), value = f[1] * (-1)^maximize, feval = nf, restarts = restarts, convergence = conv, message = message)) } dfoptim/MD50000644000176200001440000000120013743611342012221 0ustar liggesuserse3deac1d57832c2df173584532ffa521 *DESCRIPTION 1150f3d974421c3f72b635c6064dbe0c *NAMESPACE 8eefe08d5c3bc33ed215c06ef2b63d90 *NEWS 80da33dbbd0aa6557ef6a2587b4670a3 *R/hjk.R 0cad5c4682c2d96debd6027666f45bb6 *R/hjkb.R cfe82b7f30a423ed7818b7606a5d3db3 *R/mads.R c2b224902e1a75617ba0dc18812e0180 *R/nmk.R 75d855a8147daf1b94ccf22623183273 *R/nmkb.R 28d4e9ca263c97509eea042d2912f0d1 *demo/00Index 3c7b29c1add225631156d6ea258ac55a *demo/dfoptim.R cf2fd32ac3d47caa742ce647cfa416ba *man/dfoptim-package.Rd 087d0d63cd6e4b8408ccb796b401cfe4 *man/hookejeeves.Rd 933ad2c4e2d62fd21cb879dc53216cf3 *man/mads.Rd e4a6723cb2be20dd83d03c2a6b0c84e8 *man/nmkb.Rd