maxLik/inst/CITATION

citHeader("To cite package 'maxLik' in publications use:")

citEntry( entry = "Article",
   title = "maxLik: A package for maximum likelihood estimation in {R}",
   author = personList( as.person( "Arne Henningsen" ),
                        as.person( "Ott Toomet" ) ),
   journal = "Computational Statistics",
   year = "2011",
   volume = "26",
   number = "3",
   pages = "443-458",
   doi = "10.1007/s00180-010-0217-1",
   url = "http://dx.doi.org/10.1007/s00180-010-0217-1",
   textVersion = paste( "Arne Henningsen and Ott Toomet (2011).",
      "maxLik: A package for maximum likelihood estimation in R.",
      "Computational Statistics 26(3), 443-458.",
      "DOI 10.1007/s00180-010-0217-1." )
   )

maxLik/tests/fitGammaDist.R

## the idea and most commands were provided by Marco J. Maier, Institute for
## Statistics and Mathematics, Vienna University of Economics and Business
library(maxLik)
options(warn = -1, digits = 4 )

set.seed(5)
some_data <- rgamma(1e4, shape = 5, scale = 2)

# log-likelihood function(s)
logLL <- function(x, X)       # per observation for maxLik
   dgamma(x = X, shape = exp(x[1]), scale = exp(x[2]), log = TRUE)
logLL_sum <- function(x, X)   # negative sum for nlm()
   -sum(dgamma(x = X, shape = exp(x[1]), scale = exp(x[2]), log = TRUE))

sum(logLL(log(c(5,2)), some_data))
logLL_sum(log(c(5,2)), some_data)
all.equal( sum(logLL(log(c(5,2)), some_data)),
           -logLL_sum(log(c(5,2)), some_data) )

# gradient of log-likelihood function
d_logLL <- function(x, X){    # analytic 1. derivatives
   cbind(shape = exp(x[1])*(-x[2] - psigamma(exp(x[1]), 0) + log(X)),
         scale = X / exp(x[2]) - exp(x[1]))
}
d_logLLNum <- function(x, X){
   numericGradient( logLL, x, X = X )
}
colSums(d_logLL(log(c(5,2)), some_data))
colSums(d_logLLNum(log(c(5,2)), some_data))
all.equal( d_logLL(log(c(5,2)), some_data), d_logLLNum(log(c(5,2)), some_data),
           check.attributes = FALSE )
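## ---------------------------------------------------------------------
## Illustrative addition (not part of the original test file): the same
## analytic-vs-numeric comparison can also be done with maxLik's
## compareDerivatives(), which prints both gradients side by side.  The
## wrapper functions below are hypothetical helpers that fix the data
## argument; the block is guarded by if(FALSE) so that it cannot alter
## the saved test output when the script is run by R CMD check.
## ---------------------------------------------------------------------
if(FALSE) {
   llSum   <- function(x) sum(logLL(x, X = some_data))        # summed log-likelihood
   gradSum <- function(x) colSums(d_logLL(x, X = some_data))  # summed analytic gradient
   compareDerivatives(llSum, gradSum, t0 = log(c(5, 2)))
}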
# Hessian of log-likelihood function
dd_logLL <- function(x, X){   # analytic 2. derivatives
   grad <- d_logLL( x, X )
   hessian <- matrix(0, 2, 2)
   hessian[1,1] <- sum( grad[,1] - exp(x[1])^2 * psigamma(exp(x[1]), 1) )
   hessian[2,2] <- - sum( X / exp(x[2]) )
   hessian[cbind(c(2,1), c(1,2))] <- -exp(x[1]) * length(X)
   return(hessian)
}
dd_logLLNum <- function(x, X){
   numericHessian( function(x,X) sum(logLL(x,X)), t0 = x, X = X )
}
dd_logLLNumGrad <- function(x, X){
   numericHessian( function(x,X) sum(logLL(x,X)),
                   grad = function(x,X) colSums(d_logLL(x,X)),
                   x, X = X )
}
dd_logLL(log(c(5,2)), some_data)
dd_logLLNum(log(c(5,2)), some_data)
all.equal( dd_logLL(log(c(5,2)), some_data), dd_logLLNum(log(c(5,2)), some_data) )
dd_logLLNumGrad(log(c(5,2)), some_data)
all.equal( dd_logLL(log(c(5,2)), some_data), dd_logLLNumGrad(log(c(5,2)), some_data),
           check.attributes = FALSE )

# estimation with nlm()
t_nlm <- system.time(
   r_nlm <- nlm(logLL_sum, c(0,0), X = some_data, hessian = TRUE) )

# estimation with nlm() and gradients
logLL_grad <- function(x, X) {
   result <- logLL_sum( x, X )
   attr( result, "gradient" ) <- - colSums( d_logLL( x, X ) )
   return( result )
}
t_nlmg <- system.time(
   r_nlmg <- nlm(logLL_grad, c(0,0), X = some_data, hessian = TRUE) )

# estimation with nlm() and gradients and Hessian
logLL_hess <- function(x, X) {
   result <- logLL_sum( x, X )
   attr( result, "gradient" ) <- - colSums( d_logLL( x, X ) )
   attr( result, "hessian" ) <- - dd_logLL( x, X )
   return( result )
}
t_nlmgh <- system.time(
   r_nlmgh <- nlm(logLL_hess, c(0,0), X = some_data, hessian = TRUE) )

# estimation with optim() / BFGS
t_bfgs <- system.time(
   r_bfgs <- optim(c(0,0), logLL_sum, X = some_data, method = "BFGS", hessian = TRUE) )

# estimation with maxLik() / BFGS
t_bfgsM <- system.time(
   r_bfgsM <- maxLik( logLL, start = c(0,0), method = "BFGS", X = some_data ) )

# estimation with maxLik() / BFGS with gradients
t_bfgsMg <- system.time(
   r_bfgsMg <- maxLik( logLL, d_logLL, start = c(0,0), method = "BFGS", X = some_data ) )

# estimation with maxLik() / BHHH
t_bhhh <- system.time(
   r_bhhh <- maxLik( logLL, start = c(0,0), method = "BHHH", X = some_data ) )

# estimation with maxLik() / BHHH with gradients
t_bhhhg <- system.time(
   r_bhhhg <- maxLik( logLL, d_logLL, start = c(0,0), method = "BHHH", X = some_data ) )

# estimation with maxLik() / NR
t_NRn <- system.time(
   r_NRn <- maxLik( logLL, start = c(0,0), method = "NR", X = some_data ) )

# estimation with maxLik() / NR with gradients
t_NRg <- system.time(
   r_NRg <- maxLik( logLL, d_logLL, start = c(0,0), method = "NR", X = some_data ) )

# estimation with maxLik() / NR with gradients and Hessian
t_NRgh <- system.time(
   r_NRgh <- maxLik( logLL, d_logLL, dd_logLL, start = c(0,0), method = "NR", X = some_data ) )

# log likelihood values
rbind(NLM = -r_nlm$minimum, NLM_grad = -r_nlmg$minimum, NLM_gradHess = -r_nlmgh$minimum,
      BFGS = -r_bfgs$value,
      maxLikBfgs = logLik( r_bfgsM ), maxLikBfgs_grad = logLik( r_bfgsMg ),
      BHHH = logLik( r_bhhh ), BHHH_grad = logLik( r_bhhhg ),
      NR_numeric = logLik( r_NRn ), NR_grad = logLik( r_NRg ),
      NR_gradHess = logLik( r_NRgh ) )

# estimated coefficients
pp <- exp(rbind(NLM = r_nlm$estimate, NLM_grad = r_nlmg$estimate,
      NLM_gradHess = r_nlmgh$estimate,
      BFGS = r_bfgs$par,
      maxLikBfgs = coef( r_bfgsM ), maxLikBfgs_grad = coef( r_bfgsMg ),
      BHHH = coef( r_bhhh ), BHHH_grad = coef( r_bhhhg ),
      NR_numeric = coef( r_NRn ), NR_grad = coef( r_NRg ),
      NR_gradHess = coef( r_NRgh ) ))
colnames(pp) <- c("shape_alpha", "scale_theta")
pp

# some Hessians
-100*round(r_nlm$hessian/100, 0)
round(solve(r_nlm$hessian), 5)
-100*round(r_nlmg$hessian/100, 0)
round(solve(r_nlmg$hessian), 5)
-100*round(r_nlmgh$hessian/100, 0)
round(solve(r_nlmgh$hessian), 5)
-100*round(r_bfgs$hessian/100, 0)
round(solve(r_bfgs$hessian), 5)
100*round(r_NRn$hessian/100, 0)
round(solve(-r_NRn$hessian), 5)
100*round(r_NRg$hessian/100, 0)
round(solve(-r_NRg$hessian), 5)

# standard errors
se <- exp(rbind(NLM = sqrt(diag( solve(r_nlm$hessian) )),
      NLM_grad = sqrt(diag( solve(r_nlmg$hessian) )),
      NLM_gradHess = sqrt(diag( solve(r_nlmgh$hessian) )),
      BFGS = sqrt(diag( solve(r_bfgs$hessian) )),
      maxLikBfgs = stdEr( r_bfgsM ), maxLikBfgs_grad = stdEr( r_bfgsMg ),
      BHHH = stdEr( r_bhhh ), BHHH_grad = stdEr( r_bhhhg ),
      NR_numeric = stdEr( r_NRn ), NR_grad = stdEr( r_NRg ),
      NR_gradHess = stdEr( r_NRgh ) ))
colnames(se) <- c("shape_alpha", "scale_theta")
se

# execution times
tt <- rbind(t_nlm, t_nlmg, t_nlmgh, t_bfgs, t_bfgsM, t_bfgsMg,
            t_bhhh, t_bhhhg, t_NRn, t_NRg, t_NRgh )
# tt
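## ---------------------------------------------------------------------
## Illustrative addition (not part of the original test file): for the
## gamma distribution the method-of-moments estimators are available in
## closed form,
##    shape = mean(X)^2 / var(X),   scale = var(X) / mean(X),
## which gives a quick plausibility check of the ML estimates collected
## in 'pp' above.  Guarded by if(FALSE) so that the saved test output is
## not affected.
## ---------------------------------------------------------------------
if(FALSE) {
   mm <- c(shape_alpha = mean(some_data)^2 / var(some_data),
           scale_theta = var(some_data) / mean(some_data))
   print(mm)   # should be close to the true values (5, 2)
   print(pp)   # ML estimates from all optimisers, for comparison
}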
maxLik/tests/examples.Rout.save

R version 3.0.2 (2013-09-25) -- "Frisbee Sailing"
Copyright (C) 2013 The R Foundation for Statistical Computing
Platform: x86_64-pc-linux-gnu (64-bit)

R is free software and comes with ABSOLUTELY NO WARRANTY.
You are welcome to redistribute it under certain conditions.
Type 'license()' or 'licence()' for distribution details.

R is a collaborative project with many contributors.
Type 'contributors()' for more information and
'citation()' on how to cite R or R packages in publications.

Type 'demo()' for some demos, 'help()' for on-line help, or
'help.start()' for an HTML browser interface to help.
Type 'q()' to quit R.

> library( maxLik )
Loading required package: miscTools

Please cite the 'maxLik' package as:
Henningsen, Arne and Toomet, Ott (2011). maxLik: A package for maximum
likelihood estimation in R. Computational Statistics 26(3), 443-458.
DOI 10.1007/s00180-010-0217-1.

If you have questions, suggestions, or comments regarding the 'maxLik'
package, please use a forum or 'tracker' at maxLik's R-Forge site:
https://r-forge.r-project.org/projects/maxlik/
> options(digits=4)
> 
> printRounded <- function( x ) {
+    for( i in names( x ) ) {
+       cat ( "$", i, "\n", sep = "" )
+       if( is.numeric( x[[i]] ) ) {
+          print( round( x[[i]], 4 ) )
+       } else {
+          print( x[[i]] )
+       }
+       cat( "\n" )
+    }
+    cat( "attr(,\"class\")\n" )
+    print( class( x ) )
+ }
> 
> 
> ### activePar
> # a simple two-dimensional exponential hat
> f <- function(a) exp(-(a[1]-2)^2 - (a[2]-4)^2)
> #
> # maximize wrt.
both parameters > free <- maxNR(f, start=1:2) > printRounded( free ) $maximum [1] 1 $estimate [1] 2 4 $gradient [1] 0 0 $hessian [,1] [,2] [1,] -2e+00 1e-04 [2,] 1e-04 -2e+00 $code [1] 1 $message [1] "gradient close to zero" $last.step NULL $fixed [1] FALSE FALSE $iterations [1] 7 $type [1] "Newton-Raphson maximisation" attr(,"class") [1] "maxim" "list" > summary(free) # results should be close to (2,4) -------------------------------------------- Newton-Raphson maximisation Number of iterations: 7 Return code: 1 gradient close to zero Function value: 1 Estimates: estimate gradient [1,] 2 0 [2,] 4 0 -------------------------------------------- > activePar(free) [1] TRUE TRUE > # allow only the second parameter to vary > cons <- maxNR(f, start=1:2, activePar=c(FALSE,TRUE)) > printRounded( cons ) $maximum [1] 0.3679 $estimate [1] 1 4 $gradient [1] NA 0 $hessian [,1] [,2] [1,] NA NA [2,] NA -0.7359 $code [1] 1 $message [1] "gradient close to zero" $last.step NULL $fixed [1] TRUE FALSE $iterations [1] 4 $type [1] "Newton-Raphson maximisation" attr(,"class") [1] "maxim" "list" > summary(cons) # result should be around (1,4) -------------------------------------------- Newton-Raphson maximisation Number of iterations: 4 Return code: 1 gradient close to zero Function value: 0.3679 Estimates: estimate gradient [1,] 1 NA [2,] 4 5.944e-07 -------------------------------------------- > activePar(cons) [1] FALSE TRUE > # specify fixed par in different ways > cons2 <- maxNR(f, start=1:2, fixed=1) > all.equal( cons, cons2 ) [1] TRUE > cons3 <- maxNR(f, start=1:2, fixed=c(TRUE,FALSE)) > all.equal( cons, cons3 ) [1] TRUE > cons4 <- maxNR(f, start=c(a=1, b=2), fixed="a") > print(summary(cons4)) -------------------------------------------- Newton-Raphson maximisation Number of iterations: 4 Return code: 1 gradient close to zero Function value: 0.3679 Estimates: estimate gradient a 1 NA b 4 5.944e-07 -------------------------------------------- > all.equal( cons, cons4 ) [1] "Component 2: names for current but not for target" [2] "Component 3: names for current but not for target" [3] "Component 4: Attributes: < Length mismatch: comparison on first 1 components >" [4] "Component 8: names for current but not for target" > > ### compareDerivatives > set.seed( 2 ) > ## A simple example with sin(x)' = cos(x) > f <- sin > compareDerivatives(f, cos, t0=1) -------- compare derivatives -------- Note: analytic gradient is vector. Transforming into a matrix form Function value: [1] 0.8415 Dim of analytic gradient: 1 1 numeric : 1 1 param theta 0 analytic numeric rel.diff [1,] 1 0.5403 0.5403 -5.13e-11 Max relative difference: 5.13e-11 -------- END of compare derivatives -------- > ## > ## Example of log-likelihood of normal density. Two-parameter > ## function. > x <- rnorm(100, 1, 2) # generate rnorm x > l <- function(b) sum(log(dnorm((x-b[1])/b[2])/b[2])) > # b[1] - mu, b[2] - sigma > gradl <- function(b) { + c(sum(x - b[1])/b[2]^2, + sum((x - b[1])^2/b[2]^3 - 1/b[2])) + } > compareDerivatives(l, gradl, t0=c(1,2)) -------- compare derivatives -------- Note: analytic gradient is vector. 
Transforming into a matrix form Function value: [1] -227.9 Dim of analytic gradient: 1 2 numeric : 1 2 t0 [1] 1 2 analytic gradient [,1] [,2] [1,] -1.535 16.68 numeric gradient [,1] [,2] [1,] -1.535 16.68 (anal-num)/(0.5*(abs(anal)+abs(num))) [,1] [,2] [1,] -1.989e-09 -2.089e-10 Max relative difference: 1.989e-09 -------- END of compare derivatives -------- > > > ### hessian > set.seed( 3 ) > # log-likelihood for normal density > # a[1] - mean > # a[2] - standard deviation > ll <- function(a) sum(-log(a[2]) - (x - a[1])^2/(2*a[2]^2)) > x <- rnorm(1000) # sample from standard normal > ml <- maxLik(ll, start=c(1,1)) > # ignore eventual warnings "NaNs produced in: log(x)" > printRounded( ml ) $maximum [1] -497.6 $estimate [1] 0.0064 0.9976 $gradient [1] 0 0 $hessian [,1] [,2] [1,] -1005 0 [2,] 0 -2010 $code [1] 1 $message [1] "gradient close to zero" $last.step NULL $fixed [1] FALSE FALSE $iterations [1] 7 $type [1] "Newton-Raphson maximisation" attr(,"class") [1] "maxLik" "maxim" "list" > print( ml ) Maximum Likelihood estimation Newton-Raphson maximisation, 7 iterations Return code 1: gradient close to zero Log-Likelihood: -497.6 (2 free parameter(s)) Estimate(s): 0.006397 0.9976 > summary(ml) # result should be close to c(0,1) -------------------------------------------- Maximum Likelihood estimation Newton-Raphson maximisation, 7 iterations Return code 1: gradient close to zero Log-Likelihood: -497.6 2 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] 0.0064 0.0316 0.2 0.84 [2,] 0.9976 0.0223 44.7 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -------------------------------------------- > hessian(ml) # How the Hessian looks like [,1] [,2] [1,] -1005 0 [2,] 0 -2010 > sqrt(-solve(hessian(ml))) # Note: standard deviations are on the diagonal [,1] [,2] [1,] 0.03155 0.00000 [2,] 0.00000 0.02231 > print(stdEr(ml)) [1] 0.03155 0.02231 > # test vector of stdEr-s > # > # Now run the same example while fixing a[2] = 1 > mlf <- maxLik(ll, start=c(1,1), activePar=c(TRUE, FALSE)) > printRounded( mlf ) $maximum [1] -497.6 $estimate [1] 0.0064 1.0000 $gradient [1] 0 NA $hessian [,1] [,2] [1,] -1000 NA [2,] NA NA $code [1] 1 $message [1] "gradient close to zero" $last.step NULL $fixed [1] FALSE TRUE $iterations [1] 3 $type [1] "Newton-Raphson maximisation" attr(,"class") [1] "maxLik" "maxim" "list" > print( mlf ) Maximum Likelihood estimation Newton-Raphson maximisation, 3 iterations Return code 1: gradient close to zero Log-Likelihood: -497.6 (1 free parameter(s)) Estimate(s): 0.006397 1 > summary(mlf) # first parameter close to 0, the second exactly 1.0 -------------------------------------------- Maximum Likelihood estimation Newton-Raphson maximisation, 3 iterations Return code 1: gradient close to zero Log-Likelihood: -497.6 1 free parameters Estimates: Estimate Std. 
error t value Pr(> t) [1,] 0.0064 0.0316 0.2 0.84 [2,] 1.0000 0.0000 NA NA -------------------------------------------- > hessian(mlf) [,1] [,2] [1,] -1000 NA [2,] NA NA > # now invert only the free parameter part of the Hessian > sqrt(-solve(hessian(mlf)[activePar(mlf), activePar(mlf)])) [,1] [1,] 0.03162 > # gives the standard deviation for the mean > print(stdEr(mlf)) [1] 0.03162 0.00000 > # test standard errors with fixed par > > > ### maxBFGS > set.seed( 5 ) > # Maximum Likelihood estimation of the parameter of Poissonian distribution > n <- rpois(100, 3) > loglik <- function(l) n*log(l) - l - lfactorial(n) > # we use numeric gradient > a <- maxBFGS(loglik, start=1) > print( a ) $maximum [1] -199.2 $estimate [1] 3.19 $gradient [1] 1.583e-05 $hessian [,1] [1,] -31.29 $code [1] 0 $message [1] "successful convergence " $last.step NULL $fixed [1] FALSE $iterations function 29 $type [1] "BFGS maximisation" $constraints NULL $gradientObs [,1] [1,] -0.37304 [2,] 0.25392 [3,] 0.88088 [4,] -0.37304 [5,] -0.68652 [6,] 0.25392 [7,] -0.05956 [8,] 0.25392 [9,] 0.88088 [10,] -0.68652 [11,] -0.37304 [12,] -0.05956 [13,] -0.37304 [14,] -0.05956 [15,] -0.37304 [16,] -0.37304 [17,] -0.37304 [18,] 0.56740 [19,] -0.05956 [20,] 0.56740 [21,] 0.56740 [22,] 0.25392 [23,] -0.37304 [24,] -0.37304 [25,] -0.68652 [26,] -0.05956 [27,] -0.05956 [28,] 0.88088 [29,] -0.68652 [30,] 0.88088 [31,] -0.05956 [32,] -0.68652 [33,] -0.37304 [34,] -1.00000 [35,] -1.00000 [36,] -0.05956 [37,] -0.05956 [38,] -0.05956 [39,] -0.37304 [40,] -0.37304 [41,] 0.56740 [42,] -0.37304 [43,] 0.56740 [44,] -0.05956 [45,] 0.88088 [46,] -0.05956 [47,] 0.25392 [48,] -0.68652 [49,] 0.25392 [50,] -0.05956 [51,] -0.37304 [52,] -0.05956 [53,] 0.88088 [54,] 1.19436 [55,] 0.88088 [56,] -0.37304 [57,] -0.37304 [58,] -0.37304 [59,] -0.68652 [60,] -0.68652 [61,] -0.05956 [62,] -0.68652 [63,] 0.56740 [64,] 1.50784 [65,] 1.19436 [66,] 0.56740 [67,] 0.56740 [68,] -0.68652 [69,] 0.88088 [70,] 0.56740 [71,] 0.56740 [72,] 0.25392 [73,] -0.68652 [74,] 0.56740 [75,] -1.00000 [76,] 0.88088 [77,] -1.00000 [78,] 0.25392 [79,] -0.05956 [80,] -0.37304 [81,] 0.88088 [82,] 0.56740 [83,] -0.37304 [84,] -0.68652 [85,] -0.05956 [86,] -0.68652 [87,] 1.19436 [88,] 0.25392 [89,] -0.37304 [90,] -0.05956 [91,] 0.25392 [92,] -0.37304 [93,] -0.68652 [94,] -0.37304 [95,] -0.37304 [96,] 0.56740 [97,] -0.37304 [98,] -0.05956 [99,] -0.05956 [100,] -0.05956 attr(,"class") [1] "maxim" > summary( a ) -------------------------------------------- BFGS maximisation Number of iterations: 29 Return code: 0 successful convergence Function value: -199.2 Estimates: estimate gradient [1,] 3.19 1.583e-05 -------------------------------------------- > # you would probably prefer mean(n) instead of that ;-) > # Note also that maxLik is better suited for Maximum Likelihood > > > ### logLik.maxLik > set.seed( 4 ) > ## ML estimation of exponential duration model: > t <- rexp(100, 2) > loglik <- function(theta) log(theta) - theta*t > gradlik <- function(theta) 1/theta - t > hesslik <- function(theta) -100/theta^2 > ## Estimate with analytic gradient and hessian > a <- maxLik(loglik, gradlik, hesslik, start=1) > printRounded( a ) $maximum [1] -25.05 $estimate [1] 2.116 $gradient [1] 0 $hessian [,1] [1,] -22.34 $code [1] 1 $message [1] "gradient close to zero" $last.step NULL $fixed [1] FALSE $iterations [1] 5 $type [1] "Newton-Raphson maximisation" $gradientObs [,1] [1,] 0.3868 [2,] -1.6794 [3,] 0.0386 [4,] 0.0713 [5,] 0.1590 [6,] 0.1052 [7,] 0.2482 [8,] 0.4473 [9,] 0.2179 [10,] 0.0540 [11,] 
-0.8675 [12,] 0.3286 [13,] 0.2702 [14,] 0.2581 [15,] 0.3028 [16,] -0.0520 [17,] 0.4428 [18,] 0.4055 [19,] -0.4474 [20,] -0.0334 [21,] 0.3506 [22,] -0.1508 [23,] -2.2973 [24,] 0.3887 [25,] -0.4441 [26,] 0.4434 [27,] 0.2769 [28,] -0.1512 [29,] 0.2267 [30,] 0.1922 [31,] -0.2164 [32,] -0.4273 [33,] -0.4157 [34,] 0.2782 [35,] -0.6370 [36,] 0.3945 [37,] 0.3441 [38,] -0.6203 [39,] 0.4578 [40,] 0.1672 [41,] 0.3538 [42,] -0.0653 [43,] 0.1477 [44,] 0.2827 [45,] -0.0152 [46,] 0.0799 [47,] 0.2744 [48,] 0.4523 [49,] -1.1449 [50,] 0.4053 [51,] -0.2277 [52,] 0.4333 [53,] 0.0814 [54,] -0.0811 [55,] -0.7399 [56,] 0.2072 [57,] 0.1135 [58,] 0.1192 [59,] 0.3430 [60,] 0.0932 [61,] 0.4402 [62,] -0.0730 [63,] -0.5010 [64,] 0.0754 [65,] -0.1722 [66,] 0.0454 [67,] -0.0258 [68,] 0.1817 [69,] 0.4480 [70,] -0.1601 [71,] 0.4398 [72,] 0.2483 [73,] 0.4031 [74,] -0.1907 [75,] -0.4727 [76,] -0.0651 [77,] -0.4552 [78,] 0.1595 [79,] 0.3768 [80,] 0.1216 [81,] 0.3019 [82,] -0.0012 [83,] 0.4141 [84,] 0.4010 [85,] 0.3493 [86,] -0.9970 [87,] 0.3787 [88,] 0.3850 [89,] -0.3168 [90,] 0.1926 [91,] 0.3287 [92,] -0.0422 [93,] 0.0606 [94,] -0.6449 [95,] -0.6326 [96,] -0.3563 [97,] -0.3240 [98,] 0.2205 [99,] -0.8326 [100,] 0.3611 attr(,"class") [1] "maxLik" "maxim" "list" > print( a ) Maximum Likelihood estimation Newton-Raphson maximisation, 5 iterations Return code 1: gradient close to zero Log-Likelihood: -25.05 (1 free parameter(s)) Estimate(s): 2.116 > ## print log likelihood value > logLik( a ) [1] -25.05 > ## compare with log likelihood value of summary object > all.equal( logLik( a ), logLik( summary( a ) ) ) [1] TRUE > > > ### maxBHHH > set.seed( 6 ) > ## ML estimation of exponential duration model: > t <- rexp(100, 2) > ## Estimate with numeric gradient and hessian > a <- maxBHHH(loglik, start=1, print.level=2) ----- Initial parameters: ----- fcn value: -45.5 parameter initial gradient free [1,] 1 54.5 1 Condition number of the (active) hessian: 1 -----Iteration 1 ----- -----Iteration 2 ----- -----Iteration 3 ----- -----Iteration 4 ----- -----Iteration 5 ----- -----Iteration 6 ----- -------------- successive function values within tolerance limit 6 iterations estimate: 2.198 Function value: -21.25 > print( a ) $maximum [1] -21.25 $estimate [1] 2.198 $gradient [1] -4.775e-05 $hessian [,1] [1,] -18.42 attr(,"type") [1] "BHHH" $code [1] 2 $message [1] "successive function values within tolerance limit" $last.step NULL $fixed [1] FALSE $iterations [1] 6 $type [1] "BHHH maximisation" $gradientObs [,1] [1,] 0.34872 [2,] 0.36337 [3,] 0.14750 [4,] 0.27835 [5,] -0.60055 [6,] 0.31141 [7,] 0.42198 [8,] 0.18505 [9,] 0.09662 [10,] 0.43709 [11,] 0.27713 [12,] -0.32707 [13,] 0.25446 [14,] 0.41365 [15,] -0.34761 [16,] -0.10404 [17,] 0.35988 [18,] 0.43321 [19,] -0.24284 [20,] 0.40754 [21,] 0.43446 [22,] 0.21306 [23,] -0.72492 [24,] 0.16847 [25,] -0.73113 [26,] 0.41303 [27,] 0.13127 [28,] 0.30142 [29,] 0.03316 [30,] -0.32514 [31,] 0.26619 [32,] 0.33719 [33,] -0.63494 [34,] 0.42639 [35,] 0.41133 [36,] 0.21917 [37,] -0.23050 [38,] 0.42825 [39,] 0.43629 [40,] -0.49030 [41,] -0.86638 [42,] -0.05709 [43,] 0.17051 [44,] -0.06489 [45,] -0.04142 [46,] 0.21592 [47,] -0.27990 [48,] -0.04167 [49,] 0.44931 [50,] 0.28868 [51,] 0.38041 [52,] -0.29423 [53,] -0.12650 [54,] -0.52837 [55,] 0.05775 [56,] 0.39261 [57,] 0.41130 [58,] 0.21081 [59,] 0.43310 [60,] -0.11065 [61,] -1.08886 [62,] 0.28892 [63,] 0.41071 [64,] -0.57920 [65,] 0.37020 [66,] -0.10011 [67,] -0.31689 [68,] 0.31029 [69,] -1.05872 [70,] 0.17639 [71,] 0.37379 [72,] 0.02796 [73,] -0.46422 [74,] 
-0.65735 [75,] -0.11963 [76,] -0.08873 [77,] -0.35161 [78,] 0.09842 [79,] -0.14749 [80,] 0.36913 [81,] -0.23146 [82,] 0.18956 [83,] 0.18225 [84,] 0.12718 [85,] 0.44356 [86,] 0.28875 [87,] 0.38631 [88,] -0.96036 [89,] 0.45398 [90,] 0.27526 [91,] -0.13580 [92,] -0.19583 [93,] -0.24698 [94,] -0.81480 [95,] 0.17887 [96,] -1.18545 [97,] 0.41696 [98,] 0.38062 [99,] -1.16810 [100,] -0.63346 attr(,"class") [1] "maxim" "list" > summary(a) -------------------------------------------- BHHH maximisation Number of iterations: 6 Return code: 2 successive function values within tolerance limit Function value: -21.25 Estimates: estimate gradient [1,] 2.198 -4.775e-05 -------------------------------------------- > ## Estimate with analytic gradient > a <- maxBHHH(loglik, gradlik, start=1) > print( a ) $maximum [1] -21.25 $estimate [1] 2.198 $gradient [1] -4.775e-05 $hessian [,1] [1,] -18.42 attr(,"type") [1] "BHHH" $code [1] 2 $message [1] "successive function values within tolerance limit" $last.step NULL $fixed [1] FALSE $iterations [1] 6 $type [1] "BHHH maximisation" $gradientObs [,1] [1,] 0.34872 [2,] 0.36337 [3,] 0.14750 [4,] 0.27835 [5,] -0.60055 [6,] 0.31141 [7,] 0.42198 [8,] 0.18505 [9,] 0.09662 [10,] 0.43709 [11,] 0.27713 [12,] -0.32707 [13,] 0.25446 [14,] 0.41365 [15,] -0.34761 [16,] -0.10404 [17,] 0.35988 [18,] 0.43321 [19,] -0.24284 [20,] 0.40754 [21,] 0.43446 [22,] 0.21306 [23,] -0.72492 [24,] 0.16847 [25,] -0.73113 [26,] 0.41303 [27,] 0.13127 [28,] 0.30142 [29,] 0.03316 [30,] -0.32514 [31,] 0.26619 [32,] 0.33719 [33,] -0.63494 [34,] 0.42639 [35,] 0.41133 [36,] 0.21917 [37,] -0.23050 [38,] 0.42825 [39,] 0.43629 [40,] -0.49030 [41,] -0.86638 [42,] -0.05709 [43,] 0.17051 [44,] -0.06489 [45,] -0.04142 [46,] 0.21592 [47,] -0.27990 [48,] -0.04167 [49,] 0.44931 [50,] 0.28868 [51,] 0.38041 [52,] -0.29423 [53,] -0.12650 [54,] -0.52837 [55,] 0.05775 [56,] 0.39261 [57,] 0.41130 [58,] 0.21081 [59,] 0.43310 [60,] -0.11065 [61,] -1.08886 [62,] 0.28892 [63,] 0.41071 [64,] -0.57920 [65,] 0.37020 [66,] -0.10011 [67,] -0.31689 [68,] 0.31029 [69,] -1.05872 [70,] 0.17639 [71,] 0.37379 [72,] 0.02796 [73,] -0.46422 [74,] -0.65735 [75,] -0.11963 [76,] -0.08873 [77,] -0.35161 [78,] 0.09842 [79,] -0.14749 [80,] 0.36913 [81,] -0.23146 [82,] 0.18956 [83,] 0.18225 [84,] 0.12718 [85,] 0.44356 [86,] 0.28875 [87,] 0.38631 [88,] -0.96036 [89,] 0.45398 [90,] 0.27526 [91,] -0.13580 [92,] -0.19583 [93,] -0.24698 [94,] -0.81480 [95,] 0.17887 [96,] -1.18545 [97,] 0.41696 [98,] 0.38062 [99,] -1.16810 [100,] -0.63346 attr(,"class") [1] "maxim" "list" > summary(a) -------------------------------------------- BHHH maximisation Number of iterations: 6 Return code: 2 successive function values within tolerance limit Function value: -21.25 Estimates: estimate gradient [1,] 2.198 -4.775e-05 -------------------------------------------- > > > ### maxLik > set.seed( 7 ) > ## ML estimation of exponential duration model: > t <- rexp(100, 2) > ## Estimate with numeric gradient and hessian > a <- maxLik(loglik, start=1, print.level=2) ----- Initial parameters: ----- fcn value: -47.72 parameter initial gradient free [1,] 1 52.28 1 Condition number of the (active) hessian: 1 -----Iteration 1 ----- -----Iteration 2 ----- -----Iteration 3 ----- -----Iteration 4 ----- -----Iteration 5 ----- -------------- gradient close to zero 5 iterations estimate: 2.095 Function value: -26.03 > printRounded( a ) $maximum [1] -26.03 $estimate [1] 2.095 $gradient [1] 0 $hessian [,1] [1,] -22.76 $code [1] 1 $message [1] "gradient close to zero" $last.step NULL 
$fixed [1] FALSE $iterations [1] 5 $type [1] "Newton-Raphson maximisation" $gradientObs [,1] [1,] 0.4531 [2,] -0.3338 [3,] -0.3793 [4,] 0.0712 [5,] 0.2044 [6,] -0.8329 [7,] 0.1013 [8,] -1.6593 [9,] 0.3749 [10,] 0.4545 [11,] 0.0051 [12,] 0.1032 [13,] -0.4588 [14,] -0.4565 [15,] -0.1279 [16,] 0.3046 [17,] 0.1388 [18,] -0.0017 [19,] 0.2175 [20,] 0.0199 [21,] -0.7230 [22,] 0.1610 [23,] 0.2786 [24,] 0.2572 [25,] 0.2032 [26,] 0.3495 [27,] 0.2543 [28,] -0.1430 [29,] -0.3671 [30,] -0.4648 [31,] -0.1518 [32,] 0.0829 [33,] -0.4836 [34,] 0.4729 [35,] 0.2134 [36,] 0.2836 [37,] 0.4332 [38,] 0.3182 [39,] 0.0139 [40,] 0.0312 [41,] 0.1365 [42,] 0.3564 [43,] -0.2541 [44,] 0.4095 [45,] -0.0211 [46,] 0.1728 [47,] -0.2433 [48,] -0.8212 [49,] 0.2151 [50,] -0.0651 [51,] -0.2029 [52,] -0.0574 [53,] -1.6405 [54,] 0.4141 [55,] 0.4053 [56,] 0.4372 [57,] -0.0792 [58,] 0.1532 [59,] -0.4604 [60,] -0.1621 [61,] -0.0035 [62,] -0.0570 [63,] 0.4624 [64,] -0.5310 [65,] 0.2925 [66,] -0.1617 [67,] -1.1256 [68,] 0.3481 [69,] 0.4113 [70,] 0.1879 [71,] -0.4522 [72,] 0.1446 [73,] 0.4468 [74,] 0.0540 [75,] 0.2706 [76,] -0.0117 [77,] 0.2177 [78,] -0.3570 [79,] 0.3521 [80,] -0.8323 [81,] 0.1982 [82,] 0.4722 [83,] 0.2493 [84,] 0.1677 [85,] 0.2190 [86,] 0.1717 [87,] 0.3099 [88,] 0.4640 [89,] -0.4283 [90,] 0.4145 [91,] 0.0925 [92,] 0.2582 [93,] -0.4882 [94,] 0.4597 [95,] -0.8020 [96,] 0.4124 [97,] 0.0536 [98,] 0.1786 [99,] -0.1365 [100,] 0.1661 attr(,"class") [1] "maxLik" "maxim" "list" > print( a ) Maximum Likelihood estimation Newton-Raphson maximisation, 5 iterations Return code 1: gradient close to zero Log-Likelihood: -26.03 (1 free parameter(s)) Estimate(s): 2.095 > summary(a) -------------------------------------------- Maximum Likelihood estimation Newton-Raphson maximisation, 5 iterations Return code 1: gradient close to zero Log-Likelihood: -26.03 1 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] 2.10 0.21 10 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 -------------------------------------------- > ## Estimate with analytic gradient and hessian > a <- maxLik(loglik, gradlik, hesslik, start=1) > printRounded( a ) $maximum [1] -26.03 $estimate [1] 2.095 $gradient [1] 0 $hessian [,1] [1,] -22.78 $code [1] 1 $message [1] "gradient close to zero" $last.step NULL $fixed [1] FALSE $iterations [1] 5 $type [1] "Newton-Raphson maximisation" $gradientObs [,1] [1,] 0.4531 [2,] -0.3338 [3,] -0.3793 [4,] 0.0712 [5,] 0.2044 [6,] -0.8329 [7,] 0.1013 [8,] -1.6593 [9,] 0.3749 [10,] 0.4545 [11,] 0.0051 [12,] 0.1032 [13,] -0.4588 [14,] -0.4565 [15,] -0.1279 [16,] 0.3046 [17,] 0.1388 [18,] -0.0017 [19,] 0.2175 [20,] 0.0199 [21,] -0.7230 [22,] 0.1610 [23,] 0.2786 [24,] 0.2572 [25,] 0.2032 [26,] 0.3495 [27,] 0.2543 [28,] -0.1430 [29,] -0.3671 [30,] -0.4648 [31,] -0.1518 [32,] 0.0829 [33,] -0.4836 [34,] 0.4729 [35,] 0.2134 [36,] 0.2836 [37,] 0.4332 [38,] 0.3182 [39,] 0.0139 [40,] 0.0312 [41,] 0.1365 [42,] 0.3564 [43,] -0.2541 [44,] 0.4095 [45,] -0.0211 [46,] 0.1728 [47,] -0.2433 [48,] -0.8212 [49,] 0.2151 [50,] -0.0651 [51,] -0.2029 [52,] -0.0574 [53,] -1.6405 [54,] 0.4141 [55,] 0.4053 [56,] 0.4372 [57,] -0.0792 [58,] 0.1532 [59,] -0.4604 [60,] -0.1621 [61,] -0.0035 [62,] -0.0570 [63,] 0.4624 [64,] -0.5310 [65,] 0.2925 [66,] -0.1617 [67,] -1.1256 [68,] 0.3481 [69,] 0.4113 [70,] 0.1879 [71,] -0.4522 [72,] 0.1446 [73,] 0.4468 [74,] 0.0540 [75,] 0.2706 [76,] -0.0117 [77,] 0.2177 [78,] -0.3570 [79,] 0.3521 [80,] -0.8323 [81,] 0.1982 [82,] 0.4722 [83,] 0.2493 [84,] 0.1677 [85,] 0.2190 [86,] 0.1717 [87,] 0.3099 [88,] 0.4640 [89,] -0.4283 [90,] 0.4145 [91,] 0.0925 [92,] 0.2582 [93,] -0.4882 [94,] 0.4597 [95,] -0.8020 [96,] 0.4124 [97,] 0.0536 [98,] 0.1786 [99,] -0.1365 [100,] 0.1661 attr(,"class") [1] "maxLik" "maxim" "list" > print( a ) Maximum Likelihood estimation Newton-Raphson maximisation, 5 iterations Return code 1: gradient close to zero Log-Likelihood: -26.03 (1 free parameter(s)) Estimate(s): 2.095 > summary(a) -------------------------------------------- Maximum Likelihood estimation Newton-Raphson maximisation, 5 iterations Return code 1: gradient close to zero Log-Likelihood: -26.03 1 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] 2.10 0.21 10 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 -------------------------------------------- > > > ### maxNR > set.seed( 8 ) > ## ML estimation of exponential duration model: > t <- rexp(100, 2) > loglikSum <- function(theta) sum(log(theta) - theta*t) > ## Note the log-likelihood and gradient are summed over observations > gradlikSum <- function(theta) sum(1/theta - t) > ## Estimate with numeric gradient and Hessian > a <- maxNR(loglikSum, start=1, print.level=2) ----- Initial parameters: ----- fcn value: -46.49 parameter initial gradient free [1,] 1 53.51 1 Condition number of the (active) hessian: 1 -----Iteration 1 ----- -----Iteration 2 ----- -----Iteration 3 ----- -----Iteration 4 ----- -----Iteration 5 ----- -------------- gradient close to zero 5 iterations estimate: 2.151 Function value: -23.41 > print( a ) $maximum [1] -23.41 $estimate [1] 2.151 $gradient [1] -2.416e-07 $hessian [,1] [1,] -21.62 $code [1] 1 $message [1] "gradient close to zero" $last.step NULL $fixed [1] FALSE $iterations [1] 5 $type [1] "Newton-Raphson maximisation" attr(,"class") [1] "maxim" "list" > summary(a) -------------------------------------------- Newton-Raphson maximisation Number of iterations: 5 Return code: 1 gradient close to zero Function value: -23.41 Estimates: estimate gradient [1,] 2.151 -2.416e-07 -------------------------------------------- > ## You would probably prefer 1/mean(t) instead ;-) > ## Estimate with analytic gradient and Hessian > a <- maxNR(loglikSum, gradlikSum, hesslik, start=1) > print( a ) $maximum [1] -23.41 $estimate [1] 2.151 $gradient [1] 9.493e-08 $hessian [,1] [1,] -21.61 $code [1] 1 $message [1] "gradient close to zero" $last.step NULL $fixed [1] FALSE $iterations [1] 5 $type [1] "Newton-Raphson maximisation" attr(,"class") [1] "maxim" "list" > summary(a) -------------------------------------------- Newton-Raphson maximisation Number of iterations: 5 Return code: 1 gradient close to zero Function value: -23.41 Estimates: estimate gradient [1,] 2.151 9.493e-08 -------------------------------------------- > > > ### maximType > ## maximise two-dimensional exponential hat. Maximum is at c(2,1): > f <- function(a) exp(-(a[1] - 2)^2 - (a[2] - 1)^2) > m <- maxNR(f, start=c(0,0)) > print( m ) $maximum [1] 1 $estimate [1] 2 1 $gradient [1] 1.11e-10 0.00e+00 $hessian [,1] [,2] [1,] -2 0 [2,] 0 -2 $code [1] 1 $message [1] "gradient close to zero" $last.step NULL $fixed [1] FALSE FALSE $iterations [1] 7 $type [1] "Newton-Raphson maximisation" attr(,"class") [1] "maxim" "list" > summary(m) -------------------------------------------- Newton-Raphson maximisation Number of iterations: 7 Return code: 1 gradient close to zero Function value: 1 Estimates: estimate gradient [1,] 2 1.11e-10 [2,] 1 0.00e+00 -------------------------------------------- > maximType(m) [1] "Newton-Raphson maximisation" > ## Now use BFGS maximisation. > m <- maxBFGS(f, start=c(0,0)) > print( m ) $maximum [1] 1 $estimate [1] 2 1 $gradient [1] 1.088e-08 5.329e-09 $hessian [,1] [,2] [1,] -2 0 [2,] 0 -2 $code [1] 0 $message [1] "successful convergence " $last.step NULL $fixed [1] FALSE FALSE $iterations function 26 $type [1] "BFGS maximisation" $constraints NULL attr(,"class") [1] "maxim" > summary(m) -------------------------------------------- BFGS maximisation Number of iterations: 26 Return code: 0 successful convergence Function value: 1 Estimates: estimate gradient [1,] 2 1.088e-08 [2,] 1 5.329e-09 -------------------------------------------- > maximType(m) [1] "BFGS maximisation" > > ### Test maxNR with 0 iterations. 
Should perform no iterations > ### Request by Yves Croissant > f <- function(a) exp(-(a[1] - 2)^2 - (a[2] - 1)^2) > m0 <- maxNR(f, start=c(1.1, 2.1), iterlim=0) > summary(m0) -------------------------------------------- Newton-Raphson maximisation Number of iterations: 0 Return code: 4 Iteration limit exceeded. Function value: 0.1327 Estimates: estimate gradient [1,] 1.1 0.2388 [2,] 2.1 -0.2918 -------------------------------------------- > > ### nObs > set.seed( 10 ) > # Construct a simple OLS regression: > x1 <- runif(100) > x2 <- runif(100) > y <- 3 + 4*x1 + 5*x2 + rnorm(100) > m <- lm(y~x1+x2) # estimate it > nObs(m) [1] 100 > > > ### nParam > set.seed( 11 ) > # Construct a simple OLS regression: > x1 <- runif(100) > x2 <- runif(100) > y <- 3 + 4*x1 + 5*x2 + rnorm(100) > m <- lm(y~x1+x2) # estimate it > summary(m) Call: lm(formula = y ~ x1 + x2) Residuals: Min 1Q Median 3Q Max -2.3436 -0.5338 -0.0291 0.5501 2.6934 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 3.242 0.287 11.3 <2e-16 *** x1 3.974 0.395 10.1 <2e-16 *** x2 4.783 0.367 13.0 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Residual standard error: 0.99 on 97 degrees of freedom Multiple R-squared: 0.702, Adjusted R-squared: 0.696 F-statistic: 114 on 2 and 97 DF, p-value: <2e-16 > nParam(m) # you get 3 [1] 3 > > > ### numericGradient > # A simple example with Gaussian bell > f0 <- function(t0) exp(-t0[1]^2 - t0[2]^2) > numericGradient(f0, c(1,2)) [,1] [,2] [1,] -0.01348 -0.02695 > numericHessian(f0, t0=c(1,2)) [,1] [,2] [1,] 0.01349 0.05390 [2,] 0.05390 0.09433 > # An example with the analytic gradient > gradf0 <- function(t0) -2*t0*f0(t0) > numericHessian(f0, gradf0, t0=c(1,2)) [,1] [,2] [1,] 0.01348 0.05390 [2,] 0.05390 0.09433 > # The results should be similar as in the previous case > # The central numeric derivatives have usually quite a high precision > compareDerivatives(f0, gradf0, t0=1:2) -------- compare derivatives -------- Note: analytic gradient is vector. Transforming into a matrix form Function value: [1] 0.006738 Dim of analytic gradient: 1 2 numeric : 1 2 t0 [1] 1 2 analytic gradient [,1] [,2] [1,] -0.01348 -0.02695 numeric gradient [,1] [,2] [1,] -0.01348 -0.02695 (anal-num)/(0.5*(abs(anal)+abs(num))) [,1] [,2] [1,] -2.764e-10 -5.108e-11 Max relative difference: 2.764e-10 -------- END of compare derivatives -------- > # The differenc is around 1e-10 > > > ### returnCode > ## maximise the exponential bell > f1 <- function(x) exp(-x^2) > a <- maxNR(f1, start=2) > print( a ) $maximum [1] 1 $estimate [1] 3.632e-10 $gradient [1] -6.661e-10 $hessian [,1] [1,] -2 $code [1] 1 $message [1] "gradient close to zero" $last.step NULL $fixed [1] FALSE $iterations [1] 4 $type [1] "Newton-Raphson maximisation" attr(,"class") [1] "maxim" "list" > returnCode(a) # should be success (1 or 2) [1] 1 > ## Now try to maximise log() function > f2 <- function(x) log(x) > a <- maxNR(f2, start=2) > print( a ) $maximum [1] 9.277 $estimate [1] 10685 $gradient [1] 9.359e-05 $hessian [,1] [1,] 0.001776 $code [1] 4 $message [1] "Iteration limit exceeded." 
$last.step NULL $fixed [1] FALSE $iterations [1] 150 $type [1] "Newton-Raphson maximisation" attr(,"class") [1] "maxim" "list" > returnCode(a) # should give a failure (4) [1] 4 > > > ### returnMessage > ## maximise the exponential bell > f1 <- function(x) exp(-x^2) > a <- maxNR(f1, start=2) > print( a ) $maximum [1] 1 $estimate [1] 3.632e-10 $gradient [1] -6.661e-10 $hessian [,1] [1,] -2 $code [1] 1 $message [1] "gradient close to zero" $last.step NULL $fixed [1] FALSE $iterations [1] 4 $type [1] "Newton-Raphson maximisation" attr(,"class") [1] "maxim" "list" > returnMessage(a) # should be success (1 or 2) [1] "gradient close to zero" > ## Now try to maximise log() function > f2 <- function(x) log(x) > a <- maxNR(f2, start=2) > print( a ) $maximum [1] 9.277 $estimate [1] 10685 $gradient [1] 9.359e-05 $hessian [,1] [1,] 0.001776 $code [1] 4 $message [1] "Iteration limit exceeded." $last.step NULL $fixed [1] FALSE $iterations [1] 150 $type [1] "Newton-Raphson maximisation" attr(,"class") [1] "maxim" "list" > returnMessage(a) # should give a failure (4) [1] "Iteration limit exceeded." > > > ### summary.maxLik > set.seed( 15 ) > ## ML estimation of exponential duration model: > t <- rexp(100, 2) > loglik <- function(theta) log(theta) - theta*t > gradlik <- function(theta) 1/theta - t > hesslik <- function(theta) -100/theta^2 > ## Estimate with numeric gradient and hessian > a <- maxLik(loglik, start=1, print.level=2) ----- Initial parameters: ----- fcn value: -41.56 parameter initial gradient free [1,] 1 58.44 1 Condition number of the (active) hessian: 1 -----Iteration 1 ----- -----Iteration 2 ----- -----Iteration 3 ----- -----Iteration 4 ----- -----Iteration 5 ----- -------------- gradient close to zero 5 iterations estimate: 2.406 Function value: -12.2 > printRounded( a ) $maximum [1] -12.2 $estimate [1] 2.406 $gradient [1] 0 $hessian [,1] [1,] -17.28 $code [1] 1 $message [1] "gradient close to zero" $last.step NULL $fixed [1] FALSE $iterations [1] 5 $type [1] "Newton-Raphson maximisation" $gradientObs [,1] [1,] 0.3135 [2,] -0.5577 [3,] 0.2884 [4,] 0.3276 [5,] 0.0842 [6,] -0.9615 [7,] 0.2695 [8,] 0.4065 [9,] 0.2090 [10,] 0.2605 [11,] 0.3667 [12,] 0.1119 [13,] 0.1223 [14,] -0.1461 [15,] -1.1668 [16,] -0.6755 [17,] -0.0199 [18,] 0.0283 [19,] -0.7287 [20,] 0.2571 [21,] 0.0508 [22,] -0.1185 [23,] -0.0454 [24,] 0.0717 [25,] -1.8597 [26,] 0.2489 [27,] 0.2015 [28,] 0.1471 [29,] 0.3296 [30,] 0.2877 [31,] 0.1395 [32,] 0.0978 [33,] 0.0220 [34,] -0.0746 [35,] 0.2417 [36,] 0.1414 [37,] -0.0928 [38,] 0.0826 [39,] 0.1798 [40,] -0.2406 [41,] 0.2347 [42,] 0.3251 [43,] -0.3101 [44,] 0.2743 [45,] 0.1506 [46,] 0.3594 [47,] -0.1660 [48,] 0.1167 [49,] 0.4114 [50,] -0.8556 [51,] 0.3692 [52,] 0.0118 [53,] 0.0907 [54,] -0.4185 [55,] 0.1627 [56,] 0.3810 [57,] -0.2409 [58,] 0.3932 [59,] 0.1873 [60,] -0.0696 [61,] -0.5260 [62,] 0.3675 [63,] 0.2171 [64,] 0.2191 [65,] 0.2576 [66,] 0.3980 [67,] 0.2982 [68,] -0.0309 [69,] -0.0999 [70,] -0.6567 [71,] 0.0518 [72,] -0.6713 [73,] 0.3240 [74,] -0.7522 [75,] 0.2094 [76,] -1.0502 [77,] 0.3936 [78,] -0.1305 [79,] -1.3492 [80,] -0.0502 [81,] 0.2368 [82,] -0.0170 [83,] 0.1539 [84,] 0.2750 [85,] 0.1577 [86,] -0.4297 [87,] 0.2148 [88,] 0.4143 [89,] 0.2181 [90,] 0.2211 [91,] -0.0725 [92,] 0.2698 [93,] -0.0696 [94,] 0.3083 [95,] -0.1782 [96,] 0.1525 [97,] 0.1850 [98,] 0.0750 [99,] 0.3648 [100,] 0.0938 attr(,"class") [1] "maxLik" "maxim" "list" > print( a ) Maximum Likelihood estimation Newton-Raphson maximisation, 5 iterations Return code 1: gradient close to zero Log-Likelihood: 
-12.2 (1 free parameter(s)) Estimate(s): 2.406 > summary(a) -------------------------------------------- Maximum Likelihood estimation Newton-Raphson maximisation, 5 iterations Return code 1: gradient close to zero Log-Likelihood: -12.2 1 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] 2.406 0.241 10 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -------------------------------------------- > ## Estimate with analytic gradient and hessian > a <- maxLik(loglik, gradlik, hesslik, start=1) > printRounded( a ) $maximum [1] -12.2 $estimate [1] 2.406 $gradient [1] 0 $hessian [,1] [1,] -17.27 $code [1] 1 $message [1] "gradient close to zero" $last.step NULL $fixed [1] FALSE $iterations [1] 6 $type [1] "Newton-Raphson maximisation" $gradientObs [,1] [1,] 0.3135 [2,] -0.5577 [3,] 0.2884 [4,] 0.3276 [5,] 0.0842 [6,] -0.9615 [7,] 0.2695 [8,] 0.4065 [9,] 0.2090 [10,] 0.2605 [11,] 0.3667 [12,] 0.1119 [13,] 0.1223 [14,] -0.1461 [15,] -1.1668 [16,] -0.6755 [17,] -0.0199 [18,] 0.0283 [19,] -0.7287 [20,] 0.2571 [21,] 0.0508 [22,] -0.1185 [23,] -0.0454 [24,] 0.0717 [25,] -1.8597 [26,] 0.2489 [27,] 0.2015 [28,] 0.1471 [29,] 0.3296 [30,] 0.2877 [31,] 0.1395 [32,] 0.0978 [33,] 0.0220 [34,] -0.0746 [35,] 0.2417 [36,] 0.1414 [37,] -0.0928 [38,] 0.0826 [39,] 0.1798 [40,] -0.2406 [41,] 0.2347 [42,] 0.3251 [43,] -0.3101 [44,] 0.2743 [45,] 0.1506 [46,] 0.3594 [47,] -0.1660 [48,] 0.1167 [49,] 0.4114 [50,] -0.8556 [51,] 0.3692 [52,] 0.0118 [53,] 0.0907 [54,] -0.4185 [55,] 0.1627 [56,] 0.3810 [57,] -0.2409 [58,] 0.3932 [59,] 0.1873 [60,] -0.0696 [61,] -0.5260 [62,] 0.3675 [63,] 0.2171 [64,] 0.2191 [65,] 0.2576 [66,] 0.3980 [67,] 0.2982 [68,] -0.0309 [69,] -0.0999 [70,] -0.6567 [71,] 0.0518 [72,] -0.6713 [73,] 0.3240 [74,] -0.7522 [75,] 0.2094 [76,] -1.0502 [77,] 0.3936 [78,] -0.1305 [79,] -1.3492 [80,] -0.0502 [81,] 0.2368 [82,] -0.0170 [83,] 0.1539 [84,] 0.2750 [85,] 0.1577 [86,] -0.4297 [87,] 0.2148 [88,] 0.4143 [89,] 0.2181 [90,] 0.2211 [91,] -0.0725 [92,] 0.2698 [93,] -0.0696 [94,] 0.3083 [95,] -0.1782 [96,] 0.1525 [97,] 0.1850 [98,] 0.0750 [99,] 0.3648 [100,] 0.0938 attr(,"class") [1] "maxLik" "maxim" "list" > print( a ) Maximum Likelihood estimation Newton-Raphson maximisation, 6 iterations Return code 1: gradient close to zero Log-Likelihood: -12.2 (1 free parameter(s)) Estimate(s): 2.406 > summary(a) -------------------------------------------- Maximum Likelihood estimation Newton-Raphson maximisation, 6 iterations Return code 1: gradient close to zero Log-Likelihood: -12.2 1 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] 2.406 0.241 10 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -------------------------------------------- > > > ### summary.maxim and for "gradient"/"hessian" attributes > ### Test for infinity > ## maximize a 2D quadratic function: > f <- function(b) { + x <- b[1]; y <- b[2]; + val <- (x - 2)^2 + (y - 3)^2 + attr(val, "gradient") <- c(2*x - 4, 2*y - 6) + attr(val, "hessian") <- matrix(c(2, 0, 0, 2), 2, 2) + val + } > ## Use c(0,0) as initial value. 
> result1 <- maxNR( f, start = c(0,0) ) > print( result1 ) $maximum [1] Inf $estimate [1] -7.035e+155 -1.055e+156 $gradient [1] -1.407e+156 -2.110e+156 $hessian [,1] [,2] [1,] 2 0 [2,] 0 2 $code [1] 5 $message [1] "Infinite value" $last.step NULL $fixed [1] FALSE FALSE $iterations [1] 25 $type [1] "Newton-Raphson maximisation" attr(,"class") [1] "maxim" "list" > summary( result1 ) -------------------------------------------- Newton-Raphson maximisation Number of iterations: 25 Return code: 5 Infinite value Function value: Inf Estimates: estimate gradient [1,] -7.035e+155 -1.407e+156 [2,] -1.055e+156 -2.110e+156 -------------------------------------------- > ## Now use c(1000000, -777777) as initial value and ask for hessian > result2 <- maxNR( f, start = c( 1000000, -777777)) > print( result2 ) $maximum [1] Inf $estimate [1] 2.110e+155 -1.641e+155 $gradient [1] 4.221e+155 -3.283e+155 $hessian [,1] [,2] [1,] 2 0 [2,] 0 2 $code [1] 5 $message [1] "Infinite value" $last.step NULL $fixed [1] FALSE FALSE $iterations [1] 24 $type [1] "Newton-Raphson maximisation" attr(,"class") [1] "maxim" "list" > summary( result2 ) -------------------------------------------- Newton-Raphson maximisation Number of iterations: 24 Return code: 5 Infinite value Function value: Inf Estimates: estimate gradient [1,] 2.110e+155 4.221e+155 [2,] -1.641e+155 -3.283e+155 -------------------------------------------- > > > ### Test for "gradient"/"hessian" attributes. A case which converges. > hub <- function(x) { + v <- exp(-sum(x*x)) + val <- v + attr(val, "gradient") <- -2*x*v + attr(val, "hessian") <- 4*(x %*% t(x))*v - diag(2*c(v, v)) + val + } > summary(a <- maxNR(hub, start=c(2,1))) -------------------------------------------- Newton-Raphson maximisation Number of iterations: 7 Return code: 1 gradient close to zero Function value: 1 Estimates: estimate gradient [1,] -7.448e-18 1.490e-17 [2,] -3.724e-18 7.448e-18 -------------------------------------------- > ## Now test "gradient" attribute for BHHH/3-parameter probit > N <- 1000 > loglikProbit <- function( beta) { + xb <- x %*% beta + loglik <- ifelse(y == 0, + pnorm( xb, log=TRUE, lower.tail=FALSE), + pnorm( xb, log.p=TRUE)) + grad <- ifelse(y == 0, + -dnorm(xb)/pnorm(xb, lower.tail=FALSE), + dnorm(xb)/pnorm(xb)) + grad <- grad*x + attr(loglik, "gradient") <- grad + loglik + } > x <- runif(N) > x <- cbind(x, x - runif(N), x - runif(N)) > y <- x[,1] + 2*x[,2] - x[,3] + rnorm(N) > 0 > summary(maxLik(loglikProbit, start=c(0,0,0), method="bhhh")) -------------------------------------------- Maximum Likelihood estimation BHHH maximisation, 8 iterations Return code 2: successive function values within tolerance limit Log-Likelihood: -508.4 3 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] 0.8578 0.0904 9.49 < 2e-16 *** [2,] 1.9389 0.1514 12.81 < 2e-16 *** [3,] -0.8253 0.1339 -6.16 7.2e-10 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 -------------------------------------------- > > > > ### vcov.maxLik > set.seed( 17 ) > ## ML estimation of exponential duration model: > t <- rexp(100, 2) > ## Estimate with numeric gradient and hessian > a <- maxLik(loglik, start=1, print.level=2) ----- Initial parameters: ----- fcn value: -53.67 parameter initial gradient free [1,] 1 46.33 1 Condition number of the (active) hessian: 1 -----Iteration 1 ----- -----Iteration 2 ----- -----Iteration 3 ----- -----Iteration 4 ----- -----Iteration 5 ----- -------------- gradient close to zero 5 iterations estimate: 1.863 Function value: -37.76 > printRounded( a ) $maximum [1] -37.76 $estimate [1] 1.863 $gradient [1] 0 $hessian [,1] [1,] -28.79 $code [1] 1 $message [1] "gradient close to zero" $last.step NULL $fixed [1] FALSE $iterations [1] 5 $type [1] "Newton-Raphson maximisation" $gradientObs [,1] [1,] -0.2767 [2,] 0.3953 [3,] 0.4979 [4,] -0.4840 [5,] -0.4049 [6,] 0.2567 [7,] -0.4323 [8,] 0.1893 [9,] 0.2066 [10,] 0.3285 [11,] 0.1665 [12,] 0.3936 [13,] 0.5104 [14,] 0.3261 [15,] 0.2335 [16,] 0.5046 [17,] 0.4612 [18,] 0.4608 [19,] -1.0643 [20,] 0.2375 [21,] -0.0465 [22,] 0.4734 [23,] -0.4004 [24,] 0.2348 [25,] 0.2846 [26,] 0.4029 [27,] -0.2378 [28,] 0.4411 [29,] 0.4821 [30,] 0.4955 [31,] -0.3652 [32,] 0.3878 [33,] -0.4067 [34,] -0.1809 [35,] 0.4185 [36,] -0.3304 [37,] -0.2404 [38,] -0.4153 [39,] 0.4612 [40,] -3.8930 [41,] 0.0334 [42,] -0.6293 [43,] 0.4325 [44,] 0.0365 [45,] 0.2462 [46,] -0.2268 [47,] 0.5306 [48,] 0.5160 [49,] -0.6772 [50,] 0.1526 [51,] 0.2219 [52,] 0.4363 [53,] -1.5633 [54,] -0.0502 [55,] 0.3124 [56,] -0.1465 [57,] -0.3567 [58,] 0.4810 [59,] 0.3985 [60,] 0.4233 [61,] 0.3301 [62,] 0.3526 [63,] -0.3018 [64,] 0.4708 [65,] 0.2910 [66,] 0.1197 [67,] 0.5074 [68,] -0.2495 [69,] -0.0505 [70,] 0.2800 [71,] 0.5255 [72,] -0.3740 [73,] 0.0235 [74,] -0.4133 [75,] -0.3585 [76,] 0.4766 [77,] 0.2416 [78,] -0.0411 [79,] -1.3477 [80,] -0.2567 [81,] 0.3243 [82,] 0.3452 [83,] -0.9342 [84,] 0.3324 [85,] -0.8498 [86,] 0.3158 [87,] 0.4752 [88,] 0.3726 [89,] -0.0253 [90,] 0.3766 [91,] -2.1083 [92,] -0.4496 [93,] 0.1682 [94,] 0.3510 [95,] 0.5254 [96,] -0.0662 [97,] 0.3878 [98,] 0.2535 [99,] -0.7967 [100,] 0.1333 attr(,"class") [1] "maxLik" "maxim" "list" > print( a ) Maximum Likelihood estimation Newton-Raphson maximisation, 5 iterations Return code 1: gradient close to zero Log-Likelihood: -37.76 (1 free parameter(s)) Estimate(s): 1.863 > vcov(a) [,1] [1,] 0.03473 > ## Estimate with analytic gradient and hessian > a <- maxLik(loglik, gradlik, hesslik, start=1) > printRounded( a ) $maximum [1] -37.76 $estimate [1] 1.863 $gradient [1] 0 $hessian [,1] [1,] -28.8 $code [1] 1 $message [1] "gradient close to zero" $last.step NULL $fixed [1] FALSE $iterations [1] 5 $type [1] "Newton-Raphson maximisation" $gradientObs [,1] [1,] -0.2767 [2,] 0.3953 [3,] 0.4979 [4,] -0.4840 [5,] -0.4049 [6,] 0.2567 [7,] -0.4323 [8,] 0.1893 [9,] 0.2066 [10,] 0.3285 [11,] 0.1665 [12,] 0.3936 [13,] 0.5104 [14,] 0.3261 [15,] 0.2335 [16,] 0.5046 [17,] 0.4612 [18,] 0.4608 [19,] -1.0643 [20,] 0.2375 [21,] -0.0465 [22,] 0.4734 [23,] -0.4004 [24,] 0.2348 [25,] 0.2846 [26,] 0.4029 [27,] -0.2378 [28,] 0.4411 [29,] 0.4821 [30,] 0.4955 [31,] -0.3652 [32,] 0.3878 [33,] -0.4067 [34,] -0.1809 [35,] 0.4185 [36,] -0.3304 [37,] -0.2404 [38,] -0.4153 [39,] 0.4612 [40,] -3.8930 [41,] 0.0334 [42,] -0.6293 [43,] 0.4325 [44,] 0.0365 [45,] 0.2462 [46,] -0.2268 [47,] 0.5306 [48,] 0.5160 [49,] -0.6772 [50,] 0.1526 [51,] 0.2219 [52,] 0.4363 [53,] -1.5633 [54,] -0.0502 [55,] 0.3124 [56,] -0.1465 
[57,] -0.3567 [58,] 0.4810 [59,] 0.3985 [60,] 0.4233 [61,] 0.3301 [62,] 0.3526 [63,] -0.3018 [64,] 0.4708 [65,] 0.2910 [66,] 0.1197 [67,] 0.5074 [68,] -0.2495 [69,] -0.0505 [70,] 0.2800 [71,] 0.5255 [72,] -0.3740 [73,] 0.0235 [74,] -0.4133 [75,] -0.3585 [76,] 0.4766 [77,] 0.2416 [78,] -0.0411 [79,] -1.3477 [80,] -0.2567 [81,] 0.3243 [82,] 0.3452 [83,] -0.9342 [84,] 0.3324 [85,] -0.8498 [86,] 0.3158 [87,] 0.4752 [88,] 0.3726 [89,] -0.0253 [90,] 0.3766 [91,] -2.1083 [92,] -0.4496 [93,] 0.1682 [94,] 0.3510 [95,] 0.5254 [96,] -0.0662 [97,] 0.3878 [98,] 0.2535 [99,] -0.7967 [100,] 0.1333 attr(,"class") [1] "maxLik" "maxim" "list" > print( a ) Maximum Likelihood estimation Newton-Raphson maximisation, 5 iterations Return code 1: gradient close to zero Log-Likelihood: -37.76 (1 free parameter(s)) Estimate(s): 1.863 > vcov(a) [,1] [1,] 0.03472 > print(stdEr(a)) [1] 0.1863 > # test single stdEr > > proc.time() user system elapsed 0.580 0.040 0.606 maxLik/tests/finalHessian.Rout.save0000644000176000001440000002652712215577101017122 0ustar ripleyusers R version 3.0.1 (2013-05-16) -- "Good Sport" Copyright (C) 2013 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > ### Test the 'finalHessian' argument of optimization routines > > library(maxLik) Loading required package: miscTools Please cite the 'maxLik' package as: Henningsen, Arne and Toomet, Ott (2011). maxLik: A package for maximum likelihood estimation in R. Computational Statistics 26(3), 443-458. DOI 10.1007/s00180-010-0217-1. 
If you have questions, suggestions, or comments regarding the 'maxLik' package, please use a forum or 'tracker' at maxLik's R-Forge site: https://r-forge.r-project.org/projects/maxlik/ > set.seed( 4 ) > > # log-likelihood function, gradient, and Hessian for 1-parameter case (exponential distribution) > ll1i <- function(theta) { + if(!all(theta > 0)) + return(NA) + log(theta) - theta*t + } > ll1 <- function(theta) sum( log(theta) - theta*t ) > gr1i <- function(theta) 1/theta - t > gr1 <- function(theta) sum( 1/theta - t ) > hs1 <- function(theta) -100/theta^2 > t <- rexp( 100, 2 ) > > ## the same functions for 2-variable case (normal distribution) > ll2 <- function( param ) { + ## log likelihood function + mu <- param[ 1 ] + sigma <- param[ 2 ] + if(!(sigma > 0)) + return(NA) + # to avoid warnings in the output + N <- length( x ) + llValue <- -0.5 * N * log( 2 * pi ) - N * log( sigma ) - + 0.5 * sum( ( x - mu )^2 / sigma^2 ) + return( llValue ) + } > > ## log likelihood function (individual observations) > ll2i <- function( param ) { + mu <- param[ 1 ] + sigma <- param[ 2 ] + if(!(sigma > 0)) + return(NA) + # to avoid warnings in the output + llValues <- -0.5 * log( 2 * pi ) - log( sigma ) - + 0.5 * ( x - mu )^2 / sigma^2 + return( llValues ) + } > > gr2 <- function( param ) { + ## function to calculate analytical gradients + mu <- param[ 1 ] + sigma <- param[ 2 ] + N <- length( x ) + llGrad <- c( sum( ( x - mu ) / sigma^2 ), + - N / sigma + sum( ( x - mu )^2 / sigma^3 ) ) + return( llGrad ) + } > > ## function to calculate analytical gradients (individual observations) > gr2i <- function( param ) { + mu <- param[ 1 ] + sigma <- param[ 2 ] + llGrads <- cbind( ( x - mu ) / sigma^2, + - 1 / sigma + ( x - mu )^2 / sigma^3 ) + return( llGrads ) + } > > ## function to calculate analytical Hessians > hs2 <- function( param ) { + mu <- param[ 1 ] + sigma <- param[ 2 ] + N <- length( x ) + llHess <- matrix( c( + N * ( - 1 / sigma^2 ), + sum( - 2 * ( x - mu ) / sigma^3 ), + sum( - 2 * ( x - mu ) / sigma^3 ), + N / sigma^2 + sum( - 3 * ( x - mu )^2 / sigma^4 ) ), + nrow = 2, ncol = 2 ) + return( llHess ) + } > x <- rnorm(100, 1, 2) > > > ## NR > # Estimate with only function values (single parameter) > a <- maxLik( ll1i, gr1i, start = 1, method = "NR" ) > summary(a ) -------------------------------------------- Maximum Likelihood estimation Newton-Raphson maximisation, 5 iterations Return code 1: gradient close to zero Log-Likelihood: -25.05386 1 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] 2.11586 0.21159 10 < 2.2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -------------------------------------------- > b <- maxLik( ll1i, gr1i, start = 1, method = "NR", finalHessian="bhhh") > # should issue a warning as BHHH not possible > summary(b ) -------------------------------------------- Maximum Likelihood estimation Newton-Raphson maximisation, 5 iterations Return code 1: gradient close to zero Log-Likelihood: -25.05386 1 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] 2.11586 0.21453 9.8629 < 2.2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 -------------------------------------------- > c <- maxLik( ll1i, gr1i, start = 1, method = "NR", finalHessian=FALSE) > summary(c) -------------------------------------------- Maximum Likelihood estimation Newton-Raphson maximisation, 5 iterations Return code 1: gradient close to zero Log-Likelihood: -25.05386 1 free parameters Estimates: Estimate t value Pr(> t) [1,] 2.1159 NA NA -------------------------------------------- > ## (vector parameter) > a <- maxLik( ll2, gr2, start = c(0,1), method = "NR" ) > summary(a ) -------------------------------------------- Maximum Likelihood estimation Newton-Raphson maximisation, 7 iterations Return code 1: gradient close to zero Log-Likelihood: -212.7524 2 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] 0.85317 0.20311 4.2005 2.663e-05 *** [2,] 2.03112 0.14362 14.1421 < 2.2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -------------------------------------------- > b <- maxLik( ll2, gr2, start = c(0,1), method = "NR", finalHessian="bhhh") Warning message: In maxNRCompute(fn = logLikAttr, fnOrig = fn, gradOrig = grad, hessOrig = hess, : For computing the final Hessian by 'BHHH' method, the log-likelihood or gradient must be supplied by observations > # should issue a warning as BHHH not possible > summary(b ) -------------------------------------------- Maximum Likelihood estimation Newton-Raphson maximisation, 7 iterations Return code 1: gradient close to zero Log-Likelihood: -212.7524 2 free parameters Estimates: Estimate t value Pr(> t) [1,] 0.85317 NA NA [2,] 2.03112 NA NA -------------------------------------------- > c <- maxLik( ll2, gr2, start = c(0,1), method = "NR", finalHessian=FALSE) > summary(c) -------------------------------------------- Maximum Likelihood estimation Newton-Raphson maximisation, 7 iterations Return code 1: gradient close to zero Log-Likelihood: -212.7524 2 free parameters Estimates: Estimate t value Pr(> t) [1,] 0.85317 NA NA [2,] 2.03112 NA NA -------------------------------------------- > > ## BFGSR > # Estimate with only function values (single parameter) > a <- maxLik( ll1i, gr1i, start = 1, method = "BFGSR" ) > summary(a ) -------------------------------------------- Maximum Likelihood estimation BFGSR maximization, 26 iterations Return code 2: successive function values within tolerance limit Log-Likelihood: -25.05386 1 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] 2.11587 0.21159 10 < 2.2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -------------------------------------------- > b <- maxLik( ll1i, gr1i, start = 1, method = "BFGSR", finalHessian="bhhh") > # should issue a warning as BHHH not possible > summary(b ) -------------------------------------------- Maximum Likelihood estimation BFGSR maximization, 26 iterations Return code 2: successive function values within tolerance limit Log-Likelihood: -25.05386 1 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] 2.11587 0.21453 9.8629 < 2.2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 -------------------------------------------- > c <- maxLik( ll1i, gr1i, start = 1, method = "BFGSR", finalHessian=FALSE) > summary(c) -------------------------------------------- Maximum Likelihood estimation BFGSR maximization, 26 iterations Return code 2: successive function values within tolerance limit Log-Likelihood: -25.05386 1 free parameters Estimates: Estimate t value Pr(> t) [1,] 2.1159 NA NA -------------------------------------------- > # Estimate with only function values (vector parameter) > a <- maxLik( ll2, gr2, start = c(0,1), method = "BFGSR" ) > summary(a ) -------------------------------------------- Maximum Likelihood estimation BFGSR maximization, 22 iterations Return code 2: successive function values within tolerance limit Log-Likelihood: -212.7524 2 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] 0.85282 0.20309 4.1992 2.679e-05 *** [2,] 2.03094 0.14359 14.1440 < 2.2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -------------------------------------------- > b <- maxLik( ll2, gr2, start = c(0,1), method = "BFGSR", finalHessian="bhhh") Warning message: In maxBFGSRCompute(fn = logLikAttr, fnOrig = fn, gradOrig = grad, : For computing the final Hessian by 'BHHH' method, the log-likelihood or gradient must be supplied by observations > # should issue a warning as BHHH not possible > summary(b ) -------------------------------------------- Maximum Likelihood estimation BFGSR maximization, 22 iterations Return code 2: successive function values within tolerance limit Log-Likelihood: -212.7524 2 free parameters Estimates: Estimate t value Pr(> t) [1,] 0.85282 NA NA [2,] 2.03094 NA NA -------------------------------------------- > c <- maxLik( ll2, gr2, start = c(0,1), method = "BFGSR", finalHessian=FALSE) > summary(c) -------------------------------------------- Maximum Likelihood estimation BFGSR maximization, 22 iterations Return code 2: successive function values within tolerance limit Log-Likelihood: -212.7524 2 free parameters Estimates: Estimate t value Pr(> t) [1,] 0.85282 NA NA [2,] 2.03094 NA NA -------------------------------------------- > > > ### Nelder-Mead > ## Individual observations only > b <- maxLik( ll2i, start = c(0,1), method = "NM", finalHessian="bhhh") > summary(b) -------------------------------------------- Maximum Likelihood estimation Nelder-Mead maximisation, 63 iterations Return code 0: successful convergence Log-Likelihood: -212.7524 2 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] 0.85301 0.20316 4.1986 2.685e-05 *** [2,] 2.03121 0.16700 12.1629 < 2.2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 -------------------------------------------- > ## Individual observations, summed gradient > b <- maxLik( ll2i, gr2, start = c(0,1), method = "NM", finalHessian="bhhh") Warning message: In maxOptim(fn = fn, grad = grad, hess = hess, start = start, method = "Nelder-Mead", : For computing the final Hessian by 'BHHH' method, the log-likelihood or gradient must be supplied by observations > # should issue a warning as BHHH not selected > # (yes, could do it based on individual likelihood and numeric gradient) > summary(b) -------------------------------------------- Maximum Likelihood estimation Nelder-Mead maximisation, 63 iterations Return code 0: successful convergence Log-Likelihood: -212.7524 2 free parameters Estimates: Estimate t value Pr(> t) [1,] 0.85301 NA NA [2,] 2.03121 NA NA -------------------------------------------- > > proc.time() user system elapsed 0.332 0.036 0.354 maxLik/tests/numericGradient.Rout.save0000644000176000001440000000330312215600705017616 0ustar ripleyusers R version 3.0.1 (2013-05-16) -- "Good Sport" Copyright (C) 2013 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > > ### test numeric methods, in particular handling of unequal > ### function lengths > library(maxLik) Loading required package: miscTools Please cite the 'maxLik' package as: Henningsen, Arne and Toomet, Ott (2011). maxLik: A package for maximum likelihood estimation in R. Computational Statistics 26(3), 443-458. DOI 10.1007/s00180-010-0217-1. If you have questions, suggestions, or comments regarding the 'maxLik' package, please use a forum or 'tracker' at maxLik's R-Forge site: https://r-forge.r-project.org/projects/maxlik/ > > f <- function(x) { + if(x[1] <= 0) + return(NA) + # support of x[1] is (0, Inf) + return(c(log(x[1]),x[2])) + } > > ng <- numericGradient(f, c(0.01,1), eps=0.1) Warning message: In numericGradient(f, c(0.01, 1), eps = 0.1) : Function value at -0.04 1.00 = NA (length 1) does not conform with the length at original value 2 Component 1 set to NA > > nh <- try(numericHessian(f, t0=c(0.01,1), eps=0.1)) There were 13 warnings (use warnings() to see them) > > proc.time() user system elapsed 0.188 0.016 0.192 maxLik/tests/fitExpDist.Rout.save0000644000176000001440000004067112231233051016564 0ustar ripleyusers R version 3.0.2 (2013-09-25) -- "Frisbee Sailing" Copyright (C) 2013 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. 
> ## load the maxLik package > library( maxLik ) Loading required package: miscTools Please cite the 'maxLik' package as: Henningsen, Arne and Toomet, Ott (2011). maxLik: A package for maximum likelihood estimation in R. Computational Statistics 26(3), 443-458. DOI 10.1007/s00180-010-0217-1. If you have questions, suggestions, or comments regarding the 'maxLik' package, please use a forum or 'tracker' at maxLik's R-Forge site: https://r-forge.r-project.org/projects/maxlik/ > > ## fitting an exponential distribution by ML, > ## e.g. estimation of an exponential duration model > > # generate data > options(digits=4) > # less differences b/w different platforms > set.seed( 4 ) > t <- rexp( 100, 2 ) > > # log-likelihood function, gradient, and Hessian > loglik <- function(theta) log(theta) - theta*t > loglikSum <- function(theta) sum( log(theta) - theta*t ) > gradlik <- function(theta) 1/theta - t > gradlikSum <- function(theta) sum( 1/theta - t ) > hesslik <- function(theta) -100/theta^2 > > > ## NR estimation > # Estimate with only function values > ml <- maxLik( loglik, start = 1 ) > print( ml ) Maximum Likelihood estimation Newton-Raphson maximisation, 5 iterations Return code 1: gradient close to zero Log-Likelihood: -25.05 (1 free parameter(s)) Estimate(s): 2.116 > summary( ml ) -------------------------------------------- Maximum Likelihood estimation Newton-Raphson maximisation, 5 iterations Return code 1: gradient close to zero Log-Likelihood: -25.05 1 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] 2.116 0.212 10 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -------------------------------------------- > nObs( ml ) [1] 100 > print.default( ml ) $maximum [1] -25.05 $estimate [1] 2.116 $gradient [1] -3.804e-07 $hessian [,1] [1,] -22.33 $code [1] 1 $message [1] "gradient close to zero" $last.step NULL $fixed [1] FALSE $iterations [1] 5 $type [1] "Newton-Raphson maximisation" $gradientObs [,1] [1,] 0.386821 [2,] -1.679351 [3,] 0.038568 [4,] 0.071298 [5,] 0.159047 [6,] 0.105192 [7,] 0.248215 [8,] 0.447271 [9,] 0.217946 [10,] 0.054046 [11,] -0.867528 [12,] 0.328583 [13,] 0.270226 [14,] 0.258113 [15,] 0.302820 [16,] -0.051988 [17,] 0.442844 [18,] 0.405509 [19,] -0.447366 [20,] -0.033385 [21,] 0.350565 [22,] -0.150789 [23,] -2.297263 [24,] 0.388691 [25,] -0.444123 [26,] 0.443408 [27,] 0.276873 [28,] -0.151173 [29,] 0.226692 [30,] 0.192216 [31,] -0.216352 [32,] -0.427312 [33,] -0.415672 [34,] 0.278199 [35,] -0.636970 [36,] 0.394517 [37,] 0.344061 [38,] -0.620260 [39,] 0.457767 [40,] 0.167204 [41,] 0.353776 [42,] -0.065341 [43,] 0.147748 [44,] 0.282721 [45,] -0.015243 [46,] 0.079882 [47,] 0.274372 [48,] 0.452304 [49,] -1.144889 [50,] 0.405281 [51,] -0.227730 [52,] 0.433252 [53,] 0.081373 [54,] -0.081126 [55,] -0.739939 [56,] 0.207183 [57,] 0.113523 [58,] 0.119193 [59,] 0.342990 [60,] 0.093240 [61,] 0.440175 [62,] -0.073023 [63,] -0.501037 [64,] 0.075379 [65,] -0.172200 [66,] 0.045447 [67,] -0.025803 [68,] 0.181707 [69,] 0.447989 [70,] -0.160098 [71,] 0.439822 [72,] 0.248287 [73,] 0.403098 [74,] -0.190733 [75,] -0.472651 [76,] -0.065058 [77,] -0.455150 [78,] 0.159506 [79,] 0.376819 [80,] 0.121606 [81,] 0.301921 [82,] -0.001157 [83,] 0.414118 [84,] 0.400994 [85,] 0.349289 [86,] -0.996985 [87,] 0.378741 [88,] 0.385031 [89,] -0.316836 [90,] 0.192621 [91,] 0.328718 [92,] -0.042173 [93,] 0.060584 [94,] -0.644872 [95,] -0.632560 [96,] -0.356327 [97,] -0.323979 [98,] 0.220529 [99,] -0.832596 [100,] 0.361129 attr(,"class") [1] "maxLik" "maxim" "list" > 
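> ## Aside (illustration only, not part of the original expected output):
> ## when the log-likelihood is supplied per observation, the aggregate
> ## gradient stored in the fitted object should equal -- up to numerical
> ## error -- the column sums of the observation-wise gradients shown in
> ## 'gradientObs' above; 'gradAggCheck' is an ad-hoc name used only here.
> gradAggCheck <- all.equal( colSums( ml$gradientObs ), ml$gradient,
+                            check.attributes = FALSE, tolerance = 1e-6 )
> 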
# log-likelihood value summed over all observations > mlSum <- maxLik( loglikSum, start = 1 ) > all.equal( mlSum[], ml[-11], tolerance = 1e-3 ) [1] TRUE > > # Estimate with analytic gradient > mlg <- maxLik( loglik, gradlik, start = 1 ) > nObs( mlg ) [1] 100 > all.equal( mlg, ml, tolerance = 1e-3 ) [1] TRUE > # gradient summed over all observations > mlgSum <- maxLik( loglikSum, gradlikSum, start = 1 ) > all.equal( mlgSum[], mlg[-11], tolerance = 1e-3 ) [1] TRUE > > # Estimate with analytic gradient and Hessian > mlgh <- maxLik( loglik, gradlik, hesslik, start = 1 ) > all.equal( mlgh, mlg, tolerance = 1e-3 ) [1] TRUE > > > ## BHHH estimation > # Estimate with only function values > mlBhhh <- maxLik( loglik, start = 1, method = "BHHH" ) > print( mlBhhh ) Maximum Likelihood estimation BHHH maximisation, 5 iterations Return code 2: successive function values within tolerance limit Log-Likelihood: -25.05 (1 free parameter(s)) Estimate(s): 2.116 > summary( mlBhhh ) -------------------------------------------- Maximum Likelihood estimation BHHH maximisation, 5 iterations Return code 2: successive function values within tolerance limit Log-Likelihood: -25.05 1 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] 2.116 0.215 9.86 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -------------------------------------------- > nObs( mlBhhh ) [1] 100 > all.equal( mlBhhh[ -c( 5, 6, 10 ) ], ml[ -c( 5, 6, 10 ) ], + check.attributes = FALSE, tolerance = 1e-3 ) [1] "Component 4: Mean relative difference: 0.02778" > > # Estimate with analytic gradient > mlgBhhh <- maxLik( loglik, gradlik, start = 1, method = "BHHH" ) > nObs( mlgBhhh ) [1] 100 > all.equal( mlgBhhh, mlBhhh, tolerance = 1e-3 ) [1] TRUE > > # Estimate with analytic gradient and Hessian (unused during estimation) > mlghBhhh <- maxLik( loglik, gradlik, hesslik, start = 1, method = "BHHH" ) > all.equal( mlghBhhh, mlgBhhh, tolerance = 1e-3 ) [1] TRUE > > ## BFGS estimation > # Estimate with only function values > mlBfgs <- maxLik( loglik, start = 1, method = "BFGS" ) > print( mlBfgs ) Maximum Likelihood estimation BFGS maximisation, 14 iterations Return code 0: successful convergence Log-Likelihood: -25.05 (1 free parameter(s)) Estimate(s): 2.116 > summary( mlBfgs ) -------------------------------------------- Maximum Likelihood estimation BFGS maximisation, 14 iterations Return code 0: successful convergence Log-Likelihood: -25.05 1 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] 2.116 0.212 10 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 -------------------------------------------- > nObs( mlBfgs ) [1] 100 > all.equal( mlBfgs[ -c( 5, 6, 9, 10, 11 ) ], ml[ -c( 5, 6, 9, 10 ) ], + tolerance = 1e-3 ) [1] TRUE > # log-likelihood value summed over all observations > mlSumBfgs <- maxLik( loglikSum, start = 1, method = "BFGS" ) > all.equal( mlSumBfgs[], mlBfgs[-12], tolerance = 1e-3 ) [1] TRUE > > # Estimate with analytic gradient > mlgBfgs <- maxLik( loglik, gradlik, start = 1, method = "BFGS" ) > nObs( mlgBfgs ) [1] 100 > all.equal( mlgBfgs, mlBfgs, tolerance = 1e-3 ) [1] TRUE > # gradient summed over all observations > mlgSumBfgs <- maxLik( loglikSum, gradlikSum, start = 1, method = "BFGS" ) > all.equal( mlgSumBfgs[], mlgBfgs[-12], tolerance = 1e-3 ) [1] TRUE > > # Estimate with analytic gradient and Hessian (unused during estimation) > mlghBfgs <- maxLik( loglik, gradlik, hesslik, start = 1, method = "BFGS" ) > all.equal( mlghBfgs, mlgBfgs, tolerance = 1e-3 ) [1] TRUE > > > ## NM estimation > # Estimate with only function values > mlNm <- maxLik( loglik, start = 1, method = "NM" ) Warning message: In optim(par = start[!fixed], fn = logLikFunc, control = control, : one-dimensional optimization by Nelder-Mead is unreliable: use "Brent" or optimize() directly > print( mlNm ) Maximum Likelihood estimation Nelder-Mead maximisation, 28 iterations Return code 0: successful convergence Log-Likelihood: -25.05 (1 free parameter(s)) Estimate(s): 2.116 > summary( mlNm ) -------------------------------------------- Maximum Likelihood estimation Nelder-Mead maximisation, 28 iterations Return code 0: successful convergence Log-Likelihood: -25.05 1 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] 2.116 0.212 10 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -------------------------------------------- > nObs( mlNm ) [1] 100 > all.equal( mlNm[ -c( 5, 6, 9, 10, 11 ) ], ml[ -c( 5, 6, 9, 10 ) ], + tolerance = 1e-3 ) [1] "Component 3: Mean relative difference: 0.9999" > > # Estimate with analytic gradient (unused during estimation) > mlgNm <- maxLik( loglik, gradlik, start = 1, method = "NM" ) Warning message: In optim(par = start[!fixed], fn = logLikFunc, control = control, : one-dimensional optimization by Nelder-Mead is unreliable: use "Brent" or optimize() directly > nObs( mlgNm ) [1] 100 > all.equal( mlgNm, mlNm, tolerance = 1e-3 ) [1] TRUE > > # Estimate with analytic gradient and Hessian (both unused during estimation) > mlghNm <- maxLik( loglik, gradlik, hesslik, start = 1, method = "NM" ) Warning message: In optim(par = start[!fixed], fn = logLikFunc, control = control, : one-dimensional optimization by Nelder-Mead is unreliable: use "Brent" or optimize() directly > all.equal( mlghNm, mlgNm, tolerance = 1e-3 ) [1] TRUE > > ## SANN estimation > # Estimate with only function values > mlSann <- maxLik( loglik, start = 1, method = "SANN" ) > print( mlSann ) Maximum Likelihood estimation SANN maximisation, 10000 iterations Return code 0: successful convergence Log-Likelihood: -25.05 (1 free parameter(s)) Estimate(s): 2.116 > summary( mlSann ) -------------------------------------------- Maximum Likelihood estimation SANN maximisation, 10000 iterations Return code 0: successful convergence Log-Likelihood: -25.05 1 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] 2.116 0.212 10 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 -------------------------------------------- > nObs( mlSann ) [1] 100 > all.equal( mlSann[ -c( 5, 6, 9, 10, 11 ) ], ml[ -c( 5, 6, 9, 10 ) ], + tolerance = 1e-3 ) [1] TRUE > > # Estimate with analytic gradient (unused during estimation) > mlgSann <- maxLik( loglik, gradlik, start = 1, method = "SANN" ) > nObs( mlgSann ) [1] 100 > all.equal( mlgSann, mlSann, tolerance = 1e-3 ) [1] TRUE > > # Estimate with analytic gradient and Hessian (both unused during estimation) > mlghSann <- maxLik( loglik, gradlik, hesslik, start = 1, method = "SANN" ) > all.equal( mlghSann, mlgSann, tolerance = 1e-3 ) [1] TRUE > > > ## CG estimation > # Estimate with only function values > mlCg <- maxLik( loglik, start = 1, method = "CG" ) > print(summary( mlCg)) -------------------------------------------- Maximum Likelihood estimation CG maximisation, 33 iterations Return code 0: successful convergence Log-Likelihood: -25.05 1 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] 2.116 0.212 10 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -------------------------------------------- > > # Estimate with analytic gradient > mlgCg <- maxLik( loglik, gradlik, start = 1, method = "CG" ) > print(summary( mlgCg)) -------------------------------------------- Maximum Likelihood estimation CG maximisation, 33 iterations Return code 0: successful convergence Log-Likelihood: -25.05 1 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] 2.116 0.212 10 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -------------------------------------------- > > # Estimate with analytic gradient and Hessian (not used for estimation) > mlghCg <- maxLik( loglik, gradlik, hesslik, start = 1, method = "CG" ) > print(summary( mlghCg)) -------------------------------------------- Maximum Likelihood estimation CG maximisation, 33 iterations Return code 0: successful convergence Log-Likelihood: -25.05 1 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] 2.116 0.212 10 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 -------------------------------------------- > > > ## test for method "estfun" > library( sandwich ) > try( estfun( mlSum ) ) Error in estfun.maxLik(mlSum) : cannot return the gradients of the log-likelihood function evaluated at each observation: please re-run 'maxLik' and provide a gradient function using argument 'grad' or (if no gradient function is specified) a log-likelihood function using argument 'logLik' that return the gradients or log-likelihood values, respectively, at each observation > estfun( ml )[ 1:5, , drop = FALSE ] [,1] [1,] 0.38682 [2,] -1.67935 [3,] 0.03857 [4,] 0.07130 [5,] 0.15905 > estfun( mlg )[ 1:5, , drop = FALSE ] [,1] [1,] 0.38682 [2,] -1.67935 [3,] 0.03857 [4,] 0.07130 [5,] 0.15905 > estfun( mlBhhh )[ 1:5, , drop = FALSE ] [,1] [1,] 0.38682 [2,] -1.67935 [3,] 0.03857 [4,] 0.07130 [5,] 0.15905 > estfun( mlgBhhh )[ 1:5, , drop = FALSE ] [,1] [1,] 0.38682 [2,] -1.67935 [3,] 0.03857 [4,] 0.07130 [5,] 0.15905 > estfun( mlBfgs )[ 1:5, , drop = FALSE ] [,1] [1,] 0.38682 [2,] -1.67935 [3,] 0.03857 [4,] 0.07130 [5,] 0.15905 > estfun( mlgBfgs )[ 1:5, , drop = FALSE ] [,1] [1,] 0.38682 [2,] -1.67935 [3,] 0.03857 [4,] 0.07130 [5,] 0.15905 > estfun( mlNm )[ 1:5, , drop = FALSE ] [,1] [1,] 0.38679 [2,] -1.67939 [3,] 0.03853 [4,] 0.07126 [5,] 0.15901 > estfun( mlgNm )[ 1:5, , drop = FALSE ] [,1] [1,] 0.38679 [2,] -1.67939 [3,] 0.03853 [4,] 0.07126 [5,] 0.15901 > estfun( mlSann )[ 1:5, , drop = FALSE ] [,1] [1,] 0.38682 [2,] -1.67936 [3,] 0.03856 [4,] 0.07129 [5,] 0.15904 > estfun( mlgSann )[ 1:5, , drop = FALSE ] [,1] [1,] 0.38682 [2,] -1.67936 [3,] 0.03856 [4,] 0.07129 [5,] 0.15904 > > > ## test for method "bread" > try( bread( mlSum ) ) Error in nObs.maxLik(x) : cannot return the number of observations: please re-run 'maxLik' and provide a gradient function using argument 'grad' or (if no gradient function is specified) a log-likelihood function using argument 'logLik' that return the gradients or log-likelihood values, respectively, at each observation > bread( ml ) [,1] [1,] 4.478 > bread( mlg ) [,1] [1,] 4.477 > bread( mlBhhh ) [,1] [1,] 4.602 > bread( mlgBhhh ) [,1] [1,] 4.602 > bread( mlBfgs ) [,1] [1,] 4.475 > bread( mlgBfgs ) [,1] [1,] 4.477 > bread( mlNm ) [,1] [1,] 4.476 > bread( mlgNm ) [,1] [1,] 4.478 > bread( mlSann ) [,1] [1,] 4.478 > bread( mlgSann ) [,1] [1,] 4.477 > > > ## test for method "sandwich" > try( sandwich( mlSum ) ) Error in nObs.maxLik(x) : cannot return the number of observations: please re-run 'maxLik' and provide a gradient function using argument 'grad' or (if no gradient function is specified) a log-likelihood function using argument 'logLik' that return the gradients or log-likelihood values, respectively, at each observation > printSandwich <- function( x ) { + print( sandwich( x ) ) + print( all.equal( sandwich( x ), vcov( x ) ) ) + } > printSandwich( ml ) [,1] [1,] 0.04357 [1] "Mean relative difference: 0.02778" > printSandwich( mlg ) [,1] [1,] 0.04355 [1] "Mean relative difference: 0.02799" > printSandwich( mlBhhh ) [,1] [1,] 0.04602 [1] TRUE > printSandwich( mlgBhhh ) [,1] [1,] 0.04602 [1] TRUE > printSandwich( mlBfgs ) [,1] [1,] 0.04351 [1] "Mean relative difference: 0.02843" > printSandwich( mlgBfgs ) [,1] [1,] 0.04355 [1] "Mean relative difference: 0.02799" > printSandwich( mlNm ) [,1] [1,] 0.04353 [1] "Mean relative difference: 0.02827" > printSandwich( mlgNm ) [,1] [1,] 0.04356 [1] "Mean relative difference: 0.02784" > printSandwich( mlSann ) [,1] [1,] 0.04357 [1] "Mean relative difference: 0.02778" > printSandwich( 
mlgSann ) [,1] [1,] 0.04355 [1] "Mean relative difference: 0.02797" > > proc.time() user system elapsed 1.008 0.052 1.047 maxLik/tests/BFGSR.R0000644000176000001440000000317212222631144013657 0ustar ripleyusers### BFGSR-related tests ## 1. Test maximization algorithm for convex regions ## ## Optimize quadratic form t(D) %*% W %*% D with p.d. weight matrix ## (ie unbounded problems). ## All solutions should go to large values with a message about successful convergence set.seed(0) options(digits=4) quadForm <- function(D) { return( - t(D - (1:N) ) %*% W %*% ( D - (1:N) ) ) } N <- 3 # 3-dimensional case ## a) test quadratic function t(D) %*% D W <- diag(N) library(maxLik) D <- rep(1/N, N) res <- maxBFGSR(quadForm, start=D) summary(res) ## b) add noice to W <- diag(N) + matrix(runif(N*N), N, N) # diagonal weight matrix with some noise D <- rep(1/N, N) res <- maxBFGSR(quadForm, start=D) summary(res) ## Next, optimize hat function in non-concave region. Does not work well. hat <- function(param) { ## Hat function. Hessian negative definite if sqrt(x^2 + y^2) < 0.5 x <- param[1] y <- param[2] exp(-(x-2)^2 - (y-2)^2) } summary(hatNC <- maxBFGSR(hat, start=c(1,1), tol=0, reltol=0)) # should converge to c(0,0). ## Test BFGSR with fixed parameters and equality constraints ## Optimize 3D hat with one parameter fixed (== 2D hat). ## Add an equality constraint on that hat3 <- function(param) { ## Hat function. Hessian negative definite if sqrt((x-2)^2 + (y-2)^2) < 0.5 x <- param[1] y <- param[2] z <- param[3] exp(-(x-2)^2-(y-2)^2-(z-2)^2) } sv <- c(1,1,1) ## constraints: x + y + z = 8 A <- matrix(c(1,1,1), 1, 3) B <- -8 constraints <- list(eqA=A, eqB=B) summary(hat3CF <- maxBFGSR(hat3, start=sv, constraints=constraints, fixed=3)) maxLik/tests/fitGammaDist.Rout.save0000644000176000001440000002322312222502311017042 0ustar ripleyusers R version 3.0.2 (2013-09-25) -- "Frisbee Sailing" Copyright (C) 2013 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > ## the idea and most commands were provided by Marco J. Maier, Institute for > ## Statistics and Mathematics, Vienna University of Economics and Business > > library(maxLik) Loading required package: miscTools Please cite the 'maxLik' package as: Henningsen, Arne and Toomet, Ott (2011). maxLik: A package for maximum likelihood estimation in R. Computational Statistics 26(3), 443-458. DOI 10.1007/s00180-010-0217-1. 
If you have questions, suggestions, or comments regarding the 'maxLik' package, please use a forum or 'tracker' at maxLik's R-Forge site: https://r-forge.r-project.org/projects/maxlik/ > options(warn = -1, digits = 4 ) > set.seed(5) > some_data <- rgamma(1e4, shape = 5, scale = 2) > > # log-likelihood function(s) > logLL <- function(x, X) # per observation for maxLik + dgamma(x = X, shape = exp(x[1]), scale = exp(x[2]), log = TRUE) > logLL_sum <- function(x, X) # negative sum for nlm() + -sum(dgamma(x = X, shape = exp(x[1]), scale = exp(x[2]), log = TRUE)) > > sum(logLL(log(c(5,2)),some_data)) [1] -28528 > logLL_sum(log(c(5,2)),some_data) [1] 28528 > all.equal( sum(logLL(log(c(5,2)),some_data)), -logLL_sum(log(c(5,2)),some_data)) [1] TRUE > > # gradient of log-likelihood function > d_logLL <- function(x, X){ # analytic 1. derivatives + cbind(shape=exp(x[1])*(-x[2]-psigamma(exp(x[1]),0)+log(X)), + scale= X / exp(x[2]) - exp(x[1])) + } > > d_logLLNum <- function(x, X){ + numericGradient( logLL, x, X = X ) + } > > colSums(d_logLL(log(c(5,2)),some_data)) shape scale 92.95 135.27 > colSums(d_logLLNum(log(c(5,2)),some_data)) [1] 92.95 135.27 > > all.equal( d_logLL(log(c(5,2)),some_data), d_logLLNum(log(c(5,2)),some_data), + check.attributes=FALSE) [1] TRUE > > # Hessian of log-likelihood function > dd_logLL <- function(x, X){ # analytic 2. derivatives + grad <- d_logLL( x, X ) + hessian <- matrix(0, 2, 2) + hessian[1,1] <- sum( grad[,1] - exp(x[1])^2 * psigamma(exp(x[1]), 1) ) + hessian[2,2] <- - sum( X / exp(x[2]) ) + hessian[cbind(c(2,1), c(1,2))] <- -exp(x[1]) * length(X) + return(hessian) + } > > dd_logLLNum <- function(x, X){ + numericHessian( function(x,X) sum(logLL(x,X)), t0=x, X = X ) + } > dd_logLLNumGrad <- function(x, X){ + numericHessian( function(x,X) sum(logLL(x,X)), + grad = function(x,X) colSums(d_logLL(x,X)), x, X = X ) + } > > dd_logLL(log(c(5,2)),some_data) [,1] [,2] [1,] -55238 -50000 [2,] -50000 -50135 > dd_logLLNum(log(c(5,2)),some_data) [,1] [,2] [1,] -55235 -50000 [2,] -50000 -50135 > all.equal(dd_logLL(log(c(5,2)),some_data), dd_logLLNum(log(c(5,2)),some_data)) [1] "Mean relative difference: 1.655e-05" > dd_logLLNumGrad(log(c(5,2)),some_data) [,1] [,2] shape -55238 -50000 scale -50000 -50135 > all.equal(dd_logLL(log(c(5,2)),some_data), dd_logLLNumGrad(log(c(5,2)),some_data), + check.attributes=FALSE) [1] TRUE > > # estimation with nlm() > t_nlm <- system.time( r_nlm <- nlm(logLL_sum, c(0,0), X=some_data, hessian=TRUE) ) > > # estimation with nlm() and gradients > logLL_grad <- function(x, X) { + result <- logLL_sum( x, X ) + attr( result, "gradient" ) <- - colSums( d_logLL( x, X ) ) + return( result ) + } > t_nlmg <- system.time( r_nlmg <- nlm(logLL_grad, c(0,0), X=some_data, hessian=TRUE) ) > > # estimation with nlm() and gradients and Hessian > logLL_hess <- function(x, X) { + result <- logLL_sum( x, X ) + attr( result, "gradient" ) <- - colSums( d_logLL( x, X ) ) + attr( result, "hessian" ) <- - dd_logLL( x, X ) + return( result ) + } > t_nlmgh <- system.time( r_nlmgh <- nlm(logLL_hess, c(0,0), X=some_data, hessian=TRUE) ) > > # estimation with optim() / BFGS > t_bfgs <- system.time( r_bfgs <- optim(c(0,0), logLL_sum, X=some_data, + method="BFGS", hessian=TRUE) ) > > # estimation with maxLik() / BFGS > t_bfgsM <- system.time( r_bfgsM <- maxLik( logLL, start = c(0,0), + method="BFGS", X=some_data ) ) > > # estimation with maxLik() / BFGS with gradients > t_bfgsMg <- system.time( r_bfgsMg <- maxLik( logLL, d_logLL, start = c(0,0), + method="BFGS", X=some_data ) ) > > # 
estimation with maxLik() / BHHH > t_bhhh <- system.time( r_bhhh <- maxLik( logLL, start = c(0,0), + method="BHHH", X=some_data ) ) > > # estimation with maxLik() / BHHH with gradients > t_bhhhg <- system.time( r_bhhhg <- maxLik( logLL, d_logLL, start = c(0,0), + method="BHHH", X=some_data ) ) > > # estimation with maxLik() / NR > t_NRn <- system.time( r_NRn <- maxLik( logLL, start = c(0,0), + method="NR", X=some_data ) ) > > # estimation with maxLik() / NR with gradients > t_NRg <- system.time( r_NRg <- maxLik( logLL, d_logLL, start = c(0,0), + method="NR", X=some_data ) ) > > # estimation with maxLik() / NR with gradients and Hessian > t_NRgh <- system.time( r_NRgh <- maxLik( logLL, d_logLL, dd_logLL, start = c(0,0), + method="NR", X=some_data ) ) > > # log likelihood values > rbind(NLM=-r_nlm$minimum, + NLM_grad=-r_nlmg$minimum, + NLM_gradHess=-r_nlmgh$minimum, + BFGS=-r_bfgs$value, + maxLikBfgs = logLik( r_bfgsM ), + maxLikBfgs_grad = logLik( r_bfgsMg ), + BHHH = logLik( r_bhhh ), + BHHH_grad = logLik( r_bhhhg ), + NR_numeric= logLik( r_NRn ), + NR_grad= logLik( r_NRg ), + NR_gradHess= logLik( r_NRgh ) ) [,1] NLM -28528 NLM_grad -28528 NLM_gradHess -28528 BFGS -28528 maxLikBfgs -28528 maxLikBfgs_grad -28528 BHHH -28528 BHHH_grad -28528 NR_numeric -28528 NR_grad -28528 NR_gradHess -28528 > > > # estimated coefficients > pp <- exp(rbind(NLM=r_nlm$estimate, + NLM_grad=r_nlmg$estimate, + NLM_gradHess=r_nlmgh$estimate, + BFGS=r_bfgs$par, + maxLikBfgs = coef( r_bfgsM ), + maxLikBfgs_grad = coef( r_bfgsMg ), + BHHH = coef( r_bhhh ), + BHHH_grad = coef( r_bhhhg ), + NR_numeric= coef( r_NRn ), + NR_grad= coef( r_NRg ), + NR_gradHess= coef( r_NRgh ) )) > colnames(pp) <- c("shape_alpha", "scale_theta") > pp shape_alpha scale_theta NLM 4.961 2.021 NLM_grad 4.961 2.021 NLM_gradHess 4.961 2.021 BFGS 4.961 2.021 maxLikBfgs 4.961 2.021 maxLikBfgs_grad 4.961 2.021 BHHH 4.961 2.021 BHHH_grad 4.961 2.021 NR_numeric 4.961 2.021 NR_grad 4.961 2.021 NR_gradHess 4.961 2.021 > > > # some Hessians > -100*round(r_nlm$hessian/100,0) [,1] [,2] [1,] -55000 -49600 [2,] -49600 -49600 > round(solve(r_nlm$hessian),5) [,1] [,2] [1,] 0.00019 -0.00019 [2,] -0.00019 0.00021 > > -100*round(r_nlmg$hessian/100,0) [,1] [,2] [1,] -55000 -49600 [2,] -49600 -49600 > round(solve(r_nlmg$hessian),5) [,1] [,2] [1,] 0.00019 -0.00019 [2,] -0.00019 0.00021 > > -100*round(r_nlmgh$hessian/100,0) [,1] [,2] [1,] -55000 -49600 [2,] -49600 -49600 > round(solve(r_nlmgh$hessian),5) [,1] [,2] [1,] 0.00019 -0.00019 [2,] -0.00019 0.00021 > > -100*round(r_bfgs$hessian/100,0) [,1] [,2] [1,] -54900 -49600 [2,] -49600 -49600 > round(solve(r_bfgs$hessian),5) [,1] [,2] [1,] 0.00019 -0.00019 [2,] -0.00019 0.00021 > > 100*round(r_NRn$hessian/100,0) [,1] [,2] [1,] -54900 -49600 [2,] -49600 -49600 > round(solve(-r_NRn$hessian),5) [,1] [,2] [1,] 0.00019 -0.00019 [2,] -0.00019 0.00021 > > 100*round(r_NRg$hessian/100,0) [,1] [,2] [1,] -54900 -49600 [2,] -49600 -49600 > round(solve(-r_NRg$hessian),5) [,1] [,2] [1,] 0.00019 -0.00019 [2,] -0.00019 0.00021 > > > # standard errors > se <- exp(rbind(NLM=sqrt(diag( solve(r_nlm$hessian) )), + NLM_grad=sqrt(diag( solve(r_nlmg$hessian) )), + NLM_gradHess=sqrt(diag( solve(r_nlmgh$hessian) )), + BFGS=sqrt(diag( solve(r_bfgs$hessian) )), + maxLikBfgs = stdEr( r_bfgsM ), + maxLikBfgs_grad = stdEr( r_bfgsMg ), + BHHH = stdEr( r_bhhh ), + BHHH_grad = stdEr( r_bhhhg ), + NR_numeric= stdEr( r_NRn ), + NR_grad= stdEr( r_NRg ), + NR_gradHess= stdEr( r_NRgh ) )) > colnames(se) <- c("shape_alpha", "scale_theta") > se shape_alpha 
scale_theta NLM 1.014 1.015 NLM_grad 1.014 1.015 NLM_gradHess 1.014 1.015 BFGS 1.014 1.015 maxLikBfgs 1.014 1.015 maxLikBfgs_grad 1.014 1.015 BHHH 1.014 1.015 BHHH_grad 1.014 1.015 NR_numeric 1.014 1.015 NR_grad 1.014 1.015 NR_gradHess 1.014 1.015 > > # execution times > tt <- rbind(t_nlm, t_nlmg, t_nlmgh, t_bfgs, t_bfgsM, t_bfgsMg, + t_bhhh, t_bhhhg, t_NRn, t_NRg, t_NRgh ) > # tt > > proc.time() user system elapsed 15.268 0.036 15.308 maxLik/tests/finalHessian.R0000644000176000001440000000772011643562144015434 0ustar ripleyusers### Test the 'finalHessian' argument of optimization routines library(maxLik) set.seed( 4 ) # log-likelihood function, gradient, and Hessian for 1-parameter case (exponential distribution) ll1i <- function(theta) { if(!all(theta > 0)) return(NA) log(theta) - theta*t } ll1 <- function(theta) sum( log(theta) - theta*t ) gr1i <- function(theta) 1/theta - t gr1 <- function(theta) sum( 1/theta - t ) hs1 <- function(theta) -100/theta^2 t <- rexp( 100, 2 ) ## the same functions for 2-variable case (normal distribution) ll2 <- function( param ) { ## log likelihood function mu <- param[ 1 ] sigma <- param[ 2 ] if(!(sigma > 0)) return(NA) # to avoid warnings in the output N <- length( x ) llValue <- -0.5 * N * log( 2 * pi ) - N * log( sigma ) - 0.5 * sum( ( x - mu )^2 / sigma^2 ) return( llValue ) } ## log likelihood function (individual observations) ll2i <- function( param ) { mu <- param[ 1 ] sigma <- param[ 2 ] if(!(sigma > 0)) return(NA) # to avoid warnings in the output llValues <- -0.5 * log( 2 * pi ) - log( sigma ) - 0.5 * ( x - mu )^2 / sigma^2 return( llValues ) } gr2 <- function( param ) { ## function to calculate analytical gradients mu <- param[ 1 ] sigma <- param[ 2 ] N <- length( x ) llGrad <- c( sum( ( x - mu ) / sigma^2 ), - N / sigma + sum( ( x - mu )^2 / sigma^3 ) ) return( llGrad ) } ## function to calculate analytical gradients (individual observations) gr2i <- function( param ) { mu <- param[ 1 ] sigma <- param[ 2 ] llGrads <- cbind( ( x - mu ) / sigma^2, - 1 / sigma + ( x - mu )^2 / sigma^3 ) return( llGrads ) } ## function to calculate analytical Hessians hs2 <- function( param ) { mu <- param[ 1 ] sigma <- param[ 2 ] N <- length( x ) llHess <- matrix( c( N * ( - 1 / sigma^2 ), sum( - 2 * ( x - mu ) / sigma^3 ), sum( - 2 * ( x - mu ) / sigma^3 ), N / sigma^2 + sum( - 3 * ( x - mu )^2 / sigma^4 ) ), nrow = 2, ncol = 2 ) return( llHess ) } x <- rnorm(100, 1, 2) ## NR # Estimate with only function values (single parameter) a <- maxLik( ll1i, gr1i, start = 1, method = "NR" ) summary(a ) b <- maxLik( ll1i, gr1i, start = 1, method = "NR", finalHessian="bhhh") # should issue a warning as BHHH not possible summary(b ) c <- maxLik( ll1i, gr1i, start = 1, method = "NR", finalHessian=FALSE) summary(c) ## (vector parameter) a <- maxLik( ll2, gr2, start = c(0,1), method = "NR" ) summary(a ) b <- maxLik( ll2, gr2, start = c(0,1), method = "NR", finalHessian="bhhh") # should issue a warning as BHHH not possible summary(b ) c <- maxLik( ll2, gr2, start = c(0,1), method = "NR", finalHessian=FALSE) summary(c) ## BFGSR # Estimate with only function values (single parameter) a <- maxLik( ll1i, gr1i, start = 1, method = "BFGSR" ) summary(a ) b <- maxLik( ll1i, gr1i, start = 1, method = "BFGSR", finalHessian="bhhh") # should issue a warning as BHHH not possible summary(b ) c <- maxLik( ll1i, gr1i, start = 1, method = "BFGSR", finalHessian=FALSE) summary(c) # Estimate with only function values (vector parameter) a <- maxLik( ll2, gr2, start = c(0,1), method = "BFGSR" ) 
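## Aside -- illustrative sketch only, not part of the original test script:
## the 'finalHessian' settings exercised in this file differ only in how the
## Hessian used for the standard errors is obtained after convergence; the
## point estimates themselves are unaffected.  The calls below repeat the
## single-parameter estimations from above under ad-hoc names (fhNumeric,
## fhBhhh, fhNone) so the implied standard errors can be compared directly.
fhNumeric <- maxLik( ll1i, gr1i, start = 1, method = "NR" )
   # default: final Hessian computed numerically (or analytically, if supplied)
fhBhhh <- maxLik( ll1i, gr1i, start = 1, method = "NR", finalHessian = "bhhh" )
   # final Hessian approximated by the outer product of observation-wise gradients
fhNone <- maxLik( ll1i, gr1i, start = 1, method = "NR", finalHessian = FALSE )
   # no final Hessian: summary() cannot report standard errors for this fit
## stdEr(fhNumeric) and stdEr(fhBhhh) should be similar but need not coincide.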
summary(a ) b <- maxLik( ll2, gr2, start = c(0,1), method = "BFGSR", finalHessian="bhhh") # should issue a warning as BHHH not possible summary(b ) c <- maxLik( ll2, gr2, start = c(0,1), method = "BFGSR", finalHessian=FALSE) summary(c) ### Nelder-Mead ## Individual observations only b <- maxLik( ll2i, start = c(0,1), method = "NM", finalHessian="bhhh") summary(b) ## Individual observations, summed gradient b <- maxLik( ll2i, gr2, start = c(0,1), method = "NM", finalHessian="bhhh") # should issue a warning as BHHH not selected # (yes, could do it based on individual likelihood and numeric gradient) summary(b) maxLik/tests/fitExpDist.R0000644000176000001440000001274112215577540015114 0ustar ripleyusers## load the maxLik package library( maxLik ) ## fitting an exponential distribution by ML, ## e.g. estimation of an exponential duration model # generate data options(digits=4) # less differences b/w different platforms set.seed( 4 ) t <- rexp( 100, 2 ) # log-likelihood function, gradient, and Hessian loglik <- function(theta) log(theta) - theta*t loglikSum <- function(theta) sum( log(theta) - theta*t ) gradlik <- function(theta) 1/theta - t gradlikSum <- function(theta) sum( 1/theta - t ) hesslik <- function(theta) -100/theta^2 ## NR estimation # Estimate with only function values ml <- maxLik( loglik, start = 1 ) print( ml ) summary( ml ) nObs( ml ) print.default( ml ) # log-likelihood value summed over all observations mlSum <- maxLik( loglikSum, start = 1 ) all.equal( mlSum[], ml[-11], tolerance = 1e-3 ) # Estimate with analytic gradient mlg <- maxLik( loglik, gradlik, start = 1 ) nObs( mlg ) all.equal( mlg, ml, tolerance = 1e-3 ) # gradient summed over all observations mlgSum <- maxLik( loglikSum, gradlikSum, start = 1 ) all.equal( mlgSum[], mlg[-11], tolerance = 1e-3 ) # Estimate with analytic gradient and Hessian mlgh <- maxLik( loglik, gradlik, hesslik, start = 1 ) all.equal( mlgh, mlg, tolerance = 1e-3 ) ## BHHH estimation # Estimate with only function values mlBhhh <- maxLik( loglik, start = 1, method = "BHHH" ) print( mlBhhh ) summary( mlBhhh ) nObs( mlBhhh ) all.equal( mlBhhh[ -c( 5, 6, 10 ) ], ml[ -c( 5, 6, 10 ) ], check.attributes = FALSE, tolerance = 1e-3 ) # Estimate with analytic gradient mlgBhhh <- maxLik( loglik, gradlik, start = 1, method = "BHHH" ) nObs( mlgBhhh ) all.equal( mlgBhhh, mlBhhh, tolerance = 1e-3 ) # Estimate with analytic gradient and Hessian (unused during estimation) mlghBhhh <- maxLik( loglik, gradlik, hesslik, start = 1, method = "BHHH" ) all.equal( mlghBhhh, mlgBhhh, tolerance = 1e-3 ) ## BFGS estimation # Estimate with only function values mlBfgs <- maxLik( loglik, start = 1, method = "BFGS" ) print( mlBfgs ) summary( mlBfgs ) nObs( mlBfgs ) all.equal( mlBfgs[ -c( 5, 6, 9, 10, 11 ) ], ml[ -c( 5, 6, 9, 10 ) ], tolerance = 1e-3 ) # log-likelihood value summed over all observations mlSumBfgs <- maxLik( loglikSum, start = 1, method = "BFGS" ) all.equal( mlSumBfgs[], mlBfgs[-12], tolerance = 1e-3 ) # Estimate with analytic gradient mlgBfgs <- maxLik( loglik, gradlik, start = 1, method = "BFGS" ) nObs( mlgBfgs ) all.equal( mlgBfgs, mlBfgs, tolerance = 1e-3 ) # gradient summed over all observations mlgSumBfgs <- maxLik( loglikSum, gradlikSum, start = 1, method = "BFGS" ) all.equal( mlgSumBfgs[], mlgBfgs[-12], tolerance = 1e-3 ) # Estimate with analytic gradient and Hessian (unused during estimation) mlghBfgs <- maxLik( loglik, gradlik, hesslik, start = 1, method = "BFGS" ) all.equal( mlghBfgs, mlgBfgs, tolerance = 1e-3 ) ## NM estimation # Estimate with only 
function values mlNm <- maxLik( loglik, start = 1, method = "NM" ) print( mlNm ) summary( mlNm ) nObs( mlNm ) all.equal( mlNm[ -c( 5, 6, 9, 10, 11 ) ], ml[ -c( 5, 6, 9, 10 ) ], tolerance = 1e-3 ) # Estimate with analytic gradient (unused during estimation) mlgNm <- maxLik( loglik, gradlik, start = 1, method = "NM" ) nObs( mlgNm ) all.equal( mlgNm, mlNm, tolerance = 1e-3 ) # Estimate with analytic gradient and Hessian (both unused during estimation) mlghNm <- maxLik( loglik, gradlik, hesslik, start = 1, method = "NM" ) all.equal( mlghNm, mlgNm, tolerance = 1e-3 ) ## SANN estimation # Estimate with only function values mlSann <- maxLik( loglik, start = 1, method = "SANN" ) print( mlSann ) summary( mlSann ) nObs( mlSann ) all.equal( mlSann[ -c( 5, 6, 9, 10, 11 ) ], ml[ -c( 5, 6, 9, 10 ) ], tolerance = 1e-3 ) # Estimate with analytic gradient (unused during estimation) mlgSann <- maxLik( loglik, gradlik, start = 1, method = "SANN" ) nObs( mlgSann ) all.equal( mlgSann, mlSann, tolerance = 1e-3 ) # Estimate with analytic gradient and Hessian (both unused during estimation) mlghSann <- maxLik( loglik, gradlik, hesslik, start = 1, method = "SANN" ) all.equal( mlghSann, mlgSann, tolerance = 1e-3 ) ## CG estimation # Estimate with only function values mlCg <- maxLik( loglik, start = 1, method = "CG" ) print(summary( mlCg)) # Estimate with analytic gradient mlgCg <- maxLik( loglik, gradlik, start = 1, method = "CG" ) print(summary( mlgCg)) # Estimate with analytic gradient and Hessian (not used for estimation) mlghCg <- maxLik( loglik, gradlik, hesslik, start = 1, method = "CG" ) print(summary( mlghCg)) ## test for method "estfun" library( sandwich ) try( estfun( mlSum ) ) estfun( ml )[ 1:5, , drop = FALSE ] estfun( mlg )[ 1:5, , drop = FALSE ] estfun( mlBhhh )[ 1:5, , drop = FALSE ] estfun( mlgBhhh )[ 1:5, , drop = FALSE ] estfun( mlBfgs )[ 1:5, , drop = FALSE ] estfun( mlgBfgs )[ 1:5, , drop = FALSE ] estfun( mlNm )[ 1:5, , drop = FALSE ] estfun( mlgNm )[ 1:5, , drop = FALSE ] estfun( mlSann )[ 1:5, , drop = FALSE ] estfun( mlgSann )[ 1:5, , drop = FALSE ] ## test for method "bread" try( bread( mlSum ) ) bread( ml ) bread( mlg ) bread( mlBhhh ) bread( mlgBhhh ) bread( mlBfgs ) bread( mlgBfgs ) bread( mlNm ) bread( mlgNm ) bread( mlSann ) bread( mlgSann ) ## test for method "sandwich" try( sandwich( mlSum ) ) printSandwich <- function( x ) { print( sandwich( x ) ) print( all.equal( sandwich( x ), vcov( x ) ) ) } printSandwich( ml ) printSandwich( mlg ) printSandwich( mlBhhh ) printSandwich( mlgBhhh ) printSandwich( mlBfgs ) printSandwich( mlgBfgs ) printSandwich( mlNm ) printSandwich( mlgNm ) printSandwich( mlSann ) printSandwich( mlgSann ) maxLik/tests/fitNormalDist.R0000644000176000001440000013130112222627415015575 0ustar ripleyusers# load the 'maxLik' package library(maxLik) options(digits = 4) # just to avoid so many differences when comparing these output files ## data to fit a normal distribution # set seed for pseudo random numbers set.seed( 123 ) # generate a variable from normally distributed random numbers x <- rnorm( 100, 1, 2 ) xSaved <- x ## log likelihood function llf <- function( param ) { mu <- param[ 1 ] sigma <- param[ 2 ] if(!(sigma > 0)) return(NA) # to avoid warnings in the output N <- length( x ) llValue <- -0.5 * N * log( 2 * pi ) - N * log( sigma ) - 0.5 * sum( ( x - mu )^2 / sigma^2 ) return( llValue ) } ## log likelihood function (individual observations) llfInd <- function( param ) { mu <- param[ 1 ] sigma <- param[ 2 ] if(!(sigma > 0)) return(NA) # to avoid warnings in 
the output llValues <- -0.5 * log( 2 * pi ) - log( sigma ) - 0.5 * ( x - mu )^2 / sigma^2 return( llValues ) } ## function to calculate analytical gradients gf <- function( param ) { mu <- param[ 1 ] sigma <- param[ 2 ] N <- length( x ) llGrad <- c( sum( ( x - mu ) / sigma^2 ), - N / sigma + sum( ( x - mu )^2 / sigma^3 ) ) return( llGrad ) } ## function to calculate analytical gradients (individual observations) gfInd <- function( param ) { mu <- param[ 1 ] sigma <- param[ 2 ] llGrads <- cbind( ( x - mu ) / sigma^2, - 1 / sigma + ( x - mu )^2 / sigma^3 ) return( llGrads ) } ## log likelihood function with gradients as attributes llfGrad <- function( param ) { mu <- param[ 1 ] sigma <- param[ 2 ] if(!(sigma > 0)) return(NA) # to avoid warnings in the output N <- length( x ) llValue <- -0.5 * N * log( 2 * pi ) - N * log( sigma ) - 0.5 * sum( ( x - mu )^2 / sigma^2 ) attributes( llValue )$gradient <- c( sum( ( x - mu ) / sigma^2 ), - N / sigma + sum( ( x - mu )^2 / sigma^3 ) ) return( llValue ) } ## log likelihood function with gradients as attributes (individual observations) llfGradInd <- function( param ) { mu <- param[ 1 ] sigma <- param[ 2 ] if(!(sigma > 0)) return(NA) # to avoid warnings in the output llValues <- -0.5 * log( 2 * pi ) - log( sigma ) - 0.5 * ( x - mu )^2 / sigma^2 attributes( llValues )$gradient <- cbind( ( x - mu ) / sigma^2, - 1 / sigma + ( x - mu )^2 / sigma^3 ) return( llValues ) } ## function to calculate analytical Hessians hf <- function( param ) { mu <- param[ 1 ] sigma <- param[ 2 ] N <- length( x ) llHess <- matrix( c( N * ( - 1 / sigma^2 ), sum( - 2 * ( x - mu ) / sigma^3 ), sum( - 2 * ( x - mu ) / sigma^3 ), N / sigma^2 + sum( - 3 * ( x - mu )^2 / sigma^4 ) ), nrow = 2, ncol = 2 ) return( llHess ) } ## log likelihood function with gradients and Hessian as attributes llfGradHess <- function( param ) { mu <- param[ 1 ] sigma <- param[ 2 ] if(!(sigma > 0)) return(NA) # to avoid warnings in the output N <- length( x ) llValue <- -0.5 * N * log( 2 * pi ) - N * log( sigma ) - 0.5 * sum( ( x - mu )^2 / sigma^2 ) attributes( llValue )$gradient <- c( sum( ( x - mu ) / sigma^2 ), - N / sigma + sum( ( x - mu )^2 / sigma^3 ) ) attributes( llValue )$hessian <- matrix( c( N * ( - 1 / sigma^2 ), sum( - 2 * ( x - mu ) / sigma^3 ), sum( - 2 * ( x - mu ) / sigma^3 ), N / sigma^2 + sum( - 3 * ( x - mu )^2 / sigma^4 ) ), nrow = 2, ncol = 2 ) return( llValue ) } ## log likelihood function with gradients as attributes (individual observations) llfGradHessInd <- function( param ) { mu <- param[ 1 ] sigma <- param[ 2 ] if(!(sigma > 0)) return(NA) # to avoid warnings in the output N <- length( x ) llValues <- -0.5 * log( 2 * pi ) - log( sigma ) - 0.5 * ( x - mu )^2 / sigma^2 attributes( llValues )$gradient <- cbind( ( x - mu ) / sigma^2, - 1 / sigma + ( x - mu )^2 / sigma^3 ) attributes( llValues )$hessian <- matrix( c( N * ( - 1 / sigma^2 ), sum( - 2 * ( x - mu ) / sigma^3 ), sum( - 2 * ( x - mu ) / sigma^3 ), N / sigma^2 + sum( - 3 * ( x - mu )^2 / sigma^4 ) ), nrow = 2, ncol = 2 ) return( llValues ) } # start values startVal <- c( mu = 0, sigma = 1 ) ## NR method ml <- maxLik( llf, start = startVal ) print( ml ) summary( ml ) activePar( ml ) AIC( ml ) coef( ml ) condiNumber( ml ) round( hessian( ml ), 2 ) logLik( ml ) maximType( ml ) nIter( ml ) try( nObs( ml ) ) nParam( ml ) returnCode( ml ) returnMessage( ml ) round( vcov( ml ), 4 ) logLik( summary( ml ) ) mlInd <- maxLik( llfInd, start = startVal ) summary( mlInd ) all.equal( ml[-c(3,4,5,6)], mlInd[ -c(3,4,5,6,11) ], tolerance = 
1e-3 ) # 3 gradient, should be close to 0, but may vary enormously in relative terms mlInd[[11]][sample(nrow(mlInd[[11]]), 10),] # just print a sample of 10 nObs( mlInd ) # with analytical gradients mlg <- maxLik( llf, gf, start = startVal ) summary( mlg ) all.equal( ml[-c(5,6)], mlg[-c(5,6)], tolerance = 1e-3 ) mlgInd <- maxLik( llfInd, gfInd, start = startVal ) all.equal( mlInd, mlgInd, tolerance = 1e-3 ) all.equal( mlg[ ], mlgInd[ -11 ], tolerance = 1e-3 ) round( mlgInd[[ 11 ]], 3 ) # with analytical gradients as attribute mlG <- maxLik( llfGrad, start = startVal ) all.equal( mlG, mlg, tolerance = 1e-3 ) all.equal( mlG$gradient, gf( coef( mlG ) ), check.attributes = FALSE, tolerance = 1e-3 ) mlGInd <- maxLik( llfGradInd, start = startVal ) all.equal( mlGInd, mlgInd, tolerance = 1e-3 ) all.equal( mlGInd$gradient, colSums( gfInd( coef( mlGInd ) ) ), check.attributes = FALSE, tolerance = 1e-3 ) all.equal( mlGInd$gradientObs, gfInd( coef( mlGInd ) ), check.attributes = FALSE, tolerance = 1e-3 ) # with analytical gradients as argument and attribute mlgG <- maxLik( llfGrad, gf, start = startVal ) all.equal( mlgG, mlg, tolerance = 1e-3 ) all.equal( mlgG, mlG, tolerance = 1e-3 ) # with analytical gradients and Hessians mlgh <- maxLik( llf, gf, hf, start = startVal ) all.equal( mlg, mlgh, tolerance = 1e-3 ) # with analytical gradients and Hessian as attribute mlGH <- maxLik( llfGradHess, start = startVal ) all.equal( mlGH, mlgh, tolerance = 1e-3 ) # with analytical gradients and Hessian as argument and attribute mlgGhH <- maxLik( llfGradHess, gf, hf, start = startVal ) all.equal( mlgGhH, mlgh, tolerance = 1e-3 ) all.equal( mlgGhH, mlGH, tolerance = 1e-3 ) ## BHHH method mlBHHH <- try( maxLik( llf, start = startVal, method = "BHHH" ) ) x <- xSaved[1] try( maxLik( llfInd, start = startVal, method = "BHHH" ) ) x <- xSaved[1:2] try( maxLik( llfInd, start = startVal, method = "BHHH" ) ) x <- xSaved mlBHHH <- maxLik( llfInd, start = startVal, method = "BHHH" ) print( mlBHHH ) summary( mlBHHH ) activePar( mlBHHH ) AIC( mlBHHH ) coef( mlBHHH ) condiNumber( mlBHHH ) round( hessian( mlBHHH ), 2 ) logLik( mlBHHH ) maximType( mlBHHH ) nIter( mlBHHH ) nParam( mlBHHH ) returnCode( mlBHHH ) returnMessage( mlBHHH ) round( vcov( mlBHHH ), 4 ) logLik( summary( mlBHHH ) ) all.equal( ml[-c(4,5,6,9,10) ], mlBHHH[ -c(4,5,6,9,10,11) ], tolerance = 1e-3 ) round( mlBHHH[[ 11 ]], 3 ) nObs( mlBHHH ) # final Hessian = usual Hessian mlBhhhH <- maxLik( llfInd, start = startVal, method = "BHHH", finalHessian = TRUE ) all.equal( mlBhhhH[-4], mlBHHH[-4], tolerance = 1e-3 ) round( hessian( mlBhhhH ), 2 ) summary( mlBhhhH ) # with analytical gradients mlgBHHH <- try( maxLik( llf, gf, start = startVal, method = "BHHH" ) ) mlgBHHH <- try( maxLik( llfInd, gf, start = startVal, method = "BHHH" ) ) x <- xSaved[1] try( maxLik( llf, gfInd, start = startVal, method = "BHHH" ) ) try( maxLik( llfInd, gfInd, start = startVal, method = "BHHH" ) ) x <- xSaved[1:2] try( maxLik( llf, gfInd, start = startVal, method = "BHHH" ) ) try( maxLik( llfInd, gfInd, start = startVal, method = "BHHH" ) ) x <- xSaved mlgBHHH <- maxLik( llfInd, gfInd, start = startVal, method = "BHHH" ) summary( mlgBHHH ) all.equal( mlBHHH, mlgBHHH, tolerance = 1e-3 ) all.equal( mlg[-c(4,5,6,9,10)], mlgBHHH[-c(4,5,6,9,10,11)], tolerance = 1e-3 ) round( mlgBHHH[[ 11 ]], 3 ) mlgBHHH2 <- maxLik( llf, gfInd, start = startVal, method = "BHHH" ) all.equal( mlgBHHH, mlgBHHH2, tolerance = 1e-3 ) # final Hessian = usual Hessian mlgBhhhH <- maxLik( llf, gfInd, start = startVal, method 
= "BHHH", finalHessian = TRUE ) all.equal( mlgBhhhH, mlBhhhH, tolerance = 1e-3 ) all.equal( mlgBhhhH[-4], mlgBHHH[-4], tolerance = 1e-3 ) round( hessian( mlgBhhhH ), 2 ) # with analytical gradients as attribute try( maxLik( llfGrad, start = startVal, method = "BHHH" ) ) x <- xSaved[1] try( maxLik( llfGrad, start = startVal, method = "BHHH" ) ) try( maxLik( llfGradInd, start = startVal, method = "BHHH" ) ) x <- xSaved[1:2] try( maxLik( llfGrad, start = startVal, method = "BHHH" ) ) try( maxLik( llfGradInd, start = startVal, method = "BHHH" ) ) x <- xSaved mlGBHHH <- maxLik( llfGradInd, start = startVal, method = "BHHH" ) all.equal( mlGBHHH, mlgBHHH, tolerance = 1e-3 ) # final Hessian = usual Hessian mlGBhhhH <- maxLik( llfGradInd, start = startVal, method = "BHHH", finalHessian = TRUE ) all.equal( mlGBhhhH, mlgBhhhH, tolerance = 1e-3 ) # with analytical gradients as argument and attribute mlgGBHHH <- maxLik( llfGradInd, gfInd, start = startVal, method = "BHHH" ) all.equal( mlgGBHHH, mlgBHHH, tolerance = 1e-3 ) all.equal( mlgGBHHH, mlGBHHH, tolerance = 1e-3 ) # with unused Hessian mlghBHHH <- maxLik( llfInd, gfInd, hf, start = startVal, method = "BHHH" ) all.equal( mlgBHHH, mlghBHHH, tolerance = 1e-3 ) # final Hessian = usual Hessian mlghBhhhH <- maxLik( llfInd, gfInd, hf, start = startVal, method = "BHHH", finalHessian = TRUE ) all.equal( mlghBhhhH[-4], mlghBHHH[-4], tolerance = 1e-3 ) all.equal( mlghBhhhH, mlgBhhhH, tolerance = 1e-3 ) # with unused Hessian as attribute mlGHBHHH <- maxLik( llfGradHessInd, start = startVal, method = "BHHH" ) all.equal( mlGHBHHH, mlghBHHH, tolerance = 1e-3 ) # final Hessian = usual Hessian mlGHBhhhH <- maxLik( llfGradHessInd, start = startVal, method = "BHHH", finalHessian = TRUE ) all.equal( mlGHBhhhH, mlghBhhhH, tolerance = 1e-3 ) # with analytical gradients and Hessian as argument and attribute mlgGhHBHHH <- maxLik( llfGradHessInd, gfInd, hf, start = startVal, method = "BHHH" ) all.equal( mlgGhHBHHH, mlghBHHH, tolerance = 1e-3 ) all.equal( mlgGhHBHHH, mlGHBHHH, tolerance = 1e-3 ) ### BFGS-YC method mlBFGSYC <- maxLik( llf, start = startVal, method = "bfgsr" ) print( mlBFGSYC ) summary( mlBFGSYC ) activePar( mlBFGSYC ) AIC( mlBFGSYC ) coef( mlBFGSYC ) condiNumber( mlBFGSYC ) round( hessian( mlBFGSYC ), 2 ) logLik( mlBFGSYC ) maximType( mlBFGSYC ) nIter( mlBFGSYC ) try( nObs( mlBFGSYC ) ) nParam( mlBFGSYC ) returnCode( mlBFGSYC ) returnMessage( mlBFGSYC ) round( vcov( mlBFGSYC ), 4 ) logLik( summary( mlBFGSYC ) ) all.equal( ml[-c(3,4,5,6,9,10)], mlBFGSYC[-c(3,4,5,6,9,10)], tolerance = 1e-3 ) all.equal( ml[-c(5,6,9,10)], mlBFGSYC[-c(5,6,9,10)], tolerance = 1e-2 ) mlIndBFGSYC <- maxLik( llfInd, start = startVal, method = "BFGSR" ) summary( mlIndBFGSYC ) all.equal( mlBFGSYC[-c(3,4,9)], mlIndBFGSYC[ -c(3,4,9,11) ], tolerance = 1e-3 ) round( mlIndBFGSYC[[ 11 ]], 3 ) nObs( mlIndBFGSYC ) # with analytical gradients mlgBFGSYC <- maxLik( llf, gf, start = startVal, method = "BFGSR" , print.level=1) summary(mlgBFGSYC) all.equal( mlBFGSYC[-4], mlgBFGSYC[-4], tolerance = 1e-3 ) mlgIndBFGSYC <- maxLik( llfInd, gfInd, start = startVal, method = "BFGSR" ) all.equal( mlIndBFGSYC, mlgIndBFGSYC, tolerance = 1e-3 ) all.equal( mlgBFGSYC[ -c(3,9) ], mlgIndBFGSYC[ -c(3,9,11) ], tolerance = 1e-3 ) round( mlgIndBFGSYC[[ 11 ]], 3 ) # with analytical gradients as attribute mlGBFGSYC <- maxLik( llfGrad, start = startVal, method = "BFGSR" , print.level=1) all.equal( mlGBFGSYC, mlgBFGSYC, tolerance = 1e-3 ) mlGIndBFGSYC <- maxLik( llfGradInd, start = startVal, method = "BFGSR" ) 
all.equal( mlGIndBFGSYC, mlgIndBFGSYC, tolerance = 1e-3 ) # with analytical gradients as argument and attribute mlgGBFGSYC <- maxLik( llfGrad, gf, start = startVal, method = "BFGSR" ) all.equal( mlgGBFGSYC, mlgBFGSYC, tolerance = 1e-3 ) all.equal( mlgGBFGSYC, mlGBFGSYC, tolerance = 1e-3 ) # with analytical gradients and Hessians mlghBFGSYC <- maxLik( llf, gf, hf, start = startVal, method = "BFGSR" ) all.equal( mlgBFGSYC, mlghBFGSYC, tolerance = 1e-3 ) # with analytical gradients and Hessian as attribute mlGHBFGSYC <- maxLik( llfGradHess, start = startVal, method = "BFGSR" ) all.equal( mlGHBFGSYC, mlghBFGSYC, tolerance = 1e-3 ) # with analytical gradients and Hessian as argument and attribute mlgGhHBFGSYC <- maxLik( llfGradHess, gf, hf, start = startVal, method = "BFGSR" ) all.equal( mlgGhHBFGSYC, mlghBFGSYC, tolerance = 1e-3 ) all.equal( mlgGhHBFGSYC, mlGHBFGSYC, tolerance = 1e-3 ) ## BFGS method mlBFGS <- maxLik( llf, start = startVal, method = "BFGS" ) print( mlBFGS ) summary( mlBFGS ) activePar( mlBFGS ) AIC( mlBFGS ) coef( mlBFGS ) condiNumber( mlBFGS ) round( hessian( mlBFGS ), 2 ) logLik( mlBFGS ) maximType( mlBFGS ) nIter( mlBFGS ) nParam( mlBFGS ) returnCode( mlBFGS ) returnMessage( mlBFGS ) round( vcov( mlBFGS ), 4 ) logLik( summary( mlBFGS ) ) all.equal( ml[-c(4,5,6,9,10)], mlBFGS[-c(4,5,6,9,10,11)], tolerance = 1e-3 ) # with individual log likelihood values mlIndBFGS <- maxLik( llfInd, start = startVal, method = "BFGS" ) summary( mlIndBFGS ) all.equal( mlBFGS[-4], mlIndBFGS[-c(4,12)], tolerance = 1e-3 ) mlIndBFGS[12] nObs( mlIndBFGS ) # with analytical gradients mlgBFGS <- maxLik( llf, gf, start = startVal, method = "BFGS" ) summary( mlgBFGS ) all.equal( mlBFGS[-4], mlgBFGS[-4], tolerance = 1e-3 ) all.equal( mlg[-c(5,6,9,10)], mlgBFGS[-c(5,6,9,10,11)], tolerance = 1e-3 ) mlgIndBFGS <- maxLik( llfInd, gfInd, start = startVal, method = "BFGS" ) all.equal( mlgBFGS[], mlgIndBFGS[-12], tolerance = 1e-3 ) mlgIndBFGS[12] # with analytical gradients as attribute mlGBFGS <- maxLik( llfGrad, start = startVal, method = "BFGS" ) all.equal( mlGBFGS, mlgBFGS, tolerance = 1e-3 ) mlGIndBFGS <- maxLik( llfGradInd, start = startVal, method = "BFGS" ) all.equal( mlGIndBFGS, mlgIndBFGS, tolerance = 1e-3 ) # with analytical gradients as argument and attribute mlgGBFGS <- maxLik( llfGrad, gf, start = startVal, method = "BFGS" ) all.equal( mlgGBFGS, mlgBFGS, tolerance = 1e-3 ) all.equal( mlgGBFGS, mlGBFGS, tolerance = 1e-3 ) # with unused Hessian mlghBFGS <- maxLik( llf, gf, hf, start = startVal, method = "BFGS" ) all.equal( mlgBFGS, mlghBFGS, tolerance = 1e-3 ) # with analytical gradients and Hessian as attribute mlGHBFGS <- maxLik( llfGradHess, start = startVal, method = "BFGS" ) all.equal( mlGHBFGS, mlghBFGS, tolerance = 1e-3 ) # with analytical gradients and Hessian as argument and attribute mlgGhHBFGS <- maxLik( llfGradHess, gf, hf, start = startVal, method = "BFGS" ) all.equal( mlgGhHBFGS, mlghBFGS, tolerance = 1e-3 ) all.equal( mlgGhHBFGS, mlGHBFGS, tolerance = 1e-3 ) ## NM method mlNM <- maxLik( llf, start = startVal, method = "NM" ) print( mlNM ) summary( mlNM ) activePar( mlNM ) AIC( mlNM ) coef( mlNM ) condiNumber( mlNM ) round( hessian( mlNM ), 2 ) logLik( mlNM ) maximType( mlNM ) nIter( mlNM ) nParam( mlNM ) returnCode( mlNM ) returnMessage( mlNM ) round( vcov( mlNM ), 4 ) logLik( summary( mlNM ) ) all.equal( ml[-c(3,4,5,6,9,10)], mlNM[-c(3,4,5,6,9,10,11)], tolerance = 1e-3 ) # with individual log likelihood values mlIndNM <- maxLik( llfInd, start = startVal, method = "NM" ) summary( 
mlIndNM ) all.equal( mlNM[-4], mlIndNM[-c(4,12)], tolerance = 1e-3 ) mlIndNM[12] nObs( mlIndNM ) # with unused analytical gradients mlgNM <- maxLik( llf, gf, start = startVal, method = "NM" ) summary( mlgNM ) all.equal( mlNM[-4], mlgNM[-4], tolerance = 1e-3 ) # with individual log likelihood values and gradients mlgIndNM <- maxLik( llfInd, gfInd, start = startVal, method = "NM" ) summary( mlgIndNM ) all.equal( mlgNM[], mlgIndNM[-12], tolerance = 1e-3 ) mlgIndNM[12] # with (unused) analytical gradients as attribute mlGNM <- maxLik( llfGrad, start = startVal, method = "NM" ) all.equal( mlGNM, mlgNM, tolerance = 1e-3 ) mlGIndNM <- maxLik( llfGradInd, start = startVal, method = "NM" ) all.equal( mlGIndNM, mlgIndNM, tolerance = 1e-3 ) # with analytical gradients as argument and attribute mlgGNM <- maxLik( llfGrad, gf, start = startVal, method = "NM" ) all.equal( mlgGNM, mlgNM, tolerance = 1e-3 ) all.equal( mlgGNM, mlGNM, tolerance = 1e-3 ) # with unused analytical gradients and Hessian mlghNM <- maxLik( llf, gf, hf, start = startVal, method = "NM" ) all.equal( mlgNM, mlghNM, tolerance = 1e-3 ) ## SANN method mlSANN <- maxLik( llf, start = startVal, method = "SANN" ) print( mlSANN ) summary( mlSANN ) activePar( mlSANN ) AIC( mlSANN ) coef( mlSANN ) condiNumber( mlSANN ) round( hessian( mlSANN ), 2 ) logLik( mlSANN ) maximType( mlSANN ) nIter( mlSANN ) nParam( mlSANN ) returnCode( mlSANN ) returnMessage( mlSANN ) round( vcov( mlSANN ), 4 ) logLik( summary( mlSANN ) ) all.equal( ml[-c(3,4,5,6,9,10)], mlSANN[-c(3,4,5,6,9,10,11)], tolerance = 1e-3 ) # with individual log likelihood values mlIndSANN <- maxLik( llfInd, start = startVal, method = "SANN" ) summary( mlIndSANN ) all.equal( mlSANN[-4], mlIndSANN[-c(4,12)], tolerance = 1e-3 ) mlIndSANN[12] nObs( mlIndSANN ) # with unused analytical gradients mlgSANN <- maxLik( llf, gf, start = startVal, method = "SANN" ) summary( mlgSANN ) all.equal( mlSANN[-4], mlgSANN[-4], tolerance = 1e-3 ) # with individual log likelihood values and gradients mlgIndSANN <- maxLik( llfInd, gfInd, start = startVal, method = "SANN" ) summary( mlgIndSANN ) all.equal( mlgSANN[], mlgIndSANN[-12], tolerance = 1e-3 ) mlgIndSANN[12] # with unused analytical gradients and Hessian mlghSANN <- maxLik( llf, gf, hf, start = startVal, method = "SANN" ) all.equal( mlgSANN, mlghSANN, tolerance = 1e-3 ) # with a user-specified function to generate a new candidate point mlSANNCand <- maxLik( llf, start = startVal, method = "SANN", cand = function(x)c(rnorm(1,x[1]),rnorm(1,x[2])) ) summary( mlSANNCand ) all.equal( mlSANNCand[-c(3,4)], mlSANN[-c(3,4)], tolerance = 1e-2 ) ############### with fixed parameters ############### # start values startValFix <- c( mu = 1, sigma = 1 ) # fix mu (the mean ) at its start value isFixed <- c( TRUE, FALSE ) ## NR method with fixed parameters mlFix <- maxLik( llf, start = startValFix, activePar = !isFixed ) mlFix1 <- maxLik( llf, start = startValFix, activePar = 2 ) all.equal( mlFix, mlFix1, tolerance = 1e-3 ) mlFix2 <- maxLik( llf, start = startValFix, fixed = isFixed ) all.equal( mlFix, mlFix2, tolerance = 1e-3 ) mlFix3 <- maxLik( llf, start = startValFix, fixed = "mu" ) all.equal( mlFix, mlFix3, tolerance = 1e-3 ) mlFix4 <- maxLik( llf, start = startValFix, fixed = 1 ) all.equal( mlFix, mlFix4, tolerance = 1e-3 ) print( mlFix ) summary( mlFix ) activePar( mlFix ) AIC( mlFix ) coef( mlFix ) condiNumber( mlFix ) round( hessian( mlFix ), 2 ) logLik( mlFix ) maximType( mlFix ) nIter( mlFix ) nParam( mlFix ) returnCode( mlFix ) returnMessage( mlFix ) round( 
vcov( mlFix ), 4 ) logLik( summary( mlFix ) ) mlIndFix <- maxLik( llfInd, start = startValFix, activePar = !isFixed ) mlIndFix1 <- maxLik( llfInd, start = startValFix, activePar = 2 ) all.equal( mlIndFix, mlIndFix1, tolerance = 1e-3 ) mlIndFix2 <- maxLik( llfInd, start = startValFix, fixed = isFixed ) all.equal( mlIndFix, mlIndFix2, tolerance = 1e-3 ) mlIndFix3 <- maxLik( llfInd, start = startValFix, fixed = "mu" ) all.equal( mlIndFix, mlIndFix3, tolerance = 1e-3 ) mlIndFix4 <- maxLik( llfInd, start = startValFix, fixed = 1 ) all.equal( mlIndFix, mlIndFix4, tolerance = 1e-3 ) summary( mlIndFix ) all.equal( mlFix[ ], mlIndFix[ -11 ], tolerance = 1e-3 ) round( mlFix[[3]], 5 ) round( mlIndFix[[3]], 5 ) round( mlIndFix[[ 11 ]], 3 ) nObs( mlIndFix ) # with analytical gradients mlgFix <- maxLik( llf, gf, start = startValFix, activePar = !isFixed ) mlgFix1 <- maxLik( llf, gf, start = startValFix, activePar = 2 ) all.equal( mlgFix, mlgFix1, tolerance = 1e-3 ) mlgFix2 <- maxLik( llf, gf, start = startValFix, fixed = isFixed ) all.equal( mlgFix, mlgFix2, tolerance = 1e-3 ) summary( mlgFix ) all.equal( mlFix, mlgFix, tolerance = 1e-3 ) round( mlFix[[3]], 5 ) round( mlgFix[[3]], 5 ) mlFix[[4]] mlgFix[[4]] mlgIndFix <- maxLik( llfInd, gfInd, start = startValFix, activePar = !isFixed ) all.equal( mlIndFix, mlgIndFix, tolerance = 1e-3 ) round( mlIndFix[[3]], 5 ) round( mlgIndFix[[3]], 5 ) mlIndFix[[4]] mlgIndFix[[4]] all.equal( mlgFix[ ], mlgIndFix[ -11 ], tolerance = 1e-3 ) round( mlgIndFix[[ 11 ]], 3 ) # with analytical gradients and Hessians mlghFix <- maxLik( llf, gf, hf, start = startValFix, activePar = !isFixed ) all.equal( mlgFix, mlghFix, tolerance = 1e-3 ) mlgFix[[4]] mlghFix[[4]] ## BHHH method with fixed parameters mlFixBHHH <- maxLik( llfInd, start = startValFix, activePar = !isFixed, method = "BHHH" ) mlFixBHHH1 <- maxLik( llfInd, start = startValFix, activePar = 2, method = "BHHH" ) all.equal( mlFixBHHH, mlFixBHHH1, tolerance = 1e-3 ) mlFixBHHH2 <- maxLik( llfInd, start = startValFix, fixed = isFixed, method = "BHHH" ) all.equal( mlFixBHHH, mlFixBHHH2, tolerance = 1e-3 ) mlFixBHHH3 <- maxLik( llfInd, start = startValFix, fixed = "mu", method = "BHHH" ) all.equal( mlFixBHHH, mlFixBHHH3, tolerance = 1e-3 ) mlFixBHHH4 <- maxLik( llfInd, start = startValFix, fixed = 1, method = "BHHH" ) all.equal( mlFixBHHH, mlFixBHHH4, tolerance = 1e-3 ) print( mlFixBHHH ) summary( mlFixBHHH ) activePar( mlFixBHHH ) AIC( mlFixBHHH ) coef( mlFixBHHH ) condiNumber( mlFixBHHH ) round( hessian( mlFixBHHH ), 2 ) logLik( mlFixBHHH ) maximType( mlFixBHHH ) nIter( mlFixBHHH ) nParam( mlFixBHHH ) returnCode( mlFixBHHH ) returnMessage( mlFixBHHH ) round( vcov( mlFixBHHH ), 4 ) logLik( summary( mlFixBHHH ) ) all.equal( mlFix[ -c( 4, 5, 6, 9, 10 ) ], mlFixBHHH[ -c( 4, 5, 6, 9, 10, 11 ) ], tolerance = 1e-3 ) mlFix[[ 3 ]] mlFixBHHH[[ 3 ]] mlFix[[ 4 ]] mlFixBHHH[[ 4 ]] round( mlFixBHHH[[ 11 ]], 3 ) nObs( mlFixBHHH ) # with analytical gradients mlgFixBHHH <- maxLik( llfInd, gfInd, start = startValFix, activePar = !isFixed, method = "BHHH" ) mlgFixBHHH1 <- maxLik( llfInd, gfInd, start = startValFix, activePar = 2, method = "BHHH" ) all.equal( mlgFixBHHH, mlgFixBHHH1, tolerance = 1e-3 ) mlgFixBHHH2 <- maxLik( llfInd, gfInd, start = startValFix, fixed = isFixed, method = "BHHH" ) all.equal( mlgFixBHHH, mlgFixBHHH2, tolerance = 1e-3 ) mlgFixBHHH3 <- maxLik( llfInd, gfInd, start = startValFix, fixed = "mu", method = "BHHH" ) all.equal( mlgFixBHHH, mlgFixBHHH3, tolerance = 1e-3 ) mlgFixBHHH4 <- maxLik( llfInd, gfInd, start = 
startValFix, fixed = 1, method = "BHHH" ) all.equal( mlgFixBHHH, mlgFixBHHH4, tolerance = 1e-3 ) summary( mlgFixBHHH ) all.equal( mlFixBHHH, mlgFixBHHH, tolerance = 1e-3 ) mlgFixBHHH2 <- maxLik( llf, gfInd, start = startValFix, activePar = !isFixed, method = "BHHH") all.equal( mlgFixBHHH, mlgFixBHHH2, tolerance = 1e-3 ) # with unused Hessians mlghFixBHHH <- maxLik( llfInd, gfInd, hf, start = startValFix, activePar = !isFixed, method = "BHHH" ) all.equal( mlgFixBHHH, mlghFixBHHH, tolerance = 1e-3 ) ## BFGS method with fixed parameters mlFixBfgs <- maxLik( llf, start = startValFix, fixed = isFixed, method = "BFGS" ) mlFixBfgs3 <- maxLik( llf, start = startValFix, fixed = "mu", method = "BFGS" ) all.equal( mlFixBfgs, mlFixBfgs3, tolerance = 1e-3 ) mlFixBfgs4 <- maxLik( llf, start = startValFix, fixed = 1, method = "BFGS" ) all.equal( mlFixBfgs, mlFixBfgs4, tolerance = 1e-3 ) print( mlFixBfgs ) summary( mlFixBfgs ) activePar( mlFixBfgs ) AIC( mlFixBfgs ) coef( mlFixBfgs ) condiNumber( mlFixBfgs ) round( hessian( mlFixBfgs ), 2 ) logLik( mlFixBfgs ) maximType( mlFixBfgs ) nIter( mlFixBfgs ) nParam( mlFixBfgs ) returnCode( mlFixBfgs ) returnMessage( mlFixBfgs ) round( vcov( mlFixBfgs ), 4 ) logLik( summary( mlFixBfgs ) ) all.equal( mlghFix[ -c( 5, 6, 9, 10 ) ], mlFixBfgs[ -c( 5, 6, 9, 10, 11 ) ], tolerance = 1e-3 ) mlIndFixBfgs <- maxLik( llfInd, start = startValFix, fixed = isFixed, method = "BFGS" ) all.equal( mlFixBfgs[-c(4,9)], mlIndFixBfgs[ -c(4,9,12) ], tolerance = 1e-3 ) print(formatC(mlIndFixBfgs$gradientObs, format="f", digits=4, width=7), quote=FALSE) # print fradient, only 4 digits to avoid clutter in R CMD tests mlIndFixBfgs3 <- maxLik( llfInd, start = startValFix, fixed = "mu", method = "BFGS" ) all.equal( mlIndFixBfgs, mlIndFixBfgs3, tolerance = 1e-3 ) mlIndFixBfgs4 <- maxLik( llfInd, start = startValFix, fixed = 1, method = "BFGS" ) all.equal( mlIndFixBfgs, mlIndFixBfgs4, tolerance = 1e-3 ) nObs( mlIndFixBfgs ) # with analytical gradients mlgFixBfgs <- maxLik( llf, gf, start = startValFix, fixed = isFixed, method = "BFGS" ) mlgFixBfgs3 <- maxLik( llf, gf, start = startValFix, fixed = "mu", method = "BFGS" ) all.equal( mlgFixBfgs, mlgFixBfgs3, tolerance = 1e-3 ) mlgFixBfgs4 <- maxLik( llf, gf, start = startValFix, fixed = 1, method = "BFGS" ) all.equal( mlgFixBfgs, mlgFixBfgs4, tolerance = 1e-3 ) summary( mlgFixBfgs ) all.equal( mlFixBfgs[ -9 ], mlgFixBfgs[ -9 ], tolerance = 1e-3 ) mlgIndFixBfgs <- maxLik( llfInd, gfInd, start = startValFix, fixed = isFixed, method = "BFGS") all.equal( mlgFixBfgs[ ], mlgIndFixBfgs[ -12 ], tolerance = 1e-3 ) round( mlgIndFixBfgs[[ 12 ]], 3 ) mlgIndFixBfgs3 <- maxLik( llfInd, gfInd, start = startValFix, fixed = "mu", method = "BFGS" ) all.equal( mlgIndFixBfgs, mlgIndFixBfgs3, tolerance = 1e-3 ) mlgIndFixBfgs4 <- maxLik( llfInd, gfInd, start = startValFix, fixed = 1, method = "BFGS" ) all.equal( mlgIndFixBfgs, mlgIndFixBfgs4, tolerance = 1e-3 ) # with unused Hessians mlghFixBfgs <- maxLik( llf, gf, hf, start = startValFix, fixed = isFixed, method = "BFGS" ) all.equal( mlgFixBfgs, mlghFixBfgs, tolerance = 1e-3 ) mlghFixBfgs3 <- maxLik( llf, gf, hf, start = startValFix, fixed = "mu", method = "BFGS" ) all.equal( mlghFixBfgs, mlghFixBfgs3, tolerance = 1e-3 ) mlghFixBfgs4 <- maxLik( llf, gf, hf, start = startValFix, fixed = 1, method = "BFGS" ) all.equal( mlghFixBfgs, mlghFixBfgs4, tolerance = 1e-3 ) ## NM method with fixed parameters mlFixNm <- maxLik( llf, start = startValFix, fixed = isFixed, method = "NM" ) mlFixNm3 <- maxLik( llf, start = 
startValFix, fixed = "mu", method = "NM" ) all.equal( mlFixNm, mlFixNm3, tolerance = 1e-3 ) mlFixNm4 <- maxLik( llf, start = startValFix, fixed = 1, method = "NM" ) all.equal( mlFixNm, mlFixNm4, tolerance = 1e-3 ) print( mlFixNm ) summary( mlFixNm ) activePar( mlFixNm ) AIC( mlFixNm ) coef( mlFixNm ) condiNumber( mlFixNm ) round( hessian( mlFixNm ), 2 ) logLik( mlFixNm ) maximType( mlFixNm ) nIter( mlFixNm ) nParam( mlFixNm ) returnCode( mlFixNm ) returnMessage( mlFixNm ) round( vcov( mlFixNm ), 4 ) logLik( summary( mlFixNm ) ) all.equal( mlFixBfgs[ -c(4,9,10) ], mlFixNm[ -c(4,9,10) ], tolerance = 1e-3 ) mlIndFixNm <- maxLik( llfInd, start = startValFix, fixed = isFixed, method = "NM" ) all.equal( mlFixNm[-4], mlIndFixNm[-c(4,12)], tolerance = 1e-3 ) round( mlIndFixNm[[ 12 ]], 3 ) mlIndFixNm3 <- maxLik( llfInd, start = startValFix, fixed = "mu", method = "NM" ) all.equal( mlIndFixNm, mlIndFixNm3, tolerance = 1e-3 ) mlIndFixNm4 <- maxLik( llfInd, start = startValFix, fixed = 1, method = "NM" ) all.equal( mlIndFixNm, mlIndFixNm4, tolerance = 1e-3 ) nObs( mlIndFixNm ) # with analytical gradients mlgFixNm <- maxLik( llf, gf, start = startValFix, fixed = isFixed, method = "NM" ) mlgFixNm3 <- maxLik( llf, gf, start = startValFix, fixed = "mu", method = "NM" ) all.equal( mlgFixNm, mlgFixNm3, tolerance = 1e-3 ) mlgFixNm4 <- maxLik( llf, gf, start = startValFix, fixed = 1, method = "NM" ) all.equal( mlgFixNm, mlgFixNm4, tolerance = 1e-3 ) summary( mlgFixNm ) all.equal( mlFixNm, mlgFixNm, tolerance = 1e-3 ) mlgIndFixNm <- maxLik( llfInd, gfInd, start = startValFix, fixed = isFixed, method = "NM") all.equal( mlgFixNm[ ], mlgIndFixNm[ -12 ], tolerance = 1e-3 ) round( mlgIndFixNm[[ 12 ]], 3 ) # with unused Hessians mlghFixNm <- maxLik( llf, gf, hf, start = startValFix, fixed = isFixed, method = "NM" ) all.equal( mlgFixNm, mlghFixNm, tolerance = 1e-3 ) mlghFixNm3 <- maxLik( llf, gf, hf, start = startValFix, fixed = "mu", method = "NM" ) all.equal( mlghFixNm, mlghFixNm3, tolerance = 1e-3 ) mlghFixNm4 <- maxLik( llf, gf, hf, start = startValFix, fixed = 1, method = "NM" ) all.equal( mlghFixNm, mlghFixNm4, tolerance = 1e-3 ) ## SANN method with fixed parameters mlFixSann <- maxLik( llf, start = startValFix, fixed = isFixed, method = "SANN" ) mlFixSann3 <- maxLik( llf, start = startValFix, fixed = "mu", method = "SANN" ) all.equal( mlFixSann, mlFixSann3, tolerance = 1e-3 ) mlFixSann4 <- maxLik( llf, start = startValFix, fixed = 1, method = "SANN" ) all.equal( mlFixSann, mlFixSann4, tolerance = 1e-3 ) print( mlFixSann ) summary( mlFixSann ) activePar( mlFixSann ) AIC( mlFixSann ) coef( mlFixSann ) condiNumber( mlFixSann ) round( hessian( mlFixSann ), 2 ) logLik( mlFixSann ) maximType( mlFixSann ) nIter( mlFixSann ) nParam( mlFixSann ) returnCode( mlFixSann ) returnMessage( mlFixSann ) round( vcov( mlFixSann ), 4 ) logLik( summary( mlFixSann ) ) all.equal( mlFixBfgs[ -c(4,9,10) ], mlFixSann[ -c(4,9,10) ], tolerance = 1e-3 ) mlIndFixSann <- maxLik( llfInd, start = startValFix, fixed = isFixed, method = "SANN" ) all.equal( mlFixSann[ ], mlIndFixSann[ -12 ], tolerance = 1e-3 ) round( mlIndFixSann[[ 12 ]], 3 ) nObs( mlIndFixSann ) # with analytical gradients mlgFixSann <- maxLik( llf, gf, start = startValFix, fixed = isFixed, method = "SANN" ) summary( mlgFixSann ) all.equal( mlFixSann[-4], mlgFixSann[-4], tolerance = 1e-3 ) mlgIndFixSann <- maxLik( llfInd, gfInd, start = startValFix, fixed = isFixed, method = "SANN") all.equal( mlgFixSann[ ], mlgIndFixSann[ -12 ], tolerance = 1e-3 ) round( mlgIndFixSann[[ 12 ]], 
3 ) # with unused Hessians mlghFixSann <- maxLik( llf, gf, hf, start = startValFix, fixed = isFixed, method = "SANN" ) all.equal( mlgFixSann, mlghFixSann, tolerance = 1e-3 ) ############### with parameter constraints ############### A <- matrix( -1, nrow = 1, ncol = 2 ) ############### inequality constraints ############### inEq <- list( ineqA = A, ineqB = 2.5 ) ## NR method with inequality constraints try( maxLik( llf, start = startVal, constraints = inEq, method = "NR" ) ) ## BHHH method with inequality constraints try( maxLik( llf, start = startVal, constraints = inEq, method = "BHHH" ) ) ## BFGS method with inequality constraints mlBfgsInEq <- maxLik( llf, start = startVal, constraints = inEq, method = "BFGS" ) print( mlBfgsInEq ) summary( mlBfgsInEq ) activePar( mlBfgsInEq ) AIC( mlBfgsInEq ) coef( mlBfgsInEq ) condiNumber( mlBfgsInEq ) round( hessian( mlBfgsInEq ), 2 ) logLik( mlBfgsInEq ) maximType( mlBfgsInEq ) nIter( mlBfgsInEq ) nParam( mlBfgsInEq ) returnCode( mlBfgsInEq ) returnMessage( mlBfgsInEq ) round( vcov( mlBfgsInEq ), 4 ) logLik( summary( mlBfgsInEq ) ) mlBfgsInEqInd <- maxLik( llfInd, start = startVal, constraints = inEq, method = "BFGS" ) summary( mlBfgsInEqInd ) all.equal( mlBfgsInEq[ ], mlBfgsInEqInd[ -12 ], tolerance = 1e-3 ) round( mlBfgsInEqInd[[ 12 ]], 3 ) nObs( mlBfgsInEqInd ) # with analytical gradients mlgBfgsInEq <- maxLik( llf, gf, start = startVal, constraints = inEq, method = "BFGS" ) all.equal( mlBfgsInEq, mlgBfgsInEq, tolerance = 1e-3 ) mlgBfgsInEqInd <- maxLik( llfInd, gfInd, start = startVal, constraints = inEq, method = "BFGS" ) all.equal( mlgBfgsInEqInd[ -12 ], mlgBfgsInEq[ ], tolerance = 1e-3 ) round( mlgBfgsInEqInd[[ 12 ]], 3 ) mlgBfgsInEqInd2 <- maxLik( llf, gfInd, start = startVal, constraints = inEq, method = "BFGS" ) all.equal( mlgBfgsInEqInd, mlgBfgsInEqInd2, tolerance = 1e-3 ) # with unused Hessian mlghBfgsInEq <- maxLik( llf, gf, hf, start = startVal, constraints = inEq, method = "BFGS" ) all.equal( mlgBfgsInEq, mlghBfgsInEq, tolerance = 1e-3 ) ## NM method with inequality constraints mlNmInEq <- maxLik( llf, start = startVal, constraints = inEq, method = "NM" ) print( mlNmInEq ) summary( mlNmInEq ) activePar( mlNmInEq ) AIC( mlNmInEq ) coef( mlNmInEq ) condiNumber( mlNmInEq ) round( hessian( mlNmInEq ), 2 ) logLik( mlNmInEq ) maximType( mlNmInEq ) nIter( mlNmInEq ) nParam( mlNmInEq ) returnCode( mlNmInEq ) returnMessage( mlNmInEq ) round( vcov( mlNmInEq ), 4 ) logLik( summary( mlNmInEq ) ) all.equal( mlBfgsInEq[-c(9,10,11)], mlNmInEq[-c(9,10,11)], tolerance = 1e-3 ) mlNmInEqInd <- maxLik( llfInd, start = startVal, constraints = inEq, method = "NM" ) summary( mlNmInEqInd ) all.equal( mlNmInEq[-4], mlNmInEqInd[-c(4,12)], tolerance = 1e-3 ) round( mlNmInEqInd[[ 12 ]], 3 ) nObs( mlNmInEqInd ) # with unused analytical gradients mlgNmInEq <- maxLik( llf, gf, start = startVal, constraints = inEq, method = "NM" ) all.equal( mlNmInEq, mlgNmInEq, tolerance = 1e-3 ) # with unused analytical gradients and Hessians mlghNmInEq <- maxLik( llf, gf, hf, start = startVal, constraints = inEq, method = "NM" ) all.equal( mlgNmInEq, mlghNmInEq, tolerance = 1e-3 ) ## SANN method with inequality constraints mlSannInEq <- maxLik( llf, start = startVal, constraints = inEq, method = "SANN" ) print( mlSannInEq ) summary( mlSannInEq ) activePar( mlSannInEq ) AIC( mlSannInEq ) coef( mlSannInEq ) condiNumber( mlSannInEq ) round( hessian( mlSannInEq ), 2 ) logLik( mlSannInEq ) maximType( mlSannInEq ) nIter( mlSannInEq ) nParam( mlSannInEq ) returnCode( mlSannInEq ) 
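# note on 'inEq' (defined above): maxLik takes inequality constraints in the
# affine form ineqA %*% theta + ineqB >= 0, so A = (-1, -1) and B = 2.5
# restrict the parameters to 2.5 - mu - sigma >= 0, i.e. mu + sigma <= 2.5;
# the unconstrained optimum (ca. mu = 1.18, sigma = 1.82) is infeasible, so
# the constraint should be (nearly) binding in the constrained fits above.
# minimal sanity check at the constrained BFGS optimum:
sum( coef( mlBfgsInEq ) )   # should be close to the bound 2.5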
returnMessage( mlSannInEq ) round( vcov( mlSannInEq ), 4 ) logLik( summary( mlSannInEq ) ) all.equal( mlBfgsInEq[-c(2,3,4,9,10,11)], mlSannInEq[-c(2,3,4,9,10,11)], tolerance = 1e-3 ) all.equal( mlBfgsInEq[-c(3,4,9,10,11)], mlSannInEq[-c(3,4,9,10,11)], tolerance = 1e-2 ) # with unused analytical gradients mlgSannInEq <- maxLik( llf, gf, start = startVal, constraints = inEq, method = "SANN" ) all.equal( mlSannInEq, mlgSannInEq, tolerance = 1e-3 ) # with a user-specified function to generate a new candidate point mlSannInEqCand <- maxLik( llf, start = startVal, constraints = inEq, method = "SANN", cand = function(x)c(rnorm(1,x[1]),rnorm(1,x[2])) ) summary( mlSannInEqCand ) all.equal( mlSannInEqCand[-c(2,3,4)], mlSannInEq[-c(2,3,4)], tolerance = 1e-3 ) all.equal( mlSannInEqCand, mlSannInEq, tolerance = 1e-1 ) ############### equality constraints ############### eqCon <- list( eqA = A, eqB = 2.5 ) ## NR method with equality constraints mlCon <- maxLik( llf, start = startVal, constraints = eqCon ) print( mlCon ) summary( mlCon ) activePar( mlCon ) AIC( mlCon ) coef( mlCon ) condiNumber( mlCon ) round( hessian( mlCon ), 2 ) logLik( mlCon ) maximType( mlCon ) nIter( mlCon ) nParam( mlCon ) returnCode( mlCon ) returnMessage( mlCon ) round( vcov( mlCon ), 4 ) logLik( summary( mlCon ) ) mlConInd <- maxLik( llfInd, start = startVal, constraints = eqCon ) summary( mlConInd ) all.equal( mlCon[-4], mlConInd[-c(4,11)], tolerance = 1e-3 ) mlConInd[11] nObs( mlConInd ) # with analytical gradients mlgCon <- maxLik( llf, gf, start = startVal, constraints = eqCon ) summary( mlgCon ) all.equal( mlCon[ -c(2,3,4,5,6,7,9,11) ], mlgCon[ -c(2,3,4,5,6,7,9,11) ], tolerance = 1e-3 ) all.equal( mlCon[ -c( 5, 6, 7, 9, 11 ) ], mlgCon[ -c( 5, 6, 7, 9, 11 ) ], tolerance = 1e-1 ) mlgConInd <- maxLik( llfInd, gfInd, start = startVal, constraints = eqCon ) all.equal( mlConInd[ -c(2,3,4,5,6,7,9,11,12) ], mlgConInd[ -c(2,3,4,5,6,7,9,11,12) ], tolerance = 1e-3 ) all.equal( mlConInd[ -c(5,6,7,9,12) ], mlgConInd[ -c(5,6,7,9,12) ], tolerance = 1e-1 ) all.equal( mlgCon[], mlgConInd[-11], tolerance = 1e-3 ) mlgConInd[11] # with analytical gradients as attribute mlGCon <- maxLik( llfGrad, start = startVal, constraints = eqCon ) all.equal( mlGCon, mlgCon, tolerance = 1e-3 ) all.equal( mlGCon[-c(2,3,4,5,6,7,9,11)], mlCon[-c(2,3,4,5,6,7,9,11)], tolerance = 1e-3 ) all.equal( mlGCon[-c(5,6,7,9,11)], mlCon[-c(5,6,7,9,11)], tolerance = 1e-1 ) # with analytical gradients and Hessians mlghCon <- maxLik( llf, gf, hf, start = startVal, constraints = eqCon ) all.equal( mlgCon, mlghCon, tolerance = 1e-3 ) # with analytical gradients and Hessians as attributes mlGHCon <- maxLik( llfGradHess, start = startVal, constraints = eqCon ) all.equal( mlGHCon, mlghCon, tolerance = 1e-3 ) all.equal( mlGHCon[-c(2,3,4,5,6,7,9,11)], mlCon[-c(2,3,4,5,6,7,9,11)], tolerance = 1e-3 ) all.equal( mlGHCon[-c(5,6,7,9,11)], mlCon[-c(5,6,7,9,11)], tolerance = 1e-1 ) ## BHHH method with equality constraints mlBhhhCon <- maxLik( llfInd, start = startVal, constraints = eqCon, method = "BHHH" ) print( mlBhhhCon ) summary( mlBhhhCon ) activePar( mlBhhhCon ) AIC( mlBhhhCon ) coef( mlBhhhCon ) condiNumber( mlBhhhCon ) round( hessian( mlBhhhCon ), 2 ) logLik( mlBhhhCon ) maximType( mlBhhhCon ) nIter( mlBhhhCon ) nParam( mlBhhhCon ) returnCode( mlBhhhCon ) returnMessage( mlBhhhCon ) round( vcov( mlBhhhCon ), 4 ) logLik( summary( mlBhhhCon ) ) all.equal( mlCon[ -c( 5, 6, 7, 9, 10 ) ], mlBhhhCon[ -c( 5, 6, 7, 9, 10, 11 ) ], tolerance = 1e-3 ) mlBhhhCon[11] nObs( mlBhhhCon ) # with 
analytical gradients mlgBhhhCon <- maxLik( llf, gfInd, start = startVal, constraints = eqCon, method = "BHHH" ) summary( mlgBhhhCon ) all.equal( mlBhhhCon[-c(2,3,4,5,6,7,9,11,12)], mlgBhhhCon[-c(2,3,4,5,6,7,9,11,12)], tolerance = 1e-3 ) all.equal( mlBhhhCon[-c(5,6,7,9,12)], mlgBhhhCon[-c(5,6,7,9,12)], tolerance = 1e-1 ) mlgBhhhConInd <- maxLik( llfInd, gfInd, start = startVal, constraints = eqCon, method = "BHHH" ) all.equal( mlgBhhhCon, mlgBhhhConInd, tolerance = 1e-3 ) # with analytical gradients as attribute mlGBhhhCon <- maxLik( llfGradInd, start = startVal, constraints = eqCon, method = "BHHH" ) summary( mlGBhhhCon ) all.equal( mlGBhhhCon, mlgBhhhCon, tolerance = 1e-3 ) all.equal( mlGBhhhCon[-c(2,3,4,5,6,7,9,11,12)], mlBhhhCon[-c(2,3,4,5,6,7,9,11,12)], tolerance = 1e-3 ) all.equal( mlGBhhhCon[-c(5,6,7,9,12)], mlBhhhCon[-c(5,6,7,9,12)], tolerance = 1e-1 ) # with analytical gradients and unused Hessians mlghBhhhCon <- maxLik( llf, gfInd, hf, start = startVal, constraints = eqCon, method = "BHHH" ) all.equal( mlgBhhhCon, mlghBhhhCon, tolerance = 1e-3 ) # with analytical gradients and unused Hessians as attributes mlGHBhhhCon <- maxLik( llfGradHessInd, start = startVal, constraints = eqCon, method = "BHHH" ) all.equal( mlGHBhhhCon, mlghBhhhCon, tolerance = 1e-3 ) all.equal( mlGHBhhhCon, mlGBhhhCon, tolerance = 1e-3 ) ## BFGS method with equality constraints mlBfgsCon <- maxLik( llf, start = startVal, constraints = eqCon, method = "BFGS" ) print( mlBfgsCon ) summary( mlBfgsCon ) activePar( mlBfgsCon ) AIC( mlBfgsCon ) coef( mlBfgsCon ) condiNumber( mlBfgsCon ) round( hessian( mlBfgsCon ), 2 ) logLik( mlBfgsCon ) maximType( mlBfgsCon ) nIter( mlBfgsCon ) nParam( mlBfgsCon ) returnCode( mlBfgsCon ) returnMessage( mlBfgsCon ) round( vcov( mlBfgsCon ), 4 ) logLik( summary( mlBfgsCon ) ) all.equal( mlBfgsCon[ -c( 4, 5, 6, 9, 10 ) ], mlCon[ -c( 4, 5, 6, 9, 10 ) ], tolerance = 1e-3 ) mlBfgsConInd <- maxLik( llfInd, start = startVal, constraints = eqCon, method = "BFGS" ) summary( mlBfgsConInd ) all.equal( mlBfgsCon[-4], mlBfgsConInd[-c(4,12)], tolerance = 1e-3 ) mlBfgsConInd[12] nObs( mlBfgsConInd ) # with analytical gradients mlgBfgsCon <- maxLik( llf, gf, start = startVal, constraints = eqCon, method = "BFGS" ) summary( mlgBfgsCon ) all.equal( mlBfgsCon[-c(3,4,9,11)], mlgBfgsCon[-c(3,4,9,11)], tolerance = 1e-2 ) mlgBfgsConInd <- maxLik( llfInd, gfInd, start = startVal, constraints = eqCon, method = "BFGS" ) all.equal( mlgBfgsCon[], mlgBfgsConInd[-12], tolerance = 1e-3 ) mlgBfgsConInd[12] # with analytical gradients and unused Hessians mlghBfgsCon <- maxLik( llf, gf, hf, start = startVal, constraints = eqCon, method = "BFGS" ) all.equal( mlgBfgsCon, mlghBfgsCon, tolerance = 1e-3 ) ## NM method with equality constraints mlNmCon <- maxLik( llf, start = startVal, constraints = eqCon, method = "NM", SUMTTol=0) print( mlNmCon ) summary( mlNmCon ) activePar( mlNmCon ) AIC( mlNmCon ) coef( mlNmCon ) condiNumber( mlNmCon ) round( hessian( mlNmCon ), 2 ) logLik( mlNmCon ) maximType( mlNmCon ) nIter( mlNmCon ) nParam( mlNmCon ) returnCode( mlNmCon ) returnMessage( mlNmCon ) round( vcov( mlNmCon ), 4 ) logLik( summary( mlNmCon ) ) all.equal( mlNmCon[ -c( 4, 5, 6, 9, 10 ) ], mlCon[ -c( 4, 5, 6, 9, 10 ) ], tolerance = 1e-3 ) mlNmConInd <- maxLik( llfInd, start = startVal, constraints = eqCon, method = "NM", SUMTTol=0) summary( mlNmConInd ) all.equal( mlNmCon[], mlNmConInd[-12], tolerance = 1e-3 ) mlNmConInd[12] nObs( mlNmConInd ) # with unused analytical gradients mlgNmCon <- maxLik( llf, gf, start = 
startVal, constraints = eqCon, method = "NM", SUMTTol=0) all.equal( mlNmCon, mlgNmCon, tolerance = 1e-3 ) mlgNmConInd <- maxLik( llfInd, gfInd, start = startVal, constraints = eqCon, method = "NM", SUMTTol=0) all.equal( mlgNmCon[], mlgNmConInd[-12], tolerance = 1e-3 ) mlgNmConInd[12] # with unused analytical gradients and Hessians mlghNmCon <- maxLik( llf, gf, hf, start = startVal, constraints = eqCon, method = "NM", SUMTTol=0) all.equal( mlgNmCon, mlghNmCon, tolerance = 1e-3 ) ## SANN method with equality constraints mlSannCon <- maxLik( llf, start = startVal, constraints = eqCon, method = "SANN", SUMTTol=0) print( mlSannCon ) summary( mlSannCon ) activePar( mlSannCon ) AIC( mlSannCon ) coef( mlSannCon ) condiNumber( mlSannCon ) round( hessian( mlSannCon ), 2 ) logLik( mlSannCon ) maximType( mlSannCon ) nIter( mlSannCon ) nParam( mlSannCon ) returnCode( mlSannCon ) returnMessage( mlSannCon ) round( vcov( mlSannCon ), 4 ) logLik( summary( mlSannCon ) ) all.equal( mlSannCon[ -c(2,3,4,5,6,9,10,11) ], mlBfgsCon[ -c(2,3,4,5,6,9,10,11) ], tolerance = 1e-3 ) all.equal( mlSannCon[ -c(3,4,5,6,9,10,11) ], mlBfgsCon[ -c(3,4,5,6,9,10,11) ], tolerance = 1e-2 ) # with unused analytical gradients mlgSannCon <- maxLik( llf, gf, start = startVal, constraints = eqCon, method = "SANN", SUMTTol=0) all.equal( mlSannCon, mlgSannCon, tolerance = 1e-3 ) # with a user-specified function to generate a new candidate point mlSannConCand <- maxLik( llf, start = startVal, constraints = eqCon, method = "SANN", cand = function(x)c(rnorm(1,x[1]),rnorm(1,x[2])) ) summary( mlSannConCand ) all.equal( mlSannConCand[-c(1,2,3,4,11)], mlSannCon[-c(1,2,3,4,11)], tolerance = 1e-3 ) all.equal( mlSannConCand[-c(2,3,4,11)], mlSannCon[-c(2,3,4,11)], tolerance = 1e-1 ) ## test for method "estfun" library( sandwich ) try( estfun( ml ) ) estfun( mlInd )[ 1:5, ] estfun( mlgInd )[ 1:5, ] estfun( mlBHHH )[ 1:5, ] estfun( mlgBHHH )[ 1:5, ] estfun( mlIndBFGS )[ 1:5, ] estfun( mlgIndBFGS )[ 1:5, ] estfun( mlIndNM )[ 1:5, ] estfun( mlgIndNM )[ 1:5, ] estfun( mlIndSANN )[ 1:5, ] estfun( mlgIndSANN )[ 1:5, ] estfun( mlIndFix )[ 1:5, ] estfun( mlgIndFix )[ 1:5, ] estfun( mlFixBHHH )[ 1:5, ] estfun( mlgFixBHHH )[ 1:5, ] estfun( mlIndFixBfgs )[ 1:5, ] estfun( mlgIndFixBfgs )[ 1:5, ] estfun( mlIndFixNm )[ 1:5, ] estfun( mlgIndFixNm )[ 1:5, ] estfun( mlIndFixSann )[ 1:5, ] estfun( mlgIndFixSann )[ 1:5, ] estfun( mlBfgsInEqInd )[ 1:5, ] estfun( mlgBfgsInEqInd )[ 1:5, ] estfun( mlNmInEqInd )[ 1:5, ] estfun( mlConInd )[ 1:5, ] estfun( mlgConInd )[ 1:5, ] estfun( mlBhhhCon )[ 1:5, ] estfun( mlgBhhhCon )[ 1:5, ] estfun( mlBfgsConInd )[ 1:5, ] estfun( mlgBfgsConInd )[ 1:5, ] estfun( mlNmConInd )[ 1:5, ] estfun( mlgNmConInd )[ 1:5, ] ## test for method "bread" try( bread( ml ) ) round( bread( mlInd ), 3 ) round( bread( mlgInd ), 3 ) round( bread( mlBHHH ), 3 ) round( bread( mlgBHHH ), 3 ) round( bread( mlIndBFGS ), 3 ) round( bread( mlgIndBFGS ), 3 ) round( bread( mlIndNM ), 3 ) round( bread( mlgIndNM ), 3 ) round( bread( mlIndSANN ), 3 ) round( bread( mlgIndSANN ), 3 ) round( bread( mlIndFix ), 3 ) round( bread( mlgIndFix ), 3 ) round( bread( mlFixBHHH ), 3 ) round( bread( mlgFixBHHH ), 3 ) round( bread( mlIndFixBfgs ), 3 ) round( bread( mlgIndFixBfgs ), 3 ) round( bread( mlIndFixNm ), 3 ) round( bread( mlgIndFixNm ), 3 ) round( bread( mlIndFixSann ), 3 ) round( bread( mlgIndFixSann ), 3 ) round( bread( mlBfgsInEqInd ), 3 ) round( bread( mlgBfgsInEqInd ), 3 ) round( bread( mlNmInEqInd ), 3 ) round( bread( mlConInd ), 3 ) round( bread( mlgConInd ), 3 ) 
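# note: the "sandwich" tests below combine bread() -- for maxLik fits this is
# essentially nObs(x) * vcov(x) -- with meat(), the averaged cross-product of
# the estfun() score contributions; sandwich() returns
# bread %*% meat %*% bread / nObs, which for a correctly specified likelihood
# should be close to vcov(); printSandwich() below prints the sandwich
# estimate and reports TRUE whenever it matches vcov(), in particular for the
# BHHH fits, whose vcov() is itself based on the outer product of gradients.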
round( bread( mlBhhhCon ), 3 ) round( bread( mlgBhhhCon ), 3 ) round( bread( mlBfgsConInd ), 3 ) round( bread( mlgBfgsConInd ), 3 ) round( bread( mlNmConInd ), 3 ) round( bread( mlgNmConInd ), 3 ) ## test for method "sandwich" try( sandwich( ml ) ) printSandwich <- function( x ) { print( round( sandwich( x ), 3 ) ) tmp <- all.equal( sandwich( x ), vcov( x ) ) if( isTRUE( tmp ) ) { print( tmp ) } } printSandwich( mlInd ) printSandwich( mlgInd ) printSandwich( mlBHHH ) printSandwich( mlgBHHH ) printSandwich( mlIndBFGS ) printSandwich( mlgIndBFGS ) printSandwich( mlIndNM ) printSandwich( mlgIndNM ) printSandwich( mlIndSANN ) printSandwich( mlgIndSANN ) printSandwich( mlIndFix ) printSandwich( mlgIndFix ) printSandwich( mlFixBHHH ) printSandwich( mlgFixBHHH ) printSandwich( mlIndFixBfgs ) printSandwich( mlgIndFixBfgs ) printSandwich( mlIndFixNm ) printSandwich( mlgIndFixNm ) printSandwich( mlIndFixSann ) printSandwich( mlgIndFixSann ) printSandwich( mlBfgsInEqInd ) printSandwich( mlgBfgsInEqInd ) printSandwich( mlNmInEqInd ) printSandwich( mlConInd ) printSandwich( mlgConInd ) printSandwich( mlBhhhCon ) printSandwich( mlgBhhhCon ) printSandwich( mlBfgsConInd ) printSandwich( mlgBfgsConInd ) printSandwich( mlNmConInd ) printSandwich( mlgNmConInd ) maxLik/tests/fitNormalDist.Rout.save0000644000176000001440000054142212231233071017262 0ustar ripleyusers R version 3.0.2 (2013-09-25) -- "Frisbee Sailing" Copyright (C) 2013 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > # load the 'maxLik' package > library(maxLik) Loading required package: miscTools Please cite the 'maxLik' package as: Henningsen, Arne and Toomet, Ott (2011). maxLik: A package for maximum likelihood estimation in R. Computational Statistics 26(3), 443-458. DOI 10.1007/s00180-010-0217-1. 
If you have questions, suggestions, or comments regarding the 'maxLik' package, please use a forum or 'tracker' at maxLik's R-Forge site: https://r-forge.r-project.org/projects/maxlik/ > options(digits = 4) > # just to avoid so many differences when comparing these output files > ## data to fit a normal distribution > # set seed for pseudo random numbers > set.seed( 123 ) > # generate a variable from normally distributed random numbers > x <- rnorm( 100, 1, 2 ) > xSaved <- x > > ## log likelihood function > llf <- function( param ) { + mu <- param[ 1 ] + sigma <- param[ 2 ] + if(!(sigma > 0)) + return(NA) + # to avoid warnings in the output + N <- length( x ) + llValue <- -0.5 * N * log( 2 * pi ) - N * log( sigma ) - + 0.5 * sum( ( x - mu )^2 / sigma^2 ) + return( llValue ) + } > > ## log likelihood function (individual observations) > llfInd <- function( param ) { + mu <- param[ 1 ] + sigma <- param[ 2 ] + if(!(sigma > 0)) + return(NA) + # to avoid warnings in the output + llValues <- -0.5 * log( 2 * pi ) - log( sigma ) - + 0.5 * ( x - mu )^2 / sigma^2 + return( llValues ) + } > > ## function to calculate analytical gradients > gf <- function( param ) { + mu <- param[ 1 ] + sigma <- param[ 2 ] + N <- length( x ) + llGrad <- c( sum( ( x - mu ) / sigma^2 ), + - N / sigma + sum( ( x - mu )^2 / sigma^3 ) ) + return( llGrad ) + } > > ## function to calculate analytical gradients (individual observations) > gfInd <- function( param ) { + mu <- param[ 1 ] + sigma <- param[ 2 ] + llGrads <- cbind( ( x - mu ) / sigma^2, + - 1 / sigma + ( x - mu )^2 / sigma^3 ) + return( llGrads ) + } > > ## log likelihood function with gradients as attributes > llfGrad <- function( param ) { + mu <- param[ 1 ] + sigma <- param[ 2 ] + if(!(sigma > 0)) + return(NA) + # to avoid warnings in the output + N <- length( x ) + llValue <- -0.5 * N * log( 2 * pi ) - N * log( sigma ) - + 0.5 * sum( ( x - mu )^2 / sigma^2 ) + attributes( llValue )$gradient <- c( sum( ( x - mu ) / sigma^2 ), + - N / sigma + sum( ( x - mu )^2 / sigma^3 ) ) + return( llValue ) + } > > ## log likelihood function with gradients as attributes (individual observations) > llfGradInd <- function( param ) { + mu <- param[ 1 ] + sigma <- param[ 2 ] + if(!(sigma > 0)) + return(NA) + # to avoid warnings in the output + llValues <- -0.5 * log( 2 * pi ) - log( sigma ) - + 0.5 * ( x - mu )^2 / sigma^2 + attributes( llValues )$gradient <- cbind( ( x - mu ) / sigma^2, + - 1 / sigma + ( x - mu )^2 / sigma^3 ) + return( llValues ) + } > > ## function to calculate analytical Hessians > hf <- function( param ) { + mu <- param[ 1 ] + sigma <- param[ 2 ] + N <- length( x ) + llHess <- matrix( c( + N * ( - 1 / sigma^2 ), + sum( - 2 * ( x - mu ) / sigma^3 ), + sum( - 2 * ( x - mu ) / sigma^3 ), + N / sigma^2 + sum( - 3 * ( x - mu )^2 / sigma^4 ) ), + nrow = 2, ncol = 2 ) + return( llHess ) + } > > ## log likelihood function with gradients and Hessian as attributes > llfGradHess <- function( param ) { + mu <- param[ 1 ] + sigma <- param[ 2 ] + if(!(sigma > 0)) + return(NA) + # to avoid warnings in the output + N <- length( x ) + llValue <- -0.5 * N * log( 2 * pi ) - N * log( sigma ) - + 0.5 * sum( ( x - mu )^2 / sigma^2 ) + attributes( llValue )$gradient <- c( sum( ( x - mu ) / sigma^2 ), + - N / sigma + sum( ( x - mu )^2 / sigma^3 ) ) + attributes( llValue )$hessian <- matrix( c( + N * ( - 1 / sigma^2 ), + sum( - 2 * ( x - mu ) / sigma^3 ), + sum( - 2 * ( x - mu ) / sigma^3 ), + N / sigma^2 + sum( - 3 * ( x - mu )^2 / sigma^4 ) ), + nrow = 2, ncol = 2 ) + return( 
llValue ) + } > > ## log likelihood function with gradients as attributes (individual observations) > llfGradHessInd <- function( param ) { + mu <- param[ 1 ] + sigma <- param[ 2 ] + if(!(sigma > 0)) + return(NA) + # to avoid warnings in the output + N <- length( x ) + llValues <- -0.5 * log( 2 * pi ) - log( sigma ) - + 0.5 * ( x - mu )^2 / sigma^2 + attributes( llValues )$gradient <- cbind( ( x - mu ) / sigma^2, + - 1 / sigma + ( x - mu )^2 / sigma^3 ) + attributes( llValues )$hessian <- matrix( c( + N * ( - 1 / sigma^2 ), + sum( - 2 * ( x - mu ) / sigma^3 ), + sum( - 2 * ( x - mu ) / sigma^3 ), + N / sigma^2 + sum( - 3 * ( x - mu )^2 / sigma^4 ) ), + nrow = 2, ncol = 2 ) + return( llValues ) + } > > > # start values > startVal <- c( mu = 0, sigma = 1 ) > > ## NR method > ml <- maxLik( llf, start = startVal ) > print( ml ) Maximum Likelihood estimation Newton-Raphson maximisation, 7 iterations Return code 2: successive function values within tolerance limit Log-Likelihood: -201.6 (2 free parameter(s)) Estimate(s): 1.181 1.816 > summary( ml ) -------------------------------------------- Maximum Likelihood estimation Newton-Raphson maximisation, 7 iterations Return code 2: successive function values within tolerance limit Log-Likelihood: -201.6 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.181 0.182 6.5 7.9e-11 *** sigma 1.816 0.128 14.1 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -------------------------------------------- > activePar( ml ) mu sigma TRUE TRUE > AIC( ml ) [1] 407.2 > coef( ml ) mu sigma 1.181 1.816 > condiNumber( ml ) mu 1 sigma 1.669 > round( hessian( ml ), 2 ) mu sigma mu -30.33 -0.03 sigma -0.03 -60.62 > logLik( ml ) [1] -201.6 > maximType( ml ) [1] "Newton-Raphson maximisation" > nIter( ml ) [1] 7 > try( nObs( ml ) ) Error in nObs.maxLik(ml) : cannot return the number of observations: please re-run 'maxLik' and provide a gradient function using argument 'grad' or (if no gradient function is specified) a log-likelihood function using argument 'logLik' that return the gradients or log-likelihood values, respectively, at each observation > nParam( ml ) [1] 2 > returnCode( ml ) [1] 2 > returnMessage( ml ) [1] "successive function values within tolerance limit" > round( vcov( ml ), 4 ) mu sigma mu 0.033 0.0000 sigma 0.000 0.0165 > logLik( summary( ml ) ) [1] -201.6 > mlInd <- maxLik( llfInd, start = startVal ) > summary( mlInd ) -------------------------------------------- Maximum Likelihood estimation Newton-Raphson maximisation, 7 iterations Return code 1: gradient close to zero Log-Likelihood: -201.6 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.181 0.182 6.5 8.1e-11 *** sigma 1.816 0.128 14.2 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 -------------------------------------------- > all.equal( ml[-c(3,4,5,6)], mlInd[ -c(3,4,5,6,11) ], tolerance = 1e-3 ) [1] TRUE > # 3 gradient, should be close to 0, but may vary enormously in relative terms > mlInd[[11]][sample(nrow(mlInd[[11]]), 10),] mu sigma [1,] -0.49660 -0.1025 [2,] -0.41864 -0.2322 [3,] 0.02027 -0.5498 [4,] -0.10533 -0.5304 [5,] -0.24025 -0.4457 [6,] 0.33578 -0.3457 [7,] 0.44319 -0.1937 [8,] 0.45301 -0.1777 [9,] 1.02831 1.3703 [10,] 0.27760 -0.4105 > # just print a sample of 10 > nObs( mlInd ) [1] 100 > > # with analytical gradients > mlg <- maxLik( llf, gf, start = startVal ) > summary( mlg ) -------------------------------------------- Maximum Likelihood estimation Newton-Raphson maximisation, 7 iterations Return code 1: gradient close to zero Log-Likelihood: -201.6 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.181 0.182 6.5 8e-11 *** sigma 1.816 0.128 14.1 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -------------------------------------------- > all.equal( ml[-c(5,6)], mlg[-c(5,6)], tolerance = 1e-3 ) [1] TRUE > mlgInd <- maxLik( llfInd, gfInd, start = startVal ) > all.equal( mlInd, mlgInd, tolerance = 1e-3 ) [1] TRUE > all.equal( mlg[ ], mlgInd[ -11 ], tolerance = 1e-3 ) [1] TRUE > round( mlgInd[[ 11 ]], 3 ) mu sigma [1,] -0.395 -0.268 [2,] -0.194 -0.482 [3,] 0.890 0.888 [4,] -0.012 -0.550 [5,] 0.024 -0.550 [6,] 0.985 1.211 [7,] 0.225 -0.459 [8,] -0.822 0.676 [9,] -0.471 -0.147 [10,] -0.325 -0.359 [11,] 0.687 0.307 [12,] 0.163 -0.502 [13,] 0.188 -0.486 [14,] 0.012 -0.550 [15,] -0.392 -0.272 [16,] 1.028 1.370 [17,] 0.247 -0.440 [18,] -1.247 2.273 [19,] 0.370 -0.301 [20,] -0.341 -0.339 [21,] -0.702 0.345 [22,] -0.187 -0.487 [23,] -0.677 0.281 [24,] -0.497 -0.103 [25,] -0.434 -0.209 [26,] -1.077 1.557 [27,] 0.453 -0.178 [28,] 0.038 -0.548 [29,] -0.745 0.457 [30,] 0.705 0.353 [31,] 0.204 -0.475 [32,] -0.234 -0.451 [33,] 0.488 -0.118 [34,] 0.477 -0.136 [35,] 0.443 -0.194 [36,] 0.363 -0.312 [37,] 0.281 -0.407 [38,] -0.092 -0.535 [39,] -0.240 -0.446 [40,] -0.285 -0.403 [41,] -0.476 -0.139 [42,] -0.181 -0.491 [43,] -0.822 0.676 [44,] 1.260 2.333 [45,] 0.677 0.283 [46,] -0.736 0.432 [47,] -0.299 -0.388 [48,] -0.338 -0.343 [49,] 0.418 -0.233 [50,] -0.105 -0.530 [51,] 0.099 -0.533 [52,] -0.072 -0.541 [53,] -0.081 -0.539 [54,] 0.775 0.540 [55,] -0.192 -0.484 [56,] 0.864 0.807 [57,] -0.994 1.243 [58,] 0.300 -0.388 [59,] 0.020 -0.550 [60,] 0.076 -0.540 [61,] 0.175 -0.495 [62,] -0.359 -0.316 [63,] -0.257 -0.431 [64,] -0.672 0.270 [65,] -0.704 0.351 [66,] 0.129 -0.520 [67,] 0.217 -0.465 [68,] -0.023 -0.550 [69,] 0.504 -0.089 [70,] 1.188 2.012 [71,] -0.352 -0.325 [72,] -1.454 3.292 [73,] 0.555 0.009 [74,] -0.485 -0.124 [75,] -0.472 -0.146 [76,] 0.567 0.033 [77,] -0.227 -0.457 [78,] -0.795 0.597 [79,] 0.055 -0.545 [80,] -0.139 -0.515 [81,] -0.051 -0.546 [82,] 0.179 -0.492 [83,] -0.279 -0.409 [84,] 0.336 -0.346 [85,] -0.188 -0.486 [86,] 0.146 -0.512 [87,] 0.610 0.125 [88,] 0.209 -0.471 [89,] -0.252 -0.435 [90,] 0.642 0.197 [91,] 0.547 -0.006 [92,] 0.278 -0.411 [93,] 0.090 -0.536 [94,] -0.435 -0.206 [95,] 0.770 0.526 [96,] -0.419 -0.232 [97,] 1.271 2.384 [98,] 0.874 0.838 [99,] -0.198 -0.480 [100,] -0.677 0.282 > > # with analytical gradients as attribute > mlG <- maxLik( llfGrad, start = startVal ) > all.equal( mlG, mlg, tolerance = 1e-3 ) [1] TRUE > all.equal( mlG$gradient, gf( coef( mlG ) ), check.attributes = FALSE, + tolerance = 1e-3 ) [1] TRUE > mlGInd <- maxLik( llfGradInd, start = startVal ) > all.equal( 
mlGInd, mlgInd, tolerance = 1e-3 ) [1] TRUE > all.equal( mlGInd$gradient, colSums( gfInd( coef( mlGInd ) ) ), + check.attributes = FALSE, tolerance = 1e-3 ) [1] TRUE > all.equal( mlGInd$gradientObs, gfInd( coef( mlGInd ) ), + check.attributes = FALSE, tolerance = 1e-3 ) [1] TRUE > > # with analytical gradients as argument and attribute > mlgG <- maxLik( llfGrad, gf, start = startVal ) Warning message: In maxNRCompute(fn = logLikAttr, fnOrig = fn, gradOrig = grad, hessOrig = hess, : the gradient is provided both as attribute 'gradient' and as argument 'grad': ignoring argument 'grad' > all.equal( mlgG, mlg, tolerance = 1e-3 ) [1] TRUE > all.equal( mlgG, mlG, tolerance = 1e-3 ) [1] TRUE > > # with analytical gradients and Hessians > mlgh <- maxLik( llf, gf, hf, start = startVal ) > all.equal( mlg, mlgh, tolerance = 1e-3 ) [1] TRUE > > # with analytical gradients and Hessian as attribute > mlGH <- maxLik( llfGradHess, start = startVal ) > all.equal( mlGH, mlgh, tolerance = 1e-3 ) [1] TRUE > > # with analytical gradients and Hessian as argument and attribute > mlgGhH <- maxLik( llfGradHess, gf, hf, start = startVal ) Warning messages: 1: In maxNRCompute(fn = logLikAttr, fnOrig = fn, gradOrig = grad, hessOrig = hess, : the gradient is provided both as attribute 'gradient' and as argument 'grad': ignoring argument 'grad' 2: In maxNRCompute(fn = logLikAttr, fnOrig = fn, gradOrig = grad, hessOrig = hess, : the Hessian is provided both as attribute 'hessian' and as argument 'hess': ignoring argument 'hess' > all.equal( mlgGhH, mlgh, tolerance = 1e-3 ) [1] TRUE > all.equal( mlgGhH, mlGH, tolerance = 1e-3 ) [1] TRUE > > > ## BHHH method > mlBHHH <- try( maxLik( llf, start = startVal, method = "BHHH" ) ) Error in checkBhhhGrad(g = gr, theta = theta, analytic = (!is.null(attr(f, : if the gradients (argument 'grad') are not provided by the user, the BHHH method requires that the log-likelihood function (argument 'fn') returns a numeric vector, where each element must be the log-likelihood value corresponding to an individual (independent) observation > x <- xSaved[1] > try( maxLik( llfInd, start = startVal, method = "BHHH" ) ) Error in checkBhhhGrad(g = gr, theta = theta, analytic = (!is.null(attr(f, : if the gradients (argument 'grad') are not provided by the user, the BHHH method requires that the log-likelihood function (argument 'fn') returns a numeric vector, where each element must be the log-likelihood value corresponding to an individual (independent) observation > x <- xSaved[1:2] > try( maxLik( llfInd, start = startVal, method = "BHHH" ) ) Maximum Likelihood estimation BHHH maximisation, 8 iterations Return code 2: successive function values within tolerance limit Log-Likelihood: -0.6227 (2 free parameter(s)) Estimate(s): 0.2158 0.3302 > x <- xSaved > mlBHHH <- maxLik( llfInd, start = startVal, method = "BHHH" ) > print( mlBHHH ) Maximum Likelihood estimation BHHH maximisation, 13 iterations Return code 2: successive function values within tolerance limit Log-Likelihood: -201.6 (2 free parameter(s)) Estimate(s): 1.181 1.816 > summary( mlBHHH ) -------------------------------------------- Maximum Likelihood estimation BHHH maximisation, 13 iterations Return code 2: successive function values within tolerance limit Log-Likelihood: -201.6 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.181 0.182 6.49 8.4e-11 *** sigma 1.816 0.134 13.55 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 -------------------------------------------- > activePar( mlBHHH ) mu sigma TRUE TRUE > AIC( mlBHHH ) [1] 407.2 > coef( mlBHHH ) mu sigma 1.181 1.816 > condiNumber( mlBHHH ) mu 1 sigma 1.719 > round( hessian( mlBHHH ), 2 ) mu sigma mu -30.31 -1.83 sigma -1.83 -55.73 attr(,"type") [1] "BHHH" > logLik( mlBHHH ) [1] -201.6 > maximType( mlBHHH ) [1] "BHHH maximisation" > nIter( mlBHHH ) [1] 13 > nParam( mlBHHH ) [1] 2 > returnCode( mlBHHH ) [1] 2 > returnMessage( mlBHHH ) [1] "successive function values within tolerance limit" > round( vcov( mlBHHH ), 4 ) mu sigma mu 0.0331 -0.0011 sigma -0.0011 0.0180 > logLik( summary( mlBHHH ) ) [1] -201.6 > all.equal( ml[-c(4,5,6,9,10) ], mlBHHH[ -c(4,5,6,9,10,11) ], tolerance = 1e-3 ) [1] TRUE > round( mlBHHH[[ 11 ]], 3 ) mu sigma [1,] -0.395 -0.268 [2,] -0.194 -0.482 [3,] 0.890 0.888 [4,] -0.012 -0.550 [5,] 0.024 -0.550 [6,] 0.985 1.211 [7,] 0.225 -0.459 [8,] -0.822 0.676 [9,] -0.471 -0.147 [10,] -0.325 -0.359 [11,] 0.687 0.307 [12,] 0.163 -0.502 [13,] 0.188 -0.486 [14,] 0.012 -0.550 [15,] -0.392 -0.272 [16,] 1.028 1.370 [17,] 0.247 -0.440 [18,] -1.247 2.273 [19,] 0.370 -0.301 [20,] -0.341 -0.339 [21,] -0.702 0.345 [22,] -0.187 -0.487 [23,] -0.677 0.281 [24,] -0.497 -0.103 [25,] -0.434 -0.209 [26,] -1.077 1.557 [27,] 0.453 -0.178 [28,] 0.038 -0.548 [29,] -0.745 0.457 [30,] 0.705 0.353 [31,] 0.204 -0.475 [32,] -0.234 -0.451 [33,] 0.488 -0.118 [34,] 0.477 -0.136 [35,] 0.443 -0.194 [36,] 0.363 -0.312 [37,] 0.281 -0.407 [38,] -0.092 -0.535 [39,] -0.240 -0.446 [40,] -0.285 -0.403 [41,] -0.476 -0.139 [42,] -0.181 -0.491 [43,] -0.822 0.676 [44,] 1.260 2.333 [45,] 0.677 0.283 [46,] -0.736 0.432 [47,] -0.299 -0.388 [48,] -0.338 -0.343 [49,] 0.418 -0.233 [50,] -0.105 -0.530 [51,] 0.099 -0.533 [52,] -0.072 -0.541 [53,] -0.081 -0.539 [54,] 0.775 0.540 [55,] -0.192 -0.484 [56,] 0.864 0.807 [57,] -0.994 1.243 [58,] 0.300 -0.388 [59,] 0.020 -0.550 [60,] 0.076 -0.540 [61,] 0.175 -0.495 [62,] -0.359 -0.316 [63,] -0.257 -0.431 [64,] -0.672 0.270 [65,] -0.704 0.351 [66,] 0.129 -0.520 [67,] 0.217 -0.465 [68,] -0.023 -0.550 [69,] 0.504 -0.089 [70,] 1.188 2.012 [71,] -0.352 -0.325 [72,] -1.454 3.292 [73,] 0.555 0.009 [74,] -0.485 -0.124 [75,] -0.472 -0.146 [76,] 0.567 0.033 [77,] -0.227 -0.457 [78,] -0.795 0.597 [79,] 0.055 -0.545 [80,] -0.139 -0.515 [81,] -0.051 -0.546 [82,] 0.179 -0.492 [83,] -0.279 -0.409 [84,] 0.336 -0.346 [85,] -0.188 -0.486 [86,] 0.146 -0.512 [87,] 0.610 0.125 [88,] 0.209 -0.471 [89,] -0.252 -0.435 [90,] 0.642 0.197 [91,] 0.547 -0.006 [92,] 0.278 -0.411 [93,] 0.090 -0.536 [94,] -0.435 -0.206 [95,] 0.770 0.526 [96,] -0.419 -0.232 [97,] 1.271 2.384 [98,] 0.874 0.838 [99,] -0.198 -0.480 [100,] -0.677 0.282 > nObs( mlBHHH ) [1] 100 > # final Hessian = usual Hessian > mlBhhhH <- maxLik( llfInd, start = startVal, method = "BHHH", + finalHessian = TRUE ) > all.equal( mlBhhhH[-4], mlBHHH[-4], tolerance = 1e-3 ) [1] TRUE > round( hessian( mlBhhhH ), 2 ) mu sigma mu -30.33 0.00 sigma 0.00 -60.62 > summary( mlBhhhH ) -------------------------------------------- Maximum Likelihood estimation BHHH maximisation, 13 iterations Return code 2: successive function values within tolerance limit Log-Likelihood: -201.6 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.181 0.182 6.5 7.9e-11 *** sigma 1.816 0.128 14.1 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 -------------------------------------------- > > # with analytical gradients > mlgBHHH <- try( maxLik( llf, gf, start = startVal, method = "BHHH" ) ) Error in checkBhhhGrad(g = gr, theta = theta, analytic = (!is.null(attr(f, : gradient is not a matrix but of class 'numeric'; the BHHH method requires that the gradient function (argument 'grad') returns a numeric matrix, where each row must correspond to the gradient(s) of the log-likelihood function at an individual (independent) observation and each column must correspond to a parameter > mlgBHHH <- try( maxLik( llfInd, gf, start = startVal, method = "BHHH" ) ) Error in checkBhhhGrad(g = gr, theta = theta, analytic = (!is.null(attr(f, : gradient is not a matrix but of class 'numeric'; the BHHH method requires that the gradient function (argument 'grad') returns a numeric matrix, where each row must correspond to the gradient(s) of the log-likelihood function at an individual (independent) observation and each column must correspond to a parameter > x <- xSaved[1] > try( maxLik( llf, gfInd, start = startVal, method = "BHHH" ) ) Error in checkBhhhGrad(g = gr, theta = theta, analytic = (!is.null(attr(f, : the matrix returned by the gradient function (argument 'grad') must have at least as many rows as the number of parameters (2), where each row must correspond to the gradients of the log-likelihood function of an individual (independent) observation: currently, there are (is) 2 parameter(s) but the gradient matrix has only 1 row(s) > try( maxLik( llfInd, gfInd, start = startVal, method = "BHHH" ) ) Error in checkBhhhGrad(g = gr, theta = theta, analytic = (!is.null(attr(f, : the matrix returned by the gradient function (argument 'grad') must have at least as many rows as the number of parameters (2), where each row must correspond to the gradients of the log-likelihood function of an individual (independent) observation: currently, there are (is) 2 parameter(s) but the gradient matrix has only 1 row(s) > x <- xSaved[1:2] > try( maxLik( llf, gfInd, start = startVal, method = "BHHH" ) ) Maximum Likelihood estimation BHHH maximisation, 8 iterations Return code 2: successive function values within tolerance limit Log-Likelihood: -0.6227 (2 free parameter(s)) Estimate(s): 0.2158 0.3302 > try( maxLik( llfInd, gfInd, start = startVal, method = "BHHH" ) ) Maximum Likelihood estimation BHHH maximisation, 8 iterations Return code 2: successive function values within tolerance limit Log-Likelihood: -0.6227 (2 free parameter(s)) Estimate(s): 0.2158 0.3302 > x <- xSaved > mlgBHHH <- maxLik( llfInd, gfInd, start = startVal, method = "BHHH" ) > summary( mlgBHHH ) -------------------------------------------- Maximum Likelihood estimation BHHH maximisation, 13 iterations Return code 2: successive function values within tolerance limit Log-Likelihood: -201.6 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.181 0.182 6.49 8.4e-11 *** sigma 1.816 0.134 13.55 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 -------------------------------------------- > all.equal( mlBHHH, mlgBHHH, tolerance = 1e-3 ) [1] TRUE > all.equal( mlg[-c(4,5,6,9,10)], mlgBHHH[-c(4,5,6,9,10,11)], tolerance = 1e-3 ) [1] TRUE > round( mlgBHHH[[ 11 ]], 3 ) mu sigma [1,] -0.395 -0.268 [2,] -0.194 -0.482 [3,] 0.890 0.888 [4,] -0.012 -0.550 [5,] 0.024 -0.550 [6,] 0.985 1.211 [7,] 0.225 -0.459 [8,] -0.822 0.676 [9,] -0.471 -0.147 [10,] -0.325 -0.359 [11,] 0.687 0.307 [12,] 0.163 -0.502 [13,] 0.188 -0.486 [14,] 0.012 -0.550 [15,] -0.392 -0.272 [16,] 1.028 1.370 [17,] 0.247 -0.440 [18,] -1.247 2.273 [19,] 0.370 -0.301 [20,] -0.341 -0.339 [21,] -0.702 0.345 [22,] -0.187 -0.487 [23,] -0.677 0.281 [24,] -0.497 -0.103 [25,] -0.434 -0.209 [26,] -1.077 1.557 [27,] 0.453 -0.178 [28,] 0.038 -0.548 [29,] -0.745 0.457 [30,] 0.705 0.353 [31,] 0.204 -0.475 [32,] -0.234 -0.451 [33,] 0.488 -0.118 [34,] 0.477 -0.136 [35,] 0.443 -0.194 [36,] 0.363 -0.312 [37,] 0.281 -0.407 [38,] -0.092 -0.535 [39,] -0.240 -0.446 [40,] -0.285 -0.403 [41,] -0.476 -0.139 [42,] -0.181 -0.491 [43,] -0.822 0.676 [44,] 1.260 2.333 [45,] 0.677 0.283 [46,] -0.736 0.432 [47,] -0.299 -0.388 [48,] -0.338 -0.343 [49,] 0.418 -0.233 [50,] -0.105 -0.530 [51,] 0.099 -0.533 [52,] -0.072 -0.541 [53,] -0.081 -0.539 [54,] 0.775 0.540 [55,] -0.192 -0.484 [56,] 0.864 0.807 [57,] -0.994 1.243 [58,] 0.300 -0.388 [59,] 0.020 -0.550 [60,] 0.076 -0.540 [61,] 0.175 -0.495 [62,] -0.359 -0.316 [63,] -0.257 -0.431 [64,] -0.672 0.270 [65,] -0.704 0.351 [66,] 0.129 -0.520 [67,] 0.217 -0.465 [68,] -0.023 -0.550 [69,] 0.504 -0.089 [70,] 1.188 2.012 [71,] -0.352 -0.325 [72,] -1.454 3.292 [73,] 0.555 0.009 [74,] -0.485 -0.124 [75,] -0.472 -0.146 [76,] 0.567 0.033 [77,] -0.227 -0.457 [78,] -0.795 0.597 [79,] 0.055 -0.545 [80,] -0.139 -0.515 [81,] -0.051 -0.546 [82,] 0.179 -0.492 [83,] -0.279 -0.409 [84,] 0.336 -0.346 [85,] -0.188 -0.486 [86,] 0.146 -0.512 [87,] 0.610 0.125 [88,] 0.209 -0.471 [89,] -0.252 -0.435 [90,] 0.642 0.197 [91,] 0.547 -0.006 [92,] 0.278 -0.411 [93,] 0.090 -0.536 [94,] -0.435 -0.206 [95,] 0.770 0.526 [96,] -0.419 -0.232 [97,] 1.271 2.384 [98,] 0.874 0.838 [99,] -0.198 -0.480 [100,] -0.677 0.282 > mlgBHHH2 <- maxLik( llf, gfInd, start = startVal, method = "BHHH" ) > all.equal( mlgBHHH, mlgBHHH2, tolerance = 1e-3 ) [1] TRUE > # final Hessian = usual Hessian > mlgBhhhH <- maxLik( llf, gfInd, start = startVal, method = "BHHH", + finalHessian = TRUE ) > all.equal( mlgBhhhH, mlBhhhH, tolerance = 1e-3 ) [1] TRUE > all.equal( mlgBhhhH[-4], mlgBHHH[-4], tolerance = 1e-3 ) [1] TRUE > round( hessian( mlgBhhhH ), 2 ) mu sigma mu -30.31 0.00 sigma 0.00 -60.61 > > # with analytical gradients as attribute > try( maxLik( llfGrad, start = startVal, method = "BHHH" ) ) Error in checkBhhhGrad(g = gr, theta = theta, analytic = (!is.null(attr(f, : gradient is not a matrix but of class 'numeric'; the BHHH method requires that the gradient function (argument 'grad') returns a numeric matrix, where each row must correspond to the gradient(s) of the log-likelihood function at an individual (independent) observation and each column must correspond to a parameter > x <- xSaved[1] > try( maxLik( llfGrad, start = startVal, method = "BHHH" ) ) Error in checkBhhhGrad(g = gr, theta = theta, analytic = (!is.null(attr(f, : gradient is not a matrix but of class 'numeric'; the BHHH method requires that the gradient function (argument 'grad') returns a numeric matrix, where each row must correspond to the gradient(s) of the log-likelihood function at an individual (independent) observation and each 
column must correspond to a parameter > try( maxLik( llfGradInd, start = startVal, method = "BHHH" ) ) Error in checkBhhhGrad(g = gr, theta = theta, analytic = (!is.null(attr(f, : the matrix returned by the gradient function (argument 'grad') must have at least as many rows as the number of parameters (2), where each row must correspond to the gradients of the log-likelihood function of an individual (independent) observation: currently, there are (is) 2 parameter(s) but the gradient matrix has only 1 row(s) > x <- xSaved[1:2] > try( maxLik( llfGrad, start = startVal, method = "BHHH" ) ) Error in checkBhhhGrad(g = gr, theta = theta, analytic = (!is.null(attr(f, : gradient is not a matrix but of class 'numeric'; the BHHH method requires that the gradient function (argument 'grad') returns a numeric matrix, where each row must correspond to the gradient(s) of the log-likelihood function at an individual (independent) observation and each column must correspond to a parameter > try( maxLik( llfGradInd, start = startVal, method = "BHHH" ) ) Maximum Likelihood estimation BHHH maximisation, 8 iterations Return code 2: successive function values within tolerance limit Log-Likelihood: -0.6227 (2 free parameter(s)) Estimate(s): 0.2158 0.3302 > x <- xSaved > mlGBHHH <- maxLik( llfGradInd, start = startVal, method = "BHHH" ) > all.equal( mlGBHHH, mlgBHHH, tolerance = 1e-3 ) [1] TRUE > # final Hessian = usual Hessian > mlGBhhhH <- maxLik( llfGradInd, start = startVal, method = "BHHH", + finalHessian = TRUE ) > all.equal( mlGBhhhH, mlgBhhhH, tolerance = 1e-3 ) [1] TRUE > > # with analytical gradients as argument and attribute > mlgGBHHH <- maxLik( llfGradInd, gfInd, start = startVal, method = "BHHH" ) Warning message: In maxNRCompute(fn = logLikAttr, fnOrig = fn, gradOrig = grad, hessOrig = hess, : the gradient is provided both as attribute 'gradient' and as argument 'grad': ignoring argument 'grad' > all.equal( mlgGBHHH, mlgBHHH, tolerance = 1e-3 ) [1] TRUE > all.equal( mlgGBHHH, mlGBHHH, tolerance = 1e-3 ) [1] TRUE > > # with unused Hessian > mlghBHHH <- maxLik( llfInd, gfInd, hf, start = startVal, method = "BHHH" ) > all.equal( mlgBHHH, mlghBHHH, tolerance = 1e-3 ) [1] TRUE > # final Hessian = usual Hessian > mlghBhhhH <- maxLik( llfInd, gfInd, hf, start = startVal, method = "BHHH", + finalHessian = TRUE ) > all.equal( mlghBhhhH[-4], mlghBHHH[-4], tolerance = 1e-3 ) [1] TRUE > all.equal( mlghBhhhH, mlgBhhhH, tolerance = 1e-3 ) [1] TRUE > > # with unused Hessian as attribute > mlGHBHHH <- maxLik( llfGradHessInd, start = startVal, method = "BHHH" ) > all.equal( mlGHBHHH, mlghBHHH, tolerance = 1e-3 ) [1] TRUE > # final Hessian = usual Hessian > mlGHBhhhH <- maxLik( llfGradHessInd, start = startVal, method = "BHHH", + finalHessian = TRUE ) > all.equal( mlGHBhhhH, mlghBhhhH, tolerance = 1e-3 ) [1] TRUE > > # with analytical gradients and Hessian as argument and attribute > mlgGhHBHHH <- maxLik( llfGradHessInd, gfInd, hf, start = startVal, method = "BHHH" ) Warning messages: 1: In maxNRCompute(fn = logLikAttr, fnOrig = fn, gradOrig = grad, hessOrig = hess, : the gradient is provided both as attribute 'gradient' and as argument 'grad': ignoring argument 'grad' 2: In maxNRCompute(fn = logLikAttr, fnOrig = fn, gradOrig = grad, hessOrig = hess, : the Hessian is provided both as attribute 'hessian' and as argument 'hess': ignoring argument 'hess' > all.equal( mlgGhHBHHH, mlghBHHH, tolerance = 1e-3 ) [1] TRUE > all.equal( mlgGhHBHHH, mlGHBHHH, tolerance = 1e-3 ) [1] TRUE > > > ### BFGS-YC method > mlBFGSYC <- 
maxLik( llf, start = startVal, method = "bfgsr" ) > print( mlBFGSYC ) Maximum Likelihood estimation BFGSR maximization, 15 iterations Return code 2: successive function values within tolerance limit Log-Likelihood: -201.6 (2 free parameter(s)) Estimate(s): 1.181 1.816 > summary( mlBFGSYC ) -------------------------------------------- Maximum Likelihood estimation BFGSR maximization, 15 iterations Return code 2: successive function values within tolerance limit Log-Likelihood: -201.6 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.181 0.182 6.5 8e-11 *** sigma 1.816 0.129 14.1 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -------------------------------------------- > activePar( mlBFGSYC ) mu sigma TRUE TRUE > AIC( mlBFGSYC ) [1] 407.2 > coef( mlBFGSYC ) mu sigma 1.181 1.816 > condiNumber( mlBFGSYC ) mu 1 sigma 1.668 > round( hessian( mlBFGSYC ), 2 ) mu sigma mu -30.30 0.03 sigma 0.03 -60.54 > logLik( mlBFGSYC ) [1] -201.6 > maximType( mlBFGSYC ) [1] "BFGSR maximization" > nIter( mlBFGSYC ) [1] 15 > try( nObs( mlBFGSYC ) ) Error in nObs.maxLik(mlBFGSYC) : cannot return the number of observations: please re-run 'maxLik' and provide a gradient function using argument 'grad' or (if no gradient function is specified) a log-likelihood function using argument 'logLik' that return the gradients or log-likelihood values, respectively, at each observation > nParam( mlBFGSYC ) [1] 2 > returnCode( mlBFGSYC ) [1] 2 > returnMessage( mlBFGSYC ) [1] "successive function values within tolerance limit" > round( vcov( mlBFGSYC ), 4 ) mu sigma mu 0.033 0.0000 sigma 0.000 0.0165 > logLik( summary( mlBFGSYC ) ) [1] -201.6 > all.equal( ml[-c(3,4,5,6,9,10)], mlBFGSYC[-c(3,4,5,6,9,10)], tolerance = 1e-3 ) [1] TRUE > all.equal( ml[-c(5,6,9,10)], mlBFGSYC[-c(5,6,9,10)], tolerance = 1e-2 ) [1] TRUE > mlIndBFGSYC <- maxLik( llfInd, start = startVal, method = "BFGSR" ) > summary( mlIndBFGSYC ) -------------------------------------------- Maximum Likelihood estimation BFGSR maximization, 34 iterations Return code 2: successive function values within tolerance limit Log-Likelihood: -201.6 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.181 0.182 6.5 7.9e-11 *** sigma 1.816 0.128 14.2 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 -------------------------------------------- > all.equal( mlBFGSYC[-c(3,4,9)], mlIndBFGSYC[ -c(3,4,9,11) ], tolerance = 1e-3 ) [1] TRUE > round( mlIndBFGSYC[[ 11 ]], 3 ) mu sigma [1,] -0.395 -0.268 [2,] -0.194 -0.482 [3,] 0.890 0.889 [4,] -0.012 -0.550 [5,] 0.024 -0.550 [6,] 0.985 1.211 [7,] 0.225 -0.459 [8,] -0.822 0.676 [9,] -0.471 -0.147 [10,] -0.325 -0.359 [11,] 0.687 0.307 [12,] 0.163 -0.502 [13,] 0.188 -0.486 [14,] 0.012 -0.550 [15,] -0.392 -0.272 [16,] 1.028 1.371 [17,] 0.247 -0.440 [18,] -1.247 2.274 [19,] 0.370 -0.301 [20,] -0.341 -0.339 [21,] -0.702 0.345 [22,] -0.187 -0.487 [23,] -0.677 0.281 [24,] -0.497 -0.102 [25,] -0.434 -0.209 [26,] -1.077 1.557 [27,] 0.453 -0.178 [28,] 0.038 -0.548 [29,] -0.745 0.457 [30,] 0.705 0.353 [31,] 0.204 -0.475 [32,] -0.234 -0.451 [33,] 0.488 -0.118 [34,] 0.478 -0.136 [35,] 0.443 -0.194 [36,] 0.363 -0.312 [37,] 0.281 -0.407 [38,] -0.092 -0.535 [39,] -0.240 -0.446 [40,] -0.285 -0.403 [41,] -0.476 -0.139 [42,] -0.181 -0.491 [43,] -0.822 0.676 [44,] 1.260 2.333 [45,] 0.677 0.283 [46,] -0.736 0.432 [47,] -0.299 -0.388 [48,] -0.338 -0.343 [49,] 0.418 -0.233 [50,] -0.105 -0.530 [51,] 0.099 -0.533 [52,] -0.072 -0.541 [53,] -0.081 -0.539 [54,] 0.775 0.540 [55,] -0.192 -0.484 [56,] 0.865 0.807 [57,] -0.994 1.243 [58,] 0.300 -0.388 [59,] 0.020 -0.550 [60,] 0.076 -0.540 [61,] 0.175 -0.495 [62,] -0.359 -0.316 [63,] -0.257 -0.431 [64,] -0.672 0.270 [65,] -0.705 0.351 [66,] 0.129 -0.520 [67,] 0.217 -0.465 [68,] -0.023 -0.550 [69,] 0.504 -0.089 [70,] 1.188 2.013 [71,] -0.352 -0.325 [72,] -1.455 3.293 [73,] 0.555 0.009 [74,] -0.485 -0.124 [75,] -0.472 -0.146 [76,] 0.567 0.033 [77,] -0.227 -0.457 [78,] -0.795 0.597 [79,] 0.055 -0.545 [80,] -0.139 -0.515 [81,] -0.051 -0.546 [82,] 0.179 -0.493 [83,] -0.280 -0.409 [84,] 0.336 -0.346 [85,] -0.188 -0.486 [86,] 0.146 -0.512 [87,] 0.610 0.126 [88,] 0.209 -0.471 [89,] -0.252 -0.435 [90,] 0.642 0.197 [91,] 0.547 -0.006 [92,] 0.278 -0.411 [93,] 0.090 -0.536 [94,] -0.435 -0.206 [95,] 0.770 0.526 [96,] -0.419 -0.232 [97,] 1.271 2.385 [98,] 0.874 0.838 [99,] -0.198 -0.480 [100,] -0.677 0.282 > nObs( mlIndBFGSYC ) [1] 100 > > # with analytical gradients > mlgBFGSYC <- maxLik( llf, gf, start = startVal, method = "BFGSR" , print.level=1) Initial value of the function : -326.589781090132 Iteration 1 step = 1, lnL = -325.1, chi2 = 1.504, function increment = 1.494 Iteration 2 step = 1, lnL = -254.9, chi2 = 107.8, function increment = 70.19 Iteration 3 step = 1, lnL = -254.8, chi2 = 0.147, function increment = 0.1464 Iteration 4 step = 1, lnL = -250, chi2 = 18.76, function increment = 4.778 Iteration 5 step = 0.25, lnL = -218.8, chi2 = 1496, function increment = 31.18 Iteration 6 step = 1, lnL = -201.7, chi2 = 22.41, function increment = 17.06 Iteration 7 step = 0.25, lnL = -201.7, chi2 = 0.7577, function increment = 0.08696 Iteration 8 step = 1, lnL = -201.6, chi2 = 0.07892, function increment = 0.05362 Iteration 9 step = 0.25, lnL = -201.6, chi2 = 0.07185, function increment = 0.004091 Iteration 10 step = 0.125, lnL = -201.6, chi2 = 0.223, function increment = 0.01277 Iteration 11 step = 0.0625, lnL = -201.6, chi2 = 0.002795, function increment = 6.314e-05 Iteration 12 step = 0.5, lnL = -201.6, chi2 = 0.0001251, function increment = 2.456e-05 Iteration 13 step = 0.0625, lnL = -201.6, chi2 = 6.645e-05, function increment = 4.239e-07 Iteration 14 step = 0.03125, lnL = -201.6, chi2 = 1.782e-05, function increment = 2.54e-07 Iteration 15 step = 0.01562, lnL = -201.6, chi2 = 5.203e-08, function increment = 2.604e-10 
-------------- successive function values within tolerance limit 15 iterations estimate: 1.181 1.816 Function value: -201.6 > summary(mlgBFGSYC) -------------------------------------------- Maximum Likelihood estimation BFGSR maximization, 15 iterations Return code 2: successive function values within tolerance limit Log-Likelihood: -201.6 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.181 0.182 6.5 7.9e-11 *** sigma 1.816 0.128 14.1 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -------------------------------------------- > all.equal( mlBFGSYC[-4], mlgBFGSYC[-4], tolerance = 1e-3 ) [1] TRUE > mlgIndBFGSYC <- maxLik( llfInd, gfInd, start = startVal, + method = "BFGSR" ) > all.equal( mlIndBFGSYC, mlgIndBFGSYC, tolerance = 1e-3 ) [1] TRUE > all.equal( mlgBFGSYC[ -c(3,9) ], mlgIndBFGSYC[ -c(3,9,11) ], tolerance = 1e-3 ) [1] TRUE > round( mlgIndBFGSYC[[ 11 ]], 3 ) mu sigma [1,] -0.395 -0.268 [2,] -0.194 -0.482 [3,] 0.890 0.889 [4,] -0.012 -0.550 [5,] 0.024 -0.550 [6,] 0.985 1.211 [7,] 0.225 -0.459 [8,] -0.822 0.676 [9,] -0.471 -0.147 [10,] -0.325 -0.359 [11,] 0.687 0.307 [12,] 0.163 -0.502 [13,] 0.188 -0.486 [14,] 0.012 -0.550 [15,] -0.392 -0.272 [16,] 1.028 1.371 [17,] 0.247 -0.440 [18,] -1.247 2.274 [19,] 0.370 -0.301 [20,] -0.341 -0.339 [21,] -0.702 0.345 [22,] -0.187 -0.487 [23,] -0.677 0.281 [24,] -0.497 -0.102 [25,] -0.434 -0.209 [26,] -1.077 1.557 [27,] 0.453 -0.178 [28,] 0.038 -0.548 [29,] -0.745 0.457 [30,] 0.705 0.353 [31,] 0.204 -0.475 [32,] -0.234 -0.451 [33,] 0.488 -0.118 [34,] 0.478 -0.136 [35,] 0.443 -0.194 [36,] 0.363 -0.312 [37,] 0.281 -0.407 [38,] -0.092 -0.535 [39,] -0.240 -0.446 [40,] -0.285 -0.403 [41,] -0.476 -0.139 [42,] -0.181 -0.491 [43,] -0.822 0.676 [44,] 1.260 2.333 [45,] 0.677 0.283 [46,] -0.736 0.432 [47,] -0.299 -0.388 [48,] -0.338 -0.343 [49,] 0.418 -0.233 [50,] -0.105 -0.530 [51,] 0.099 -0.533 [52,] -0.072 -0.541 [53,] -0.081 -0.539 [54,] 0.775 0.540 [55,] -0.192 -0.484 [56,] 0.865 0.807 [57,] -0.994 1.243 [58,] 0.300 -0.388 [59,] 0.020 -0.550 [60,] 0.076 -0.540 [61,] 0.175 -0.495 [62,] -0.359 -0.316 [63,] -0.257 -0.431 [64,] -0.672 0.270 [65,] -0.705 0.351 [66,] 0.129 -0.520 [67,] 0.217 -0.465 [68,] -0.023 -0.550 [69,] 0.504 -0.089 [70,] 1.188 2.013 [71,] -0.352 -0.325 [72,] -1.455 3.293 [73,] 0.555 0.009 [74,] -0.485 -0.124 [75,] -0.472 -0.146 [76,] 0.567 0.033 [77,] -0.227 -0.457 [78,] -0.795 0.597 [79,] 0.055 -0.545 [80,] -0.139 -0.515 [81,] -0.051 -0.546 [82,] 0.179 -0.493 [83,] -0.280 -0.409 [84,] 0.336 -0.346 [85,] -0.188 -0.486 [86,] 0.146 -0.512 [87,] 0.610 0.126 [88,] 0.209 -0.471 [89,] -0.252 -0.435 [90,] 0.642 0.197 [91,] 0.547 -0.006 [92,] 0.278 -0.411 [93,] 0.090 -0.536 [94,] -0.435 -0.206 [95,] 0.770 0.526 [96,] -0.419 -0.232 [97,] 1.271 2.385 [98,] 0.874 0.838 [99,] -0.198 -0.480 [100,] -0.677 0.282 > > # with analytical gradients as attribute > mlGBFGSYC <- maxLik( llfGrad, start = startVal, method = "BFGSR" , print.level=1) Initial value of the function : -326.589781090132 Iteration 1 step = 1, lnL = -325.1, chi2 = 1.504, function increment = 1.494 Iteration 2 step = 1, lnL = -254.9, chi2 = 107.8, function increment = 70.19 Iteration 3 step = 1, lnL = -254.8, chi2 = 0.147, function increment = 0.1464 Iteration 4 step = 1, lnL = -250, chi2 = 18.76, function increment = 4.778 Iteration 5 step = 0.25, lnL = -218.8, chi2 = 1496, function increment = 31.18 Iteration 6 step = 1, lnL = -201.7, chi2 = 22.41, function increment = 17.06 Iteration 7 step = 0.25, lnL = -201.7, chi2 = 0.7577, function 
increment = 0.08696 Iteration 8 step = 1, lnL = -201.6, chi2 = 0.07892, function increment = 0.05362 Iteration 9 step = 0.25, lnL = -201.6, chi2 = 0.07185, function increment = 0.004091 Iteration 10 step = 0.125, lnL = -201.6, chi2 = 0.223, function increment = 0.01277 Iteration 11 step = 0.0625, lnL = -201.6, chi2 = 0.002795, function increment = 6.314e-05 Iteration 12 step = 0.5, lnL = -201.6, chi2 = 0.0001251, function increment = 2.456e-05 Iteration 13 step = 0.0625, lnL = -201.6, chi2 = 6.645e-05, function increment = 4.239e-07 Iteration 14 step = 0.03125, lnL = -201.6, chi2 = 1.782e-05, function increment = 2.54e-07 Iteration 15 step = 0.01562, lnL = -201.6, chi2 = 5.203e-08, function increment = 2.604e-10 -------------- successive function values within tolerance limit 15 iterations estimate: 1.181 1.816 Function value: -201.6 > all.equal( mlGBFGSYC, mlgBFGSYC, tolerance = 1e-3 ) [1] TRUE > mlGIndBFGSYC <- maxLik( llfGradInd, start = startVal, method = "BFGSR" ) > all.equal( mlGIndBFGSYC, mlgIndBFGSYC, tolerance = 1e-3 ) [1] TRUE > > # with analytical gradients as argument and attribute > mlgGBFGSYC <- maxLik( llfGrad, gf, start = startVal, method = "BFGSR" ) Warning message: In maxBFGSRCompute(fn = logLikAttr, fnOrig = fn, gradOrig = grad, : the gradient is provided both as attribute 'gradient' and as argument 'grad': ignoring argument 'grad' > all.equal( mlgGBFGSYC, mlgBFGSYC, tolerance = 1e-3 ) [1] TRUE > all.equal( mlgGBFGSYC, mlGBFGSYC, tolerance = 1e-3 ) [1] TRUE > > # with analytical gradients and Hessians > mlghBFGSYC <- maxLik( llf, gf, hf, start = startVal, method = "BFGSR" ) > all.equal( mlgBFGSYC, mlghBFGSYC, tolerance = 1e-3 ) [1] TRUE > > # with analytical gradients and Hessian as attribute > mlGHBFGSYC <- maxLik( llfGradHess, start = startVal, method = "BFGSR" ) > all.equal( mlGHBFGSYC, mlghBFGSYC, tolerance = 1e-3 ) [1] TRUE > > # with analytical gradients and Hessian as argument and attribute > mlgGhHBFGSYC <- maxLik( llfGradHess, gf, hf, start = startVal, method = "BFGSR" ) Warning messages: 1: In maxBFGSRCompute(fn = logLikAttr, fnOrig = fn, gradOrig = grad, : the gradient is provided both as attribute 'gradient' and as argument 'grad': ignoring argument 'grad' 2: In maxBFGSRCompute(fn = logLikAttr, fnOrig = fn, gradOrig = grad, : the Hessian is provided both as attribute 'hessian' and as argument 'hess': ignoring argument 'hess' > all.equal( mlgGhHBFGSYC, mlghBFGSYC, tolerance = 1e-3 ) [1] TRUE > all.equal( mlgGhHBFGSYC, mlGHBFGSYC, tolerance = 1e-3 ) [1] TRUE > > > ## BFGS method > mlBFGS <- maxLik( llf, start = startVal, method = "BFGS" ) > print( mlBFGS ) Maximum Likelihood estimation BFGS maximisation, 36 iterations Return code 0: successful convergence Log-Likelihood: -201.6 (2 free parameter(s)) Estimate(s): 1.181 1.816 > summary( mlBFGS ) -------------------------------------------- Maximum Likelihood estimation BFGS maximisation, 36 iterations Return code 0: successful convergence Log-Likelihood: -201.6 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.181 0.182 6.5 8.2e-11 *** sigma 1.816 0.128 14.1 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 -------------------------------------------- > activePar( mlBFGS ) mu sigma TRUE TRUE > AIC( mlBFGS ) [1] 407.2 > coef( mlBFGS ) mu sigma 1.181 1.816 > condiNumber( mlBFGS ) mu 1 sigma 1.672 > round( hessian( mlBFGS ), 2 ) mu sigma mu -30.27 0.03 sigma 0.03 -60.62 > logLik( mlBFGS ) [1] -201.6 > maximType( mlBFGS ) [1] "BFGS maximisation" > nIter( mlBFGS ) function 36 > nParam( mlBFGS ) [1] 2 > returnCode( mlBFGS ) [1] 0 > returnMessage( mlBFGS ) [1] "successful convergence " > round( vcov( mlBFGS ), 4 ) mu sigma mu 0.033 0.0000 sigma 0.000 0.0165 > logLik( summary( mlBFGS ) ) [1] -201.6 > all.equal( ml[-c(4,5,6,9,10)], mlBFGS[-c(4,5,6,9,10,11)], tolerance = 1e-3 ) [1] TRUE > # with individual log likelihood values > mlIndBFGS <- maxLik( llfInd, start = startVal, method = "BFGS" ) > summary( mlIndBFGS ) -------------------------------------------- Maximum Likelihood estimation BFGS maximisation, 36 iterations Return code 0: successful convergence Log-Likelihood: -201.6 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.181 0.182 6.5 8.1e-11 *** sigma 1.816 0.128 14.2 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -------------------------------------------- > all.equal( mlBFGS[-4], mlIndBFGS[-c(4,12)], tolerance = 1e-3 ) [1] TRUE > mlIndBFGS[12] $gradientObs mu sigma [1,] -0.39452 -0.267786 [2,] -0.19432 -0.481927 [3,] 0.88999 0.888279 [4,] -0.01206 -0.550251 [5,] 0.02357 -0.549506 [6,] 0.98476 1.211023 [7,] 0.22458 -0.458899 [8,] -0.82159 0.675638 [9,] -0.47112 -0.147336 [10,] -0.32493 -0.358734 [11,] 0.68716 0.307205 [12,] 0.16330 -0.502076 [13,] 0.18812 -0.486229 [14,] 0.01229 -0.550240 [15,] -0.39171 -0.271798 [16,] 1.02831 1.370271 [17,] 0.24697 -0.439724 [18,] -1.24683 2.273358 [19,] 0.37032 -0.301412 [20,] -0.34137 -0.338831 [21,] -0.70204 0.344759 [22,] -0.18692 -0.487049 [23,] -0.67669 0.281276 [24,] -0.49660 -0.102544 [25,] -0.43365 -0.208914 [26,] -1.07716 1.557095 [27,] 0.45301 -0.177735 [28,] 0.03817 -0.547869 [29,] -0.74466 0.456758 [30,] 0.70518 0.352786 [31,] 0.20370 -0.475145 [32,] -0.23365 -0.451349 [33,] 0.48777 -0.118342 [34,] 0.47747 -0.136401 [35,] 0.44319 -0.193726 [36,] 0.36261 -0.311673 [37,] 0.28095 -0.407134 [38,] -0.09232 -0.535032 [39,] -0.24025 -0.445666 [40,] -0.28541 -0.402542 [41,] -0.47588 -0.139147 [42,] -0.18082 -0.491121 [43,] -0.82180 0.676245 [44,] 1.25988 2.332775 [45,] 0.67739 0.282986 [46,] -0.73555 0.432266 [47,] -0.29900 -0.388120 [48,] -0.33765 -0.343419 [49,] 0.41797 -0.233185 [50,] -0.10533 -0.530362 [51,] 0.09875 -0.532802 [52,] -0.07210 -0.541072 [53,] -0.08078 -0.538661 [54,] 0.77476 0.539827 [55,] -0.19164 -0.483800 [56,] 0.86439 0.806692 [57,] -0.99355 1.242603 [58,] 0.29956 -0.387515 [59,] 0.02027 -0.549768 [60,] 0.07609 -0.539998 [61,] 0.17531 -0.494685 [62,] -0.35927 -0.316049 [63,] -0.25677 -0.430757 [64,] -0.67219 0.270243 [65,] -0.70445 0.350903 [66,] 0.12918 -0.520202 [67,] 0.21688 -0.465075 [68,] -0.02267 -0.549581 [69,] 0.50422 -0.088698 [70,] 1.18783 2.012418 [71,] -0.35243 -0.324898 [72,] -1.45446 3.292176 [73,] 0.55481 0.008632 [74,] -0.48467 -0.123818 [75,] -0.47182 -0.146136 [76,] 0.56684 0.033125 [77,] -0.22741 -0.456577 [78,] -0.79472 0.596724 [79,] 0.05510 -0.545001 [80,] -0.13898 -0.515427 [81,] -0.05130 -0.545734 [82,] 0.17873 -0.492486 [83,] -0.27947 -0.408644 [84,] 0.33578 -0.345709 [85,] -0.18844 -0.486011 [86,] 0.14631 -0.511632 [87,] 0.61003 0.125471 [88,] 0.20898 -0.471184 [89,] -0.25236 -0.434835 [90,] 0.64153 0.197084 [91,] 0.54740 -0.006216 [92,] 
0.27760 -0.410530 [93,] 0.08991 -0.535832 [94,] -0.43539 -0.206171 [95,] 0.76994 0.526306 [96,] -0.41863 -0.232167 [97,] 1.27102 2.383985 [98,] 0.87417 0.837587 [99,] -0.19766 -0.479543 [100,] -0.67695 0.281897 > nObs( mlIndBFGS ) [1] 100 > > # with analytical gradients > mlgBFGS <- maxLik( llf, gf, start = startVal, method = "BFGS" ) > summary( mlgBFGS ) -------------------------------------------- Maximum Likelihood estimation BFGS maximisation, 36 iterations Return code 0: successful convergence Log-Likelihood: -201.6 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.181 0.182 6.5 8e-11 *** sigma 1.816 0.128 14.1 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -------------------------------------------- > all.equal( mlBFGS[-4], mlgBFGS[-4], tolerance = 1e-3 ) [1] TRUE > all.equal( mlg[-c(5,6,9,10)], mlgBFGS[-c(5,6,9,10,11)], tolerance = 1e-3 ) [1] TRUE > mlgIndBFGS <- maxLik( llfInd, gfInd, start = startVal, method = "BFGS" ) > all.equal( mlgBFGS[], mlgIndBFGS[-12], tolerance = 1e-3 ) [1] TRUE > mlgIndBFGS[12] $gradientObs mu sigma [1,] -0.39452 -0.267786 [2,] -0.19432 -0.481927 [3,] 0.88999 0.888279 [4,] -0.01206 -0.550251 [5,] 0.02357 -0.549506 [6,] 0.98476 1.211023 [7,] 0.22458 -0.458899 [8,] -0.82159 0.675638 [9,] -0.47112 -0.147336 [10,] -0.32493 -0.358734 [11,] 0.68716 0.307205 [12,] 0.16330 -0.502076 [13,] 0.18812 -0.486229 [14,] 0.01229 -0.550240 [15,] -0.39171 -0.271798 [16,] 1.02831 1.370271 [17,] 0.24697 -0.439724 [18,] -1.24683 2.273358 [19,] 0.37032 -0.301412 [20,] -0.34137 -0.338831 [21,] -0.70204 0.344759 [22,] -0.18692 -0.487049 [23,] -0.67669 0.281276 [24,] -0.49660 -0.102544 [25,] -0.43365 -0.208914 [26,] -1.07716 1.557095 [27,] 0.45301 -0.177735 [28,] 0.03817 -0.547869 [29,] -0.74466 0.456758 [30,] 0.70518 0.352786 [31,] 0.20370 -0.475145 [32,] -0.23365 -0.451349 [33,] 0.48777 -0.118342 [34,] 0.47747 -0.136401 [35,] 0.44319 -0.193726 [36,] 0.36261 -0.311673 [37,] 0.28095 -0.407134 [38,] -0.09232 -0.535032 [39,] -0.24025 -0.445666 [40,] -0.28541 -0.402542 [41,] -0.47588 -0.139147 [42,] -0.18082 -0.491121 [43,] -0.82180 0.676245 [44,] 1.25988 2.332775 [45,] 0.67739 0.282986 [46,] -0.73555 0.432266 [47,] -0.29900 -0.388120 [48,] -0.33765 -0.343419 [49,] 0.41797 -0.233185 [50,] -0.10533 -0.530362 [51,] 0.09875 -0.532802 [52,] -0.07210 -0.541072 [53,] -0.08078 -0.538661 [54,] 0.77476 0.539827 [55,] -0.19164 -0.483800 [56,] 0.86439 0.806692 [57,] -0.99355 1.242603 [58,] 0.29956 -0.387515 [59,] 0.02027 -0.549768 [60,] 0.07609 -0.539998 [61,] 0.17531 -0.494685 [62,] -0.35927 -0.316049 [63,] -0.25677 -0.430757 [64,] -0.67219 0.270243 [65,] -0.70445 0.350903 [66,] 0.12918 -0.520202 [67,] 0.21688 -0.465075 [68,] -0.02267 -0.549581 [69,] 0.50422 -0.088698 [70,] 1.18783 2.012418 [71,] -0.35243 -0.324898 [72,] -1.45446 3.292176 [73,] 0.55481 0.008632 [74,] -0.48467 -0.123818 [75,] -0.47182 -0.146136 [76,] 0.56684 0.033125 [77,] -0.22741 -0.456577 [78,] -0.79472 0.596724 [79,] 0.05510 -0.545001 [80,] -0.13898 -0.515427 [81,] -0.05130 -0.545734 [82,] 0.17873 -0.492486 [83,] -0.27947 -0.408644 [84,] 0.33578 -0.345709 [85,] -0.18844 -0.486011 [86,] 0.14631 -0.511632 [87,] 0.61003 0.125471 [88,] 0.20898 -0.471184 [89,] -0.25236 -0.434835 [90,] 0.64153 0.197084 [91,] 0.54740 -0.006216 [92,] 0.27760 -0.410530 [93,] 0.08991 -0.535832 [94,] -0.43539 -0.206171 [95,] 0.76994 0.526306 [96,] -0.41863 -0.232167 [97,] 1.27102 2.383985 [98,] 0.87417 0.837587 [99,] -0.19766 -0.479543 [100,] -0.67695 0.281897 > > # with analytical gradients as 
attribute > mlGBFGS <- maxLik( llfGrad, start = startVal, method = "BFGS" ) > all.equal( mlGBFGS, mlgBFGS, tolerance = 1e-3 ) [1] TRUE > mlGIndBFGS <- maxLik( llfGradInd, start = startVal, method = "BFGS" ) > all.equal( mlGIndBFGS, mlgIndBFGS, tolerance = 1e-3 ) [1] TRUE > > # with analytical gradients as argument and attribute > mlgGBFGS <- maxLik( llfGrad, gf, start = startVal, method = "BFGS" ) Warning message: In maxOptim(fn = fn, grad = grad, hess = hess, start = start, method = "BFGS", : the gradient is provided both as attribute 'gradient' and as argument 'grad': ignoring argument 'grad' > all.equal( mlgGBFGS, mlgBFGS, tolerance = 1e-3 ) [1] TRUE > all.equal( mlgGBFGS, mlGBFGS, tolerance = 1e-3 ) [1] TRUE > > # with unused Hessian > mlghBFGS <- maxLik( llf, gf, hf, start = startVal, method = "BFGS" ) > all.equal( mlgBFGS, mlghBFGS, tolerance = 1e-3 ) [1] TRUE > > # with analytical gradients and Hessian as attribute > mlGHBFGS <- maxLik( llfGradHess, start = startVal, method = "BFGS" ) > all.equal( mlGHBFGS, mlghBFGS, tolerance = 1e-3 ) [1] TRUE > > # with analytical gradients and Hessian as argument and attribute > mlgGhHBFGS <- maxLik( llfGradHess, gf, hf, start = startVal, method = "BFGS" ) Warning messages: 1: In maxOptim(fn = fn, grad = grad, hess = hess, start = start, method = "BFGS", : the gradient is provided both as attribute 'gradient' and as argument 'grad': ignoring argument 'grad' 2: In maxOptim(fn = fn, grad = grad, hess = hess, start = start, method = "BFGS", : the Hessian is provided both as attribute 'hessian' and as argument 'hess': ignoring argument 'hess' > all.equal( mlgGhHBFGS, mlghBFGS, tolerance = 1e-3 ) [1] TRUE > all.equal( mlgGhHBFGS, mlGHBFGS, tolerance = 1e-3 ) [1] TRUE > > > ## NM method > mlNM <- maxLik( llf, start = startVal, method = "NM" ) > print( mlNM ) Maximum Likelihood estimation Nelder-Mead maximisation, 63 iterations Return code 0: successful convergence Log-Likelihood: -201.6 (2 free parameter(s)) Estimate(s): 1.181 1.817 > summary( mlNM ) -------------------------------------------- Maximum Likelihood estimation Nelder-Mead maximisation, 63 iterations Return code 0: successful convergence Log-Likelihood: -201.6 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.181 0.182 6.5 8.3e-11 *** sigma 1.817 0.128 14.1 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -------------------------------------------- > activePar( mlNM ) mu sigma TRUE TRUE > AIC( mlNM ) [1] 407.2 > coef( mlNM ) mu sigma 1.181 1.817 > condiNumber( mlNM ) mu 1 sigma 1.668 > round( hessian( mlNM ), 2 ) mu sigma mu -30.27 0.0 sigma 0.00 -60.6 > logLik( mlNM ) [1] -201.6 > maximType( mlNM ) [1] "Nelder-Mead maximisation" > nIter( mlNM ) function 63 > nParam( mlNM ) [1] 2 > returnCode( mlNM ) [1] 0 > returnMessage( mlNM ) [1] "successful convergence " > round( vcov( mlNM ), 4 ) mu sigma mu 0.033 0.0000 sigma 0.000 0.0165 > logLik( summary( mlNM ) ) [1] -201.6 > all.equal( ml[-c(3,4,5,6,9,10)], mlNM[-c(3,4,5,6,9,10,11)], tolerance = 1e-3 ) [1] TRUE > # with individual log likelihood values > mlIndNM <- maxLik( llfInd, start = startVal, method = "NM" ) > summary( mlIndNM ) -------------------------------------------- Maximum Likelihood estimation Nelder-Mead maximisation, 63 iterations Return code 0: successful convergence Log-Likelihood: -201.6 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.181 0.182 6.5 8e-11 *** sigma 1.817 0.128 14.1 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 -------------------------------------------- > all.equal( mlNM[-4], mlIndNM[-c(4,12)], tolerance = 1e-3 ) [1] TRUE > mlIndNM[12] $gradientObs mu sigma [1,] -0.39439 -0.267899 [2,] -0.19422 -0.481941 [3,] 0.88990 0.888156 [4,] -0.01200 -0.550207 [5,] 0.02363 -0.549454 [6,] 0.98465 1.210839 [7,] 0.22460 -0.458826 [8,] -0.82139 0.675189 [9,] -0.47098 -0.147497 [10,] -0.32481 -0.358808 [11,] 0.68710 0.307186 [12,] 0.16333 -0.502006 [13,] 0.18815 -0.486157 [14,] 0.01235 -0.550191 [15,] -0.39158 -0.271909 [16,] 1.02820 1.370056 [17,] 0.24699 -0.439650 [18,] -1.24656 2.272406 [19,] 0.37032 -0.301346 [20,] -0.34125 -0.338914 [21,] -0.70186 0.344421 [22,] -0.18683 -0.487060 [23,] -0.67652 0.280960 [24,] -0.49646 -0.102723 [25,] -0.43352 -0.209051 [26,] -1.07691 1.556363 [27,] 0.45300 -0.177683 [28,] 0.03822 -0.547814 [29,] -0.74447 0.456382 [30,] 0.70512 0.352759 [31,] 0.20372 -0.475072 [32,] -0.23355 -0.451379 [33,] 0.48775 -0.118297 [34,] 0.47745 -0.136353 [35,] 0.44318 -0.193672 [36,] 0.36261 -0.311606 [37,] 0.28096 -0.407062 [38,] -0.09225 -0.535009 [39,] -0.24015 -0.445699 [40,] -0.28530 -0.402597 [41,] -0.47574 -0.139311 [42,] -0.18073 -0.491130 [43,] -0.82160 0.675795 [44,] 1.25973 2.332366 [45,] 0.67733 0.282971 [46,] -0.73536 0.431898 [47,] -0.29889 -0.388181 [48,] -0.33753 -0.343500 [49,] 0.41796 -0.233126 [50,] -0.10525 -0.530344 [51,] 0.09879 -0.532738 [52,] -0.07203 -0.541043 [53,] -0.08071 -0.538635 [54,] 0.77469 0.539768 [55,] -0.19155 -0.483812 [56,] 0.86430 0.806585 [57,] -0.99332 1.241970 [58,] 0.29957 -0.387443 [59,] 0.02033 -0.549717 [60,] 0.07614 -0.539936 [61,] 0.17535 -0.494613 [62,] -0.35915 -0.316142 [63,] -0.25666 -0.430798 [64,] -0.67201 0.269930 [65,] -0.70426 0.350563 [66,] 0.12922 -0.520134 [67,] 0.21690 -0.465002 [68,] -0.02260 -0.549540 [69,] 0.50419 -0.088657 [70,] 1.18769 2.012075 [71,] -0.35231 -0.324987 [72,] -1.45415 3.290917 [73,] 0.55478 0.008659 [74,] -0.48452 -0.123988 [75,] -0.47168 -0.146298 [76,] 0.56680 0.033149 [77,] -0.22731 -0.456604 [78,] -0.79452 0.596301 [79,] 0.05515 -0.544943 [80,] -0.13890 -0.515420 [81,] -0.05123 -0.545699 [82,] 0.17876 -0.492414 [83,] -0.27936 -0.408696 [84,] 0.33578 -0.345640 [85,] -0.18835 -0.486023 [86,] 0.14634 -0.511562 [87,] 0.60999 0.125481 [88,] 0.20901 -0.471111 [89,] -0.25225 -0.434874 [90,] 0.64148 0.197083 [91,] 0.54737 -0.006186 [92,] 0.27762 -0.410457 [93,] 0.08995 -0.535769 [94,] -0.43526 -0.206309 [95,] 0.76987 0.526250 [96,] -0.41850 -0.232295 [97,] 1.27086 2.383565 [98,] 0.87408 0.837474 [99,] -0.19757 -0.479559 [100,] -0.67677 0.281580 > nObs( mlIndNM ) [1] 100 > > # with unused analytical gradients > mlgNM <- maxLik( llf, gf, start = startVal, method = "NM" ) > summary( mlgNM ) -------------------------------------------- Maximum Likelihood estimation Nelder-Mead maximisation, 63 iterations Return code 0: successful convergence Log-Likelihood: -201.6 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.181 0.182 6.5 8.1e-11 *** sigma 1.817 0.128 14.1 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 -------------------------------------------- > all.equal( mlNM[-4], mlgNM[-4], tolerance = 1e-3 ) [1] TRUE > # with individual log likelihood values and gradients > mlgIndNM <- maxLik( llfInd, gfInd, start = startVal, method = "NM" ) > summary( mlgIndNM ) -------------------------------------------- Maximum Likelihood estimation Nelder-Mead maximisation, 63 iterations Return code 0: successful convergence Log-Likelihood: -201.6 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.181 0.182 6.5 8.1e-11 *** sigma 1.817 0.128 14.1 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -------------------------------------------- > all.equal( mlgNM[], mlgIndNM[-12], tolerance = 1e-3 ) [1] TRUE > mlgIndNM[12] $gradientObs mu sigma [1,] -0.39439 -0.267899 [2,] -0.19422 -0.481941 [3,] 0.88990 0.888156 [4,] -0.01200 -0.550207 [5,] 0.02363 -0.549454 [6,] 0.98465 1.210839 [7,] 0.22460 -0.458826 [8,] -0.82139 0.675189 [9,] -0.47098 -0.147497 [10,] -0.32481 -0.358808 [11,] 0.68710 0.307186 [12,] 0.16333 -0.502006 [13,] 0.18815 -0.486157 [14,] 0.01235 -0.550191 [15,] -0.39158 -0.271909 [16,] 1.02820 1.370056 [17,] 0.24699 -0.439650 [18,] -1.24656 2.272406 [19,] 0.37032 -0.301346 [20,] -0.34125 -0.338914 [21,] -0.70186 0.344421 [22,] -0.18683 -0.487060 [23,] -0.67652 0.280960 [24,] -0.49646 -0.102723 [25,] -0.43352 -0.209051 [26,] -1.07691 1.556363 [27,] 0.45300 -0.177683 [28,] 0.03822 -0.547814 [29,] -0.74447 0.456382 [30,] 0.70512 0.352759 [31,] 0.20372 -0.475072 [32,] -0.23355 -0.451379 [33,] 0.48775 -0.118297 [34,] 0.47745 -0.136353 [35,] 0.44318 -0.193672 [36,] 0.36261 -0.311606 [37,] 0.28096 -0.407062 [38,] -0.09225 -0.535009 [39,] -0.24015 -0.445699 [40,] -0.28530 -0.402597 [41,] -0.47574 -0.139311 [42,] -0.18073 -0.491130 [43,] -0.82160 0.675795 [44,] 1.25973 2.332366 [45,] 0.67733 0.282971 [46,] -0.73536 0.431898 [47,] -0.29889 -0.388181 [48,] -0.33753 -0.343500 [49,] 0.41796 -0.233126 [50,] -0.10525 -0.530344 [51,] 0.09879 -0.532738 [52,] -0.07203 -0.541043 [53,] -0.08071 -0.538635 [54,] 0.77469 0.539768 [55,] -0.19155 -0.483812 [56,] 0.86430 0.806585 [57,] -0.99332 1.241970 [58,] 0.29957 -0.387443 [59,] 0.02033 -0.549717 [60,] 0.07614 -0.539936 [61,] 0.17535 -0.494613 [62,] -0.35915 -0.316142 [63,] -0.25666 -0.430798 [64,] -0.67201 0.269930 [65,] -0.70426 0.350563 [66,] 0.12922 -0.520134 [67,] 0.21690 -0.465002 [68,] -0.02260 -0.549540 [69,] 0.50419 -0.088657 [70,] 1.18769 2.012075 [71,] -0.35231 -0.324987 [72,] -1.45415 3.290917 [73,] 0.55478 0.008659 [74,] -0.48452 -0.123988 [75,] -0.47168 -0.146298 [76,] 0.56680 0.033149 [77,] -0.22731 -0.456604 [78,] -0.79452 0.596301 [79,] 0.05515 -0.544943 [80,] -0.13890 -0.515420 [81,] -0.05123 -0.545699 [82,] 0.17876 -0.492414 [83,] -0.27936 -0.408696 [84,] 0.33578 -0.345640 [85,] -0.18835 -0.486023 [86,] 0.14634 -0.511562 [87,] 0.60999 0.125481 [88,] 0.20901 -0.471111 [89,] -0.25225 -0.434874 [90,] 0.64148 0.197083 [91,] 0.54737 -0.006186 [92,] 0.27762 -0.410457 [93,] 0.08995 -0.535769 [94,] -0.43526 -0.206309 [95,] 0.76987 0.526250 [96,] -0.41850 -0.232295 [97,] 1.27086 2.383565 [98,] 0.87408 0.837474 [99,] -0.19757 -0.479559 [100,] -0.67677 0.281580 > > # with (unused) analytical gradients as attribute > mlGNM <- maxLik( llfGrad, start = startVal, method = "NM" ) > all.equal( mlGNM, mlgNM, tolerance = 1e-3 ) [1] TRUE > mlGIndNM <- maxLik( llfGradInd, start = startVal, method = "NM" ) > all.equal( mlGIndNM, mlgIndNM, tolerance = 1e-3 ) [1] TRUE > > # with analytical gradients as argument and 
attribute > mlgGNM <- maxLik( llfGrad, gf, start = startVal, method = "NM" ) Warning message: In maxOptim(fn = fn, grad = grad, hess = hess, start = start, method = "Nelder-Mead", : the gradient is provided both as attribute 'gradient' and as argument 'grad': ignoring argument 'grad' > all.equal( mlgGNM, mlgNM, tolerance = 1e-3 ) [1] TRUE > all.equal( mlgGNM, mlGNM, tolerance = 1e-3 ) [1] TRUE > > # with unused analytical gradients and Hessian > mlghNM <- maxLik( llf, gf, hf, start = startVal, method = "NM" ) > all.equal( mlgNM, mlghNM, tolerance = 1e-3 ) [1] TRUE > > > ## SANN method > mlSANN <- maxLik( llf, start = startVal, method = "SANN" ) > print( mlSANN ) Maximum Likelihood estimation SANN maximisation, 10000 iterations Return code 0: successful convergence Log-Likelihood: -201.6 (2 free parameter(s)) Estimate(s): 1.182 1.817 > summary( mlSANN ) -------------------------------------------- Maximum Likelihood estimation SANN maximisation, 10000 iterations Return code 0: successful convergence Log-Likelihood: -201.6 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.182 0.182 6.5 7.9e-11 *** sigma 1.817 0.128 14.1 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -------------------------------------------- > activePar( mlSANN ) mu sigma TRUE TRUE > AIC( mlSANN ) [1] 407.2 > coef( mlSANN ) mu sigma 1.182 1.817 > condiNumber( mlSANN ) mu 1 sigma 1.673 > round( hessian( mlSANN ), 2 ) mu sigma mu -30.27 0.06 sigma 0.06 -60.60 > logLik( mlSANN ) [1] -201.6 > maximType( mlSANN ) [1] "SANN maximisation" > nIter( mlSANN ) function 10000 > nParam( mlSANN ) [1] 2 > returnCode( mlSANN ) [1] 0 > returnMessage( mlSANN ) [1] "successful convergence " > round( vcov( mlSANN ), 4 ) mu sigma mu 0.033 0.0000 sigma 0.000 0.0165 > logLik( summary( mlSANN ) ) [1] -201.6 > all.equal( ml[-c(3,4,5,6,9,10)], mlSANN[-c(3,4,5,6,9,10,11)], tolerance = 1e-3 ) [1] TRUE > # with individual log likelihood values > mlIndSANN <- maxLik( llfInd, start = startVal, method = "SANN" ) > summary( mlIndSANN ) -------------------------------------------- Maximum Likelihood estimation SANN maximisation, 10000 iterations Return code 0: successful convergence Log-Likelihood: -201.6 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.182 0.182 6.51 7.6e-11 *** sigma 1.817 0.128 14.15 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 -------------------------------------------- > all.equal( mlSANN[-4], mlIndSANN[-c(4,12)], tolerance = 1e-3 ) [1] TRUE > mlIndSANN[12] $gradientObs mu sigma [1,] -0.39480 -0.267372 [2,] -0.19460 -0.481713 [3,] 0.88966 0.887254 [4,] -0.01235 -0.550226 [5,] 0.02327 -0.549519 [6,] 0.98443 1.209877 [7,] 0.22428 -0.459132 [8,] -0.82185 0.676448 [9,] -0.47140 -0.146848 [10,] -0.32521 -0.358388 [11,] 0.68684 0.306432 [12,] 0.16300 -0.502242 [13,] 0.18782 -0.486422 [14,] 0.01200 -0.550242 [15,] -0.39199 -0.271387 [16,] 1.02797 1.369068 [17,] 0.24666 -0.439982 [18,] -1.24707 2.274521 [19,] 0.37001 -0.301810 [20,] -0.34165 -0.338469 [21,] -0.70230 0.345462 [22,] -0.18720 -0.486842 [23,] -0.67696 0.281956 [24,] -0.49688 -0.102032 [25,] -0.43393 -0.208462 [26,] -1.07741 1.558122 [27,] 0.45270 -0.178229 [28,] 0.03787 -0.547898 [29,] -0.74492 0.457500 [30,] 0.70486 0.351991 [31,] 0.20340 -0.475355 [32,] -0.23393 -0.451095 [33,] 0.48745 -0.118877 [34,] 0.47716 -0.136923 [35,] 0.44288 -0.194209 [36,] 0.36230 -0.312062 [37,] 0.28065 -0.407431 [38,] -0.09261 -0.534923 [39,] -0.24053 -0.445405 [40,] -0.28569 -0.402236 [41,] -0.47616 -0.138654 [42,] -0.18111 -0.490921 [43,] -0.82206 0.677055 [44,] 1.25953 2.331267 [45,] 0.67707 0.282225 [46,] -0.73581 0.432999 [47,] -0.29928 -0.387800 [48,] -0.33793 -0.343061 [49,] 0.41765 -0.233638 [50,] -0.10562 -0.530239 [51,] 0.09845 -0.532897 [52,] -0.07239 -0.540984 [53,] -0.08107 -0.538564 [54,] 0.77443 0.538946 [55,] -0.19193 -0.483588 [56,] 0.86406 0.805699 [57,] -0.99380 1.243560 [58,] 0.29925 -0.387832 [59,] 0.01998 -0.549778 [60,] 0.07580 -0.540068 [61,] 0.17501 -0.494864 [62,] -0.35955 -0.315670 [63,] -0.25705 -0.430479 [64,] -0.67246 0.270919 [65,] -0.70471 0.351608 [66,] 0.12888 -0.520330 [67,] 0.21658 -0.465300 [68,] -0.02296 -0.549546 [69,] 0.50391 -0.089252 [70,] 1.18748 2.011006 [71,] -0.35271 -0.324525 [72,] -1.45469 3.293497 [73,] 0.55450 0.008018 [74,] -0.48494 -0.123317 [75,] -0.47210 -0.145648 [76,] 0.56652 0.032497 [77,] -0.22769 -0.456329 [78,] -0.79498 0.597510 [79,] 0.05480 -0.545048 [80,] -0.13927 -0.515269 [81,] -0.05159 -0.545668 [82,] 0.17843 -0.492668 [83,] -0.27975 -0.408344 [84,] 0.33547 -0.346068 [85,] -0.18873 -0.485803 [86,] 0.14601 -0.511779 [87,] 0.60971 0.124791 [88,] 0.20868 -0.471400 [89,] -0.25264 -0.434562 [90,] 0.64121 0.196366 [91,] 0.54708 -0.006821 [92,] 0.27730 -0.410822 [93,] 0.08961 -0.535917 [94,] -0.43567 -0.205717 [95,] 0.76961 0.525431 [96,] -0.41891 -0.231730 [97,] 1.27067 2.382461 [98,] 0.87384 0.836582 [99,] -0.19795 -0.479326 [100,] -0.67721 0.282577 > nObs( mlIndSANN ) [1] 100 > > # with unused analytical gradients > mlgSANN <- maxLik( llf, gf, start = startVal, method = "SANN" ) > summary( mlgSANN ) -------------------------------------------- Maximum Likelihood estimation SANN maximisation, 10000 iterations Return code 0: successful convergence Log-Likelihood: -201.6 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.182 0.182 6.51 7.7e-11 *** sigma 1.817 0.128 14.14 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 -------------------------------------------- > all.equal( mlSANN[-4], mlgSANN[-4], tolerance = 1e-3 ) [1] TRUE > # with individual log likelihood values and gradients > mlgIndSANN <- maxLik( llfInd, gfInd, start = startVal, method = "SANN" ) > summary( mlgIndSANN ) -------------------------------------------- Maximum Likelihood estimation SANN maximisation, 10000 iterations Return code 0: successful convergence Log-Likelihood: -201.6 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.182 0.182 6.51 7.7e-11 *** sigma 1.817 0.128 14.14 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -------------------------------------------- > all.equal( mlgSANN[], mlgIndSANN[-12], tolerance = 1e-3 ) [1] TRUE > mlgIndSANN[12] $gradientObs mu sigma [1,] -0.39480 -0.267372 [2,] -0.19460 -0.481713 [3,] 0.88966 0.887254 [4,] -0.01235 -0.550226 [5,] 0.02327 -0.549519 [6,] 0.98443 1.209877 [7,] 0.22428 -0.459132 [8,] -0.82185 0.676448 [9,] -0.47140 -0.146848 [10,] -0.32521 -0.358388 [11,] 0.68684 0.306432 [12,] 0.16300 -0.502242 [13,] 0.18782 -0.486422 [14,] 0.01200 -0.550242 [15,] -0.39199 -0.271387 [16,] 1.02797 1.369068 [17,] 0.24666 -0.439982 [18,] -1.24707 2.274521 [19,] 0.37001 -0.301810 [20,] -0.34165 -0.338469 [21,] -0.70230 0.345462 [22,] -0.18720 -0.486842 [23,] -0.67696 0.281956 [24,] -0.49688 -0.102032 [25,] -0.43393 -0.208462 [26,] -1.07741 1.558122 [27,] 0.45270 -0.178229 [28,] 0.03787 -0.547898 [29,] -0.74492 0.457500 [30,] 0.70486 0.351991 [31,] 0.20340 -0.475355 [32,] -0.23393 -0.451095 [33,] 0.48745 -0.118877 [34,] 0.47716 -0.136923 [35,] 0.44288 -0.194209 [36,] 0.36230 -0.312062 [37,] 0.28065 -0.407431 [38,] -0.09261 -0.534923 [39,] -0.24053 -0.445405 [40,] -0.28569 -0.402236 [41,] -0.47616 -0.138654 [42,] -0.18111 -0.490921 [43,] -0.82206 0.677055 [44,] 1.25953 2.331267 [45,] 0.67707 0.282225 [46,] -0.73581 0.432999 [47,] -0.29928 -0.387800 [48,] -0.33793 -0.343061 [49,] 0.41765 -0.233638 [50,] -0.10562 -0.530239 [51,] 0.09845 -0.532897 [52,] -0.07239 -0.540984 [53,] -0.08107 -0.538564 [54,] 0.77443 0.538946 [55,] -0.19193 -0.483588 [56,] 0.86406 0.805699 [57,] -0.99380 1.243560 [58,] 0.29925 -0.387832 [59,] 0.01998 -0.549778 [60,] 0.07580 -0.540068 [61,] 0.17501 -0.494864 [62,] -0.35955 -0.315670 [63,] -0.25705 -0.430479 [64,] -0.67246 0.270919 [65,] -0.70471 0.351608 [66,] 0.12888 -0.520330 [67,] 0.21658 -0.465300 [68,] -0.02296 -0.549546 [69,] 0.50391 -0.089252 [70,] 1.18748 2.011006 [71,] -0.35271 -0.324525 [72,] -1.45469 3.293497 [73,] 0.55450 0.008018 [74,] -0.48494 -0.123317 [75,] -0.47210 -0.145648 [76,] 0.56652 0.032497 [77,] -0.22769 -0.456329 [78,] -0.79498 0.597510 [79,] 0.05480 -0.545048 [80,] -0.13927 -0.515269 [81,] -0.05159 -0.545668 [82,] 0.17843 -0.492668 [83,] -0.27975 -0.408344 [84,] 0.33547 -0.346068 [85,] -0.18873 -0.485803 [86,] 0.14601 -0.511779 [87,] 0.60971 0.124791 [88,] 0.20868 -0.471400 [89,] -0.25264 -0.434562 [90,] 0.64121 0.196366 [91,] 0.54708 -0.006821 [92,] 0.27730 -0.410822 [93,] 0.08961 -0.535917 [94,] -0.43567 -0.205717 [95,] 0.76961 0.525431 [96,] -0.41891 -0.231730 [97,] 1.27067 2.382461 [98,] 0.87384 0.836582 [99,] -0.19795 -0.479326 [100,] -0.67721 0.282577 > > # with unused analytical gradients and Hessian > mlghSANN <- maxLik( llf, gf, hf, start = startVal, method = "SANN" ) > all.equal( mlgSANN, mlghSANN, tolerance = 1e-3 ) [1] TRUE > > # with a user-specified function to generate a new candidate point > mlSANNCand <- maxLik( llf, start = startVal, method = "SANN", + cand = 
function(x)c(rnorm(1,x[1]),rnorm(1,x[2])) ) > summary( mlSANNCand ) -------------------------------------------- Maximum Likelihood estimation SANN maximisation, 10000 iterations Return code 0: successful convergence Log-Likelihood: -201.6 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.200 0.181 6.62 3.5e-11 *** sigma 1.813 0.128 14.18 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -------------------------------------------- > all.equal( mlSANNCand[-c(3,4)], mlSANN[-c(3,4)], tolerance = 1e-2 ) [1] TRUE > > ############### with fixed parameters ############### > # start values > startValFix <- c( mu = 1, sigma = 1 ) > > # fix mu (the mean ) at its start value > isFixed <- c( TRUE, FALSE ) > > ## NR method with fixed parameters > mlFix <- maxLik( llf, start = startValFix, activePar = !isFixed ) > mlFix1 <- maxLik( llf, start = startValFix, activePar = 2 ) > all.equal( mlFix, mlFix1, tolerance = 1e-3 ) [1] TRUE > mlFix2 <- maxLik( llf, start = startValFix, fixed = isFixed ) > all.equal( mlFix, mlFix2, tolerance = 1e-3 ) [1] TRUE > mlFix3 <- maxLik( llf, start = startValFix, fixed = "mu" ) > all.equal( mlFix, mlFix3, tolerance = 1e-3 ) [1] TRUE > mlFix4 <- maxLik( llf, start = startValFix, fixed = 1 ) > all.equal( mlFix, mlFix4, tolerance = 1e-3 ) [1] TRUE > print( mlFix ) Maximum Likelihood estimation Newton-Raphson maximisation, 7 iterations Return code 1: gradient close to zero Log-Likelihood: -202.1 (1 free parameter(s)) Estimate(s): 1 1.825 > summary( mlFix ) -------------------------------------------- Maximum Likelihood estimation Newton-Raphson maximisation, 7 iterations Return code 1: gradient close to zero Log-Likelihood: -202.1 1 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.000 0.000 NA NA sigma 1.825 0.129 14.1 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -------------------------------------------- > activePar( mlFix ) mu sigma FALSE TRUE > AIC( mlFix ) [1] 406.2 > coef( mlFix ) mu sigma 1.000 1.825 > condiNumber( mlFix ) sigma 1 > round( hessian( mlFix ), 2 ) mu sigma mu NA NA sigma NA -59.97 > logLik( mlFix ) [1] -202.1 > maximType( mlFix ) [1] "Newton-Raphson maximisation" > nIter( mlFix ) [1] 7 > nParam( mlFix ) [1] 2 > returnCode( mlFix ) [1] 1 > returnMessage( mlFix ) [1] "gradient close to zero" > round( vcov( mlFix ), 4 ) mu sigma mu 0 0.0000 sigma 0 0.0167 > logLik( summary( mlFix ) ) [1] -202.1 > mlIndFix <- maxLik( llfInd, start = startValFix, activePar = !isFixed ) > mlIndFix1 <- maxLik( llfInd, start = startValFix, activePar = 2 ) > all.equal( mlIndFix, mlIndFix1, tolerance = 1e-3 ) [1] TRUE > mlIndFix2 <- maxLik( llfInd, start = startValFix, fixed = isFixed ) > all.equal( mlIndFix, mlIndFix2, tolerance = 1e-3 ) [1] TRUE > mlIndFix3 <- maxLik( llfInd, start = startValFix, fixed = "mu" ) > all.equal( mlIndFix, mlIndFix3, tolerance = 1e-3 ) [1] TRUE > mlIndFix4 <- maxLik( llfInd, start = startValFix, fixed = 1 ) > all.equal( mlIndFix, mlIndFix4, tolerance = 1e-3 ) [1] TRUE > summary( mlIndFix ) -------------------------------------------- Maximum Likelihood estimation Newton-Raphson maximisation, 7 iterations Return code 1: gradient close to zero Log-Likelihood: -202.1 1 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.000 0.000 NA NA sigma 1.825 0.129 14.1 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 -------------------------------------------- > all.equal( mlFix[ ], mlIndFix[ -11 ], tolerance = 1e-3 ) [1] TRUE > round( mlFix[[3]], 5 ) mu sigma NA 0 > round( mlIndFix[[3]], 5 ) mu sigma NA 0 > round( mlIndFix[[ 11 ]], 3 ) mu sigma [1,] NA -0.341 [2,] NA -0.513 [3,] NA 1.050 [4,] NA -0.545 [5,] NA -0.537 [6,] NA 1.386 [7,] NA -0.408 [8,] NA 0.505 [9,] NA -0.238 [10,] NA -0.417 [11,] NA 0.437 [12,] NA -0.463 [13,] NA -0.442 [14,] NA -0.540 [15,] NA -0.345 [16,] NA 1.552 [17,] NA -0.385 [18,] NA 1.995 [19,] NA -0.224 [20,] NA -0.401 [21,] NA 0.202 [22,] NA -0.517 [23,] NA 0.144 [24,] NA -0.198 [25,] NA -0.291 [26,] NA 1.323 [27,] NA -0.086 [28,] NA -0.532 [29,] NA 0.304 [30,] NA 0.486 [31,] NA -0.428 [32,] NA -0.491 [33,] NA -0.021 [34,] NA -0.041 [35,] NA -0.104 [36,] NA -0.236 [37,] NA -0.346 [38,] NA -0.545 [39,] NA -0.486 [40,] NA -0.453 [41,] NA -0.230 [42,] NA -0.519 [43,] NA 0.505 [44,] NA 2.546 [45,] NA 0.412 [46,] NA 0.282 [47,] NA -0.441 [48,] NA -0.405 [49,] NA -0.148 [50,] NA -0.543 [51,] NA -0.506 [52,] NA -0.547 [53,] NA -0.547 [54,] NA 0.684 [55,] NA -0.514 [56,] NA 0.964 [57,] NA 1.029 [58,] NA -0.323 [59,] NA -0.538 [60,] NA -0.517 [61,] NA -0.453 [62,] NA -0.382 [63,] NA -0.475 [64,] NA 0.134 [65,] NA 0.208 [66,] NA -0.487 [67,] NA -0.416 [68,] NA -0.546 [69,] NA 0.012 [70,] NA 2.216 [71,] NA -0.389 [72,] NA 2.959 [73,] NA 0.117 [74,] NA -0.217 [75,] NA -0.237 [76,] NA 0.144 [77,] NA -0.494 [78,] NA 0.432 [79,] NA -0.526 [80,] NA -0.535 [81,] NA -0.548 [82,] NA -0.450 [83,] NA -0.457 [84,] NA -0.275 [85,] NA -0.516 [86,] NA -0.475 [87,] NA 0.243 [88,] NA -0.423 [89,] NA -0.478 [90,] NA 0.320 [91,] NA 0.101 [92,] NA -0.350 [93,] NA -0.510 [94,] NA -0.289 [95,] NA 0.670 [96,] NA -0.311 [97,] NA 2.598 [98,] NA 0.997 [99,] NA -0.511 [100,] NA 0.145 > nObs( mlIndFix ) [1] 100 > > # with analytical gradients > mlgFix <- maxLik( llf, gf, start = startValFix, activePar = !isFixed ) > mlgFix1 <- maxLik( llf, gf, start = startValFix, activePar = 2 ) > all.equal( mlgFix, mlgFix1, tolerance = 1e-3 ) [1] TRUE > mlgFix2 <- maxLik( llf, gf, start = startValFix, fixed = isFixed ) > all.equal( mlgFix, mlgFix2, tolerance = 1e-3 ) [1] TRUE > summary( mlgFix ) -------------------------------------------- Maximum Likelihood estimation Newton-Raphson maximisation, 7 iterations Return code 1: gradient close to zero Log-Likelihood: -202.1 1 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.000 0.000 NA NA sigma 1.825 0.129 14.1 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 -------------------------------------------- > all.equal( mlFix, mlgFix, tolerance = 1e-3 ) [1] TRUE > round( mlFix[[3]], 5 ) mu sigma NA 0 > round( mlgFix[[3]], 5 ) mu sigma NA 0 > mlFix[[4]] mu sigma mu NA NA sigma NA -59.97 > mlgFix[[4]] mu sigma mu NA NA sigma NA -60.02 > mlgIndFix <- maxLik( llfInd, gfInd, start = startValFix, activePar = !isFixed ) > all.equal( mlIndFix, mlgIndFix, tolerance = 1e-3 ) [1] TRUE > round( mlIndFix[[3]], 5 ) mu sigma NA 0 > round( mlgIndFix[[3]], 5 ) mu sigma NA 0 > mlIndFix[[4]] mu sigma mu NA NA sigma NA -60 > mlgIndFix[[4]] mu sigma mu NA NA sigma NA -60.02 > all.equal( mlgFix[ ], mlgIndFix[ -11 ], tolerance = 1e-3 ) [1] TRUE > round( mlgIndFix[[ 11 ]], 3 ) mu sigma [1,] NA -0.341 [2,] NA -0.513 [3,] NA 1.050 [4,] NA -0.545 [5,] NA -0.537 [6,] NA 1.386 [7,] NA -0.408 [8,] NA 0.505 [9,] NA -0.238 [10,] NA -0.417 [11,] NA 0.437 [12,] NA -0.463 [13,] NA -0.442 [14,] NA -0.540 [15,] NA -0.345 [16,] NA 1.552 [17,] NA -0.385 [18,] NA 1.995 [19,] NA -0.224 [20,] NA -0.401 [21,] NA 0.202 [22,] NA -0.517 [23,] NA 0.144 [24,] NA -0.198 [25,] NA -0.291 [26,] NA 1.323 [27,] NA -0.086 [28,] NA -0.532 [29,] NA 0.304 [30,] NA 0.486 [31,] NA -0.428 [32,] NA -0.491 [33,] NA -0.021 [34,] NA -0.041 [35,] NA -0.104 [36,] NA -0.236 [37,] NA -0.346 [38,] NA -0.545 [39,] NA -0.486 [40,] NA -0.453 [41,] NA -0.230 [42,] NA -0.519 [43,] NA 0.505 [44,] NA 2.546 [45,] NA 0.412 [46,] NA 0.282 [47,] NA -0.441 [48,] NA -0.405 [49,] NA -0.148 [50,] NA -0.543 [51,] NA -0.506 [52,] NA -0.547 [53,] NA -0.547 [54,] NA 0.684 [55,] NA -0.514 [56,] NA 0.964 [57,] NA 1.029 [58,] NA -0.323 [59,] NA -0.538 [60,] NA -0.517 [61,] NA -0.453 [62,] NA -0.382 [63,] NA -0.475 [64,] NA 0.134 [65,] NA 0.208 [66,] NA -0.487 [67,] NA -0.416 [68,] NA -0.546 [69,] NA 0.012 [70,] NA 2.216 [71,] NA -0.389 [72,] NA 2.959 [73,] NA 0.117 [74,] NA -0.217 [75,] NA -0.237 [76,] NA 0.144 [77,] NA -0.494 [78,] NA 0.432 [79,] NA -0.526 [80,] NA -0.535 [81,] NA -0.548 [82,] NA -0.450 [83,] NA -0.457 [84,] NA -0.275 [85,] NA -0.516 [86,] NA -0.475 [87,] NA 0.243 [88,] NA -0.423 [89,] NA -0.478 [90,] NA 0.320 [91,] NA 0.101 [92,] NA -0.350 [93,] NA -0.510 [94,] NA -0.289 [95,] NA 0.670 [96,] NA -0.311 [97,] NA 2.598 [98,] NA 0.997 [99,] NA -0.511 [100,] NA 0.145 > > # with analytical gradients and Hessians > mlghFix <- maxLik( llf, gf, hf, start = startValFix, activePar = !isFixed ) > all.equal( mlgFix, mlghFix, tolerance = 1e-3 ) [1] TRUE > mlgFix[[4]] mu sigma mu NA NA sigma NA -60.02 > mlghFix[[4]] mu sigma mu NA NA sigma NA -60.02 > > ## BHHH method with fixed parameters > mlFixBHHH <- maxLik( llfInd, start = startValFix, activePar = !isFixed, + method = "BHHH" ) > mlFixBHHH1 <- maxLik( llfInd, start = startValFix, activePar = 2, + method = "BHHH" ) > all.equal( mlFixBHHH, mlFixBHHH1, tolerance = 1e-3 ) [1] TRUE > mlFixBHHH2 <- maxLik( llfInd, start = startValFix, fixed = isFixed, + method = "BHHH" ) > all.equal( mlFixBHHH, mlFixBHHH2, tolerance = 1e-3 ) [1] TRUE > mlFixBHHH3 <- maxLik( llfInd, start = startValFix, fixed = "mu", + method = "BHHH" ) > all.equal( mlFixBHHH, mlFixBHHH3, tolerance = 1e-3 ) [1] TRUE > mlFixBHHH4 <- maxLik( llfInd, start = startValFix, fixed = 1, + method = "BHHH" ) > all.equal( mlFixBHHH, mlFixBHHH4, tolerance = 1e-3 ) [1] TRUE > print( mlFixBHHH ) Maximum Likelihood estimation BHHH maximisation, 10 iterations Return code 2: successive function values within tolerance limit Log-Likelihood: -202.1 (1 free parameter(s)) Estimate(s): 1 1.825 > summary( mlFixBHHH ) 
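The fixed-parameter runs in this part of the transcript compare several equivalent ways of holding mu at its starting value, and the BHHH runs show that this method needs a log-likelihood (or gradient) evaluated per observation. A condensed sketch of those call patterns, reusing the hypothetical llf/llfInd/gfInd definitions sketched earlier (illustrative only, not part of the recorded session):

startValFix <- c(mu = 1, sigma = 1)                                # as in the transcript
fix1 <- maxLik(llf, start = startValFix, fixed = c(TRUE, FALSE))   # logical mask
fix2 <- maxLik(llf, start = startValFix, fixed = "mu")             # by parameter name
fix3 <- maxLik(llf, start = startValFix, fixed = 1)                # by parameter index
fix4 <- maxLik(llf, start = startValFix, activePar = 2)            # specify the free parameter instead
## BHHH builds its Hessian approximation from observation-wise gradients,
## so it needs llfInd (or gfInd) returning one value/row per observation:
fixBHHH <- maxLik(llfInd, gfInd, start = startValFix, fixed = "mu", method = "BHHH")

As the comparisons in the transcript confirm, all of these fixed-parameter interfaces produce the same estimates up to the stated tolerance.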
-------------------------------------------- Maximum Likelihood estimation BHHH maximisation, 10 iterations Return code 2: successive function values within tolerance limit Log-Likelihood: -202.1 1 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.000 0.000 NA NA sigma 1.825 0.134 13.7 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -------------------------------------------- > activePar( mlFixBHHH ) mu sigma FALSE TRUE > AIC( mlFixBHHH ) [1] 406.2 > coef( mlFixBHHH ) mu sigma 1.000 1.825 > condiNumber( mlFixBHHH ) sigma 1 > round( hessian( mlFixBHHH ), 2 ) mu sigma mu NA NA sigma NA -55.98 attr(,"type") [1] "BHHH" > logLik( mlFixBHHH ) [1] -202.1 > maximType( mlFixBHHH ) [1] "BHHH maximisation" > nIter( mlFixBHHH ) [1] 10 > nParam( mlFixBHHH ) [1] 2 > returnCode( mlFixBHHH ) [1] 2 > returnMessage( mlFixBHHH ) [1] "successive function values within tolerance limit" > round( vcov( mlFixBHHH ), 4 ) mu sigma mu 0 0.0000 sigma 0 0.0179 > logLik( summary( mlFixBHHH ) ) [1] -202.1 > all.equal( mlFix[ -c( 4, 5, 6, 9, 10 ) ], mlFixBHHH[ -c( 4, 5, 6, 9, 10, 11 ) ], + tolerance = 1e-3 ) [1] TRUE > mlFix[[ 3 ]] mu sigma NA 2.842e-08 > mlFixBHHH[[ 3 ]] mu sigma NA -8.457e-06 > mlFix[[ 4 ]] mu sigma mu NA NA sigma NA -59.97 > mlFixBHHH[[ 4 ]] mu sigma mu NA NA sigma NA -55.98 attr(,"type") [1] "BHHH" > round( mlFixBHHH[[ 11 ]], 3 ) mu sigma [1,] NA -0.341 [2,] NA -0.513 [3,] NA 1.050 [4,] NA -0.545 [5,] NA -0.537 [6,] NA 1.386 [7,] NA -0.408 [8,] NA 0.505 [9,] NA -0.238 [10,] NA -0.417 [11,] NA 0.437 [12,] NA -0.463 [13,] NA -0.442 [14,] NA -0.540 [15,] NA -0.345 [16,] NA 1.552 [17,] NA -0.385 [18,] NA 1.995 [19,] NA -0.224 [20,] NA -0.401 [21,] NA 0.202 [22,] NA -0.517 [23,] NA 0.144 [24,] NA -0.198 [25,] NA -0.291 [26,] NA 1.323 [27,] NA -0.086 [28,] NA -0.532 [29,] NA 0.304 [30,] NA 0.486 [31,] NA -0.428 [32,] NA -0.491 [33,] NA -0.021 [34,] NA -0.041 [35,] NA -0.104 [36,] NA -0.236 [37,] NA -0.346 [38,] NA -0.545 [39,] NA -0.486 [40,] NA -0.453 [41,] NA -0.230 [42,] NA -0.519 [43,] NA 0.505 [44,] NA 2.546 [45,] NA 0.412 [46,] NA 0.282 [47,] NA -0.441 [48,] NA -0.405 [49,] NA -0.148 [50,] NA -0.543 [51,] NA -0.506 [52,] NA -0.547 [53,] NA -0.547 [54,] NA 0.684 [55,] NA -0.514 [56,] NA 0.964 [57,] NA 1.029 [58,] NA -0.323 [59,] NA -0.538 [60,] NA -0.517 [61,] NA -0.453 [62,] NA -0.382 [63,] NA -0.475 [64,] NA 0.134 [65,] NA 0.208 [66,] NA -0.487 [67,] NA -0.416 [68,] NA -0.546 [69,] NA 0.012 [70,] NA 2.216 [71,] NA -0.389 [72,] NA 2.959 [73,] NA 0.117 [74,] NA -0.217 [75,] NA -0.237 [76,] NA 0.144 [77,] NA -0.494 [78,] NA 0.432 [79,] NA -0.526 [80,] NA -0.535 [81,] NA -0.548 [82,] NA -0.450 [83,] NA -0.457 [84,] NA -0.275 [85,] NA -0.516 [86,] NA -0.475 [87,] NA 0.243 [88,] NA -0.423 [89,] NA -0.478 [90,] NA 0.320 [91,] NA 0.101 [92,] NA -0.350 [93,] NA -0.510 [94,] NA -0.289 [95,] NA 0.670 [96,] NA -0.311 [97,] NA 2.598 [98,] NA 0.997 [99,] NA -0.511 [100,] NA 0.145 > nObs( mlFixBHHH ) [1] 100 > > # with analytical gradients > mlgFixBHHH <- maxLik( llfInd, gfInd, start = startValFix, activePar = !isFixed, + method = "BHHH" ) > mlgFixBHHH1 <- maxLik( llfInd, gfInd, start = startValFix, activePar = 2, + method = "BHHH" ) > all.equal( mlgFixBHHH, mlgFixBHHH1, tolerance = 1e-3 ) [1] TRUE > mlgFixBHHH2 <- maxLik( llfInd, gfInd, start = startValFix, fixed = isFixed, + method = "BHHH" ) > all.equal( mlgFixBHHH, mlgFixBHHH2, tolerance = 1e-3 ) [1] TRUE > mlgFixBHHH3 <- maxLik( llfInd, gfInd, start = startValFix, fixed = "mu", + method = "BHHH" ) > all.equal( 
mlgFixBHHH, mlgFixBHHH3, tolerance = 1e-3 ) [1] TRUE > mlgFixBHHH4 <- maxLik( llfInd, gfInd, start = startValFix, fixed = 1, + method = "BHHH" ) > all.equal( mlgFixBHHH, mlgFixBHHH4, tolerance = 1e-3 ) [1] TRUE > summary( mlgFixBHHH ) -------------------------------------------- Maximum Likelihood estimation BHHH maximisation, 10 iterations Return code 2: successive function values within tolerance limit Log-Likelihood: -202.1 1 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.000 0.000 NA NA sigma 1.825 0.134 13.7 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -------------------------------------------- > all.equal( mlFixBHHH, mlgFixBHHH, tolerance = 1e-3 ) [1] TRUE > mlgFixBHHH2 <- maxLik( llf, gfInd, start = startValFix, activePar = !isFixed, + method = "BHHH") > all.equal( mlgFixBHHH, mlgFixBHHH2, tolerance = 1e-3 ) [1] TRUE > > # with unused Hessians > mlghFixBHHH <- maxLik( llfInd, gfInd, hf, start = startValFix, activePar = !isFixed, + method = "BHHH" ) > all.equal( mlgFixBHHH, mlghFixBHHH, tolerance = 1e-3 ) [1] TRUE > > ## BFGS method with fixed parameters > mlFixBfgs <- maxLik( llf, start = startValFix, fixed = isFixed, + method = "BFGS" ) > mlFixBfgs3 <- maxLik( llf, start = startValFix, fixed = "mu", + method = "BFGS" ) > all.equal( mlFixBfgs, mlFixBfgs3, tolerance = 1e-3 ) [1] TRUE > mlFixBfgs4 <- maxLik( llf, start = startValFix, fixed = 1, + method = "BFGS" ) > all.equal( mlFixBfgs, mlFixBfgs4, tolerance = 1e-3 ) [1] TRUE > print( mlFixBfgs ) Maximum Likelihood estimation BFGS maximisation, 27 iterations Return code 0: successful convergence Log-Likelihood: -202.1 (1 free parameter(s)) Estimate(s): 1 1.825 > summary( mlFixBfgs ) -------------------------------------------- Maximum Likelihood estimation BFGS maximisation, 27 iterations Return code 0: successful convergence Log-Likelihood: -202.1 1 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.000 0.000 NA NA sigma 1.825 0.129 14.1 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 -------------------------------------------- > activePar( mlFixBfgs ) mu sigma FALSE TRUE > AIC( mlFixBfgs ) [1] 406.2 > coef( mlFixBfgs ) mu sigma 1.000 1.825 > condiNumber( mlFixBfgs ) sigma 1 > round( hessian( mlFixBfgs ), 2 ) mu sigma mu -30.01 -5.94 sigma -5.94 -60.03 > logLik( mlFixBfgs ) [1] -202.1 > maximType( mlFixBfgs ) [1] "BFGS maximisation" > nIter( mlFixBfgs ) function 27 > nParam( mlFixBfgs ) [1] 2 > returnCode( mlFixBfgs ) [1] 0 > returnMessage( mlFixBfgs ) [1] "successful convergence " > round( vcov( mlFixBfgs ), 4 ) mu sigma mu 0 0.0000 sigma 0 0.0167 > logLik( summary( mlFixBfgs ) ) [1] -202.1 > all.equal( mlghFix[ -c( 5, 6, 9, 10 ) ], mlFixBfgs[ -c( 5, 6, 9, 10, 11 ) ], + tolerance = 1e-3 ) [1] "Component 3: 'is.NA' value mismatch: 0 in current 1 in target" [2] "Component 4: 'is.NA' value mismatch: 0 in current 3 in target" > mlIndFixBfgs <- maxLik( llfInd, start = startValFix, fixed = isFixed, + method = "BFGS" ) > all.equal( mlFixBfgs[-c(4,9)], mlIndFixBfgs[ -c(4,9,12) ], tolerance = 1e-3 ) [1] TRUE > print(formatC(mlIndFixBfgs$gradientObs, format="f", digits=4, width=7), quote=FALSE) mu sigma [1,] -0.3364 -0.3412 [2,] -0.1381 -0.5130 [3,] 0.9355 1.0498 [4,] 0.0423 -0.5445 [5,] 0.0776 -0.5368 [6,] 1.0294 1.3864 [7,] 0.2766 -0.4081 [8,] -0.7593 0.5046 [9,] -0.4122 -0.2376 [10,] -0.2675 -0.4172 [11,] 0.7347 0.4375 [12,] 0.2160 -0.4627 [13,] 0.2405 -0.4422 [14,] 0.0664 -0.5398 [15,] -0.3336 -0.3446 [16,] 1.0725 1.5519 [17,] 0.2988 -0.3848 [18,] -1.1803 1.9954 [19,] 0.4209 -0.2243 [20,] -0.2838 -0.4008 [21,] -0.6409 0.2020 [22,] -0.1308 -0.5166 [23,] -0.6158 0.1444 [24,] -0.4375 -0.1985 [25,] -0.3751 -0.2909 [26,] -1.0123 1.3229 [27,] 0.5028 -0.0863 [28,] 0.0921 -0.5323 [29,] -0.6831 0.3040 [30,] 0.7525 0.4859 [31,] 0.2560 -0.4282 [32,] -0.1771 -0.4906 [33,] 0.5372 -0.0209 [34,] 0.5270 -0.0407 [35,] 0.4931 -0.1039 [36,] 0.4133 -0.2360 [37,] 0.3325 -0.3460 [38,] -0.0372 -0.5453 [39,] -0.1836 -0.4863 [40,] -0.2284 -0.4526 [41,] -0.4170 -0.2305 [42,] -0.1248 -0.5194 [43,] -0.7595 0.5051 [44,] 1.3018 2.5457 [45,] 0.7250 0.4117 [46,] -0.6741 0.2816 [47,] -0.2418 -0.4411 [48,] -0.2801 -0.4046 [49,] 0.4681 -0.1478 [50,] -0.0500 -0.5432 [51,] 0.1520 -0.5056 [52,] -0.0171 -0.5473 [53,] -0.0257 -0.5466 [54,] 0.8214 0.6839 [55,] -0.1355 -0.5143 [56,] 0.9102 0.9644 [57,] -0.9295 1.0295 [58,] 0.3509 -0.3231 [59,] 0.0743 -0.5377 [60,] 0.1296 -0.5171 [61,] 0.2279 -0.4530 [62,] -0.3015 -0.3819 [63,] -0.2000 -0.4748 [64,] -0.6113 0.1344 [65,] -0.6433 0.2076 [66,] 0.1822 -0.4872 [67,] 0.2690 -0.4157 [68,] 0.0318 -0.5460 [69,] 0.5535 0.0115 [70,] 1.2304 2.2159 [71,] -0.2947 -0.3893 [72,] -1.3859 2.9585 [73,] 0.6036 0.1173 [74,] -0.4257 -0.2171 [75,] -0.4129 -0.2365 [76,] 0.6155 0.1438 [77,] -0.1709 -0.4945 [78,] -0.7327 0.4321 [79,] 0.1088 -0.5262 [80,] -0.0834 -0.5351 [81,] 0.0035 -0.5478 [82,] 0.2312 -0.4502 [83,] -0.2225 -0.4575 [84,] 0.3867 -0.2748 [85,] -0.1323 -0.5158 [86,] 0.1991 -0.4754 [87,] 0.6583 0.2433 [88,] 0.2612 -0.4233 [89,] -0.1956 -0.4780 [90,] 0.6895 0.3200 [91,] 0.5963 0.1013 [92,] 0.3291 -0.3500 [93,] 0.1433 -0.5103 [94,] -0.3769 -0.2885 [95,] 0.8166 0.6696 [96,] -0.3603 -0.3109 [97,] 1.3128 2.5983 [98,] 0.9199 0.9968 [99,] -0.1415 -0.5113 [100,] -0.6160 0.1450 > # print fradient, only 4 digits to avoid clutter in R CMD tests > mlIndFixBfgs3 <- maxLik( llfInd, start = startValFix, fixed = "mu", + method = "BFGS" ) > all.equal( mlIndFixBfgs, mlIndFixBfgs3, tolerance = 1e-3 ) [1] TRUE > mlIndFixBfgs4 <- maxLik( llfInd, start = startValFix, fixed = 1, + method = 
"BFGS" ) > all.equal( mlIndFixBfgs, mlIndFixBfgs4, tolerance = 1e-3 ) [1] TRUE > nObs( mlIndFixBfgs ) [1] 100 > > # with analytical gradients > mlgFixBfgs <- maxLik( llf, gf, start = startValFix, fixed = isFixed, + method = "BFGS" ) > mlgFixBfgs3 <- maxLik( llf, gf, start = startValFix, fixed = "mu", + method = "BFGS" ) > all.equal( mlgFixBfgs, mlgFixBfgs3, tolerance = 1e-3 ) [1] TRUE > mlgFixBfgs4 <- maxLik( llf, gf, start = startValFix, fixed = 1, + method = "BFGS" ) > all.equal( mlgFixBfgs, mlgFixBfgs4, tolerance = 1e-3 ) [1] TRUE > summary( mlgFixBfgs ) -------------------------------------------- Maximum Likelihood estimation BFGS maximisation, 27 iterations Return code 0: successful convergence Log-Likelihood: -202.1 1 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.000 0.000 NA NA sigma 1.825 0.129 14.1 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -------------------------------------------- > all.equal( mlFixBfgs[ -9 ], mlgFixBfgs[ -9 ], tolerance = 1e-3 ) [1] TRUE > mlgIndFixBfgs <- maxLik( llfInd, gfInd, start = startValFix, fixed = isFixed, + method = "BFGS") > all.equal( mlgFixBfgs[ ], mlgIndFixBfgs[ -12 ], tolerance = 1e-3 ) [1] TRUE > round( mlgIndFixBfgs[[ 12 ]], 3 ) mu sigma [1,] -0.336 -0.341 [2,] -0.138 -0.513 [3,] 0.936 1.050 [4,] 0.042 -0.545 [5,] 0.078 -0.537 [6,] 1.029 1.386 [7,] 0.277 -0.408 [8,] -0.759 0.505 [9,] -0.412 -0.238 [10,] -0.267 -0.417 [11,] 0.735 0.437 [12,] 0.216 -0.463 [13,] 0.241 -0.442 [14,] 0.066 -0.540 [15,] -0.334 -0.345 [16,] 1.072 1.552 [17,] 0.299 -0.385 [18,] -1.180 1.995 [19,] 0.421 -0.224 [20,] -0.284 -0.401 [21,] -0.641 0.202 [22,] -0.131 -0.517 [23,] -0.616 0.144 [24,] -0.437 -0.198 [25,] -0.375 -0.291 [26,] -1.012 1.323 [27,] 0.503 -0.086 [28,] 0.092 -0.532 [29,] -0.683 0.304 [30,] 0.753 0.486 [31,] 0.256 -0.428 [32,] -0.177 -0.491 [33,] 0.537 -0.021 [34,] 0.527 -0.041 [35,] 0.493 -0.104 [36,] 0.413 -0.236 [37,] 0.332 -0.346 [38,] -0.037 -0.545 [39,] -0.184 -0.486 [40,] -0.228 -0.453 [41,] -0.417 -0.230 [42,] -0.125 -0.519 [43,] -0.759 0.505 [44,] 1.302 2.546 [45,] 0.725 0.412 [46,] -0.674 0.282 [47,] -0.242 -0.441 [48,] -0.280 -0.405 [49,] 0.468 -0.148 [50,] -0.050 -0.543 [51,] 0.152 -0.506 [52,] -0.017 -0.547 [53,] -0.026 -0.547 [54,] 0.821 0.684 [55,] -0.136 -0.514 [56,] 0.910 0.964 [57,] -0.930 1.029 [58,] 0.351 -0.323 [59,] 0.074 -0.538 [60,] 0.130 -0.517 [61,] 0.228 -0.453 [62,] -0.301 -0.382 [63,] -0.200 -0.475 [64,] -0.611 0.134 [65,] -0.643 0.208 [66,] 0.182 -0.487 [67,] 0.269 -0.416 [68,] 0.032 -0.546 [69,] 0.554 0.012 [70,] 1.230 2.216 [71,] -0.295 -0.389 [72,] -1.386 2.959 [73,] 0.604 0.117 [74,] -0.426 -0.217 [75,] -0.413 -0.237 [76,] 0.616 0.144 [77,] -0.171 -0.494 [78,] -0.733 0.432 [79,] 0.109 -0.526 [80,] -0.083 -0.535 [81,] 0.003 -0.548 [82,] 0.231 -0.450 [83,] -0.222 -0.457 [84,] 0.387 -0.275 [85,] -0.132 -0.516 [86,] 0.199 -0.475 [87,] 0.658 0.243 [88,] 0.261 -0.423 [89,] -0.196 -0.478 [90,] 0.689 0.320 [91,] 0.596 0.101 [92,] 0.329 -0.350 [93,] 0.143 -0.510 [94,] -0.377 -0.289 [95,] 0.817 0.670 [96,] -0.360 -0.311 [97,] 1.313 2.598 [98,] 0.920 0.997 [99,] -0.141 -0.511 [100,] -0.616 0.145 > mlgIndFixBfgs3 <- maxLik( llfInd, gfInd, start = startValFix, fixed = "mu", + method = "BFGS" ) > all.equal( mlgIndFixBfgs, mlgIndFixBfgs3, tolerance = 1e-3 ) [1] TRUE > mlgIndFixBfgs4 <- maxLik( llfInd, gfInd, start = startValFix, fixed = 1, + method = "BFGS" ) > all.equal( mlgIndFixBfgs, mlgIndFixBfgs4, tolerance = 1e-3 ) [1] TRUE > > # with unused Hessians > mlghFixBfgs <- 
maxLik( llf, gf, hf, start = startValFix, fixed = isFixed, + method = "BFGS" ) > all.equal( mlgFixBfgs, mlghFixBfgs, tolerance = 1e-3 ) [1] TRUE > mlghFixBfgs3 <- maxLik( llf, gf, hf, start = startValFix, fixed = "mu", + method = "BFGS" ) > all.equal( mlghFixBfgs, mlghFixBfgs3, tolerance = 1e-3 ) [1] TRUE > mlghFixBfgs4 <- maxLik( llf, gf, hf, start = startValFix, fixed = 1, + method = "BFGS" ) > all.equal( mlghFixBfgs, mlghFixBfgs4, tolerance = 1e-3 ) [1] TRUE > > ## NM method with fixed parameters > mlFixNm <- maxLik( llf, start = startValFix, fixed = isFixed, + method = "NM" ) Warning message: In optim(par = start[!fixed], fn = logLikFunc, control = control, : one-dimensional optimization by Nelder-Mead is unreliable: use "Brent" or optimize() directly > mlFixNm3 <- maxLik( llf, start = startValFix, fixed = "mu", + method = "NM" ) Warning message: In optim(par = start[!fixed], fn = logLikFunc, control = control, : one-dimensional optimization by Nelder-Mead is unreliable: use "Brent" or optimize() directly > all.equal( mlFixNm, mlFixNm3, tolerance = 1e-3 ) [1] TRUE > mlFixNm4 <- maxLik( llf, start = startValFix, fixed = 1, + method = "NM" ) Warning message: In optim(par = start[!fixed], fn = logLikFunc, control = control, : one-dimensional optimization by Nelder-Mead is unreliable: use "Brent" or optimize() directly > all.equal( mlFixNm, mlFixNm4, tolerance = 1e-3 ) [1] TRUE > print( mlFixNm ) Maximum Likelihood estimation Nelder-Mead maximisation, 28 iterations Return code 0: successful convergence Log-Likelihood: -202.1 (1 free parameter(s)) Estimate(s): 1 1.825 > summary( mlFixNm ) -------------------------------------------- Maximum Likelihood estimation Nelder-Mead maximisation, 28 iterations Return code 0: successful convergence Log-Likelihood: -202.1 1 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.000 0.000 NA NA sigma 1.825 0.129 14.2 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 -------------------------------------------- > activePar( mlFixNm ) mu sigma FALSE TRUE > AIC( mlFixNm ) [1] 406.2 > coef( mlFixNm ) mu sigma 1.000 1.825 > condiNumber( mlFixNm ) sigma 1 > round( hessian( mlFixNm ), 2 ) mu sigma mu -30.01 -5.94 sigma -5.94 -60.06 > logLik( mlFixNm ) [1] -202.1 > maximType( mlFixNm ) [1] "Nelder-Mead maximisation" > nIter( mlFixNm ) function 28 > nParam( mlFixNm ) [1] 2 > returnCode( mlFixNm ) [1] 0 > returnMessage( mlFixNm ) [1] "successful convergence " > round( vcov( mlFixNm ), 4 ) mu sigma mu 0 0.0000 sigma 0 0.0167 > logLik( summary( mlFixNm ) ) [1] -202.1 > all.equal( mlFixBfgs[ -c(4,9,10) ], mlFixNm[ -c(4,9,10) ], tolerance = 1e-3 ) [1] TRUE > mlIndFixNm <- maxLik( llfInd, start = startValFix, fixed = isFixed, + method = "NM" ) Warning message: In optim(par = start[!fixed], fn = logLikFunc, control = control, : one-dimensional optimization by Nelder-Mead is unreliable: use "Brent" or optimize() directly > all.equal( mlFixNm[-4], mlIndFixNm[-c(4,12)], tolerance = 1e-3 ) [1] TRUE > round( mlIndFixNm[[ 12 ]], 3 ) mu sigma [1,] -0.336 -0.341 [2,] -0.138 -0.513 [3,] 0.936 1.050 [4,] 0.042 -0.545 [5,] 0.078 -0.537 [6,] 1.029 1.387 [7,] 0.277 -0.408 [8,] -0.759 0.505 [9,] -0.412 -0.238 [10,] -0.268 -0.417 [11,] 0.735 0.438 [12,] 0.216 -0.463 [13,] 0.241 -0.442 [14,] 0.066 -0.540 [15,] -0.334 -0.345 [16,] 1.073 1.552 [17,] 0.299 -0.385 [18,] -1.180 1.996 [19,] 0.421 -0.224 [20,] -0.284 -0.401 [21,] -0.641 0.202 [22,] -0.131 -0.517 [23,] -0.616 0.144 [24,] -0.438 -0.198 [25,] -0.375 -0.291 [26,] -1.012 1.323 [27,] 0.503 -0.086 [28,] 0.092 -0.532 [29,] -0.683 0.304 [30,] 0.753 0.486 [31,] 0.256 -0.428 [32,] -0.177 -0.491 [33,] 0.537 -0.021 [34,] 0.527 -0.041 [35,] 0.493 -0.104 [36,] 0.413 -0.236 [37,] 0.332 -0.346 [38,] -0.037 -0.545 [39,] -0.184 -0.486 [40,] -0.228 -0.453 [41,] -0.417 -0.230 [42,] -0.125 -0.519 [43,] -0.760 0.505 [44,] 1.302 2.546 [45,] 0.725 0.412 [46,] -0.674 0.282 [47,] -0.242 -0.441 [48,] -0.280 -0.405 [49,] 0.468 -0.148 [50,] -0.050 -0.543 [51,] 0.152 -0.506 [52,] -0.017 -0.547 [53,] -0.026 -0.547 [54,] 0.821 0.684 [55,] -0.136 -0.514 [56,] 0.910 0.965 [57,] -0.930 1.030 [58,] 0.351 -0.323 [59,] 0.074 -0.538 [60,] 0.130 -0.517 [61,] 0.228 -0.453 [62,] -0.302 -0.382 [63,] -0.200 -0.475 [64,] -0.611 0.134 [65,] -0.643 0.208 [66,] 0.182 -0.487 [67,] 0.269 -0.416 [68,] 0.032 -0.546 [69,] 0.554 0.012 [70,] 1.231 2.216 [71,] -0.295 -0.389 [72,] -1.386 2.959 [73,] 0.604 0.117 [74,] -0.426 -0.217 [75,] -0.413 -0.237 [76,] 0.616 0.144 [77,] -0.171 -0.494 [78,] -0.733 0.432 [79,] 0.109 -0.526 [80,] -0.083 -0.535 [81,] 0.003 -0.548 [82,] 0.231 -0.450 [83,] -0.222 -0.457 [84,] 0.387 -0.275 [85,] -0.132 -0.516 [86,] 0.199 -0.475 [87,] 0.658 0.243 [88,] 0.261 -0.423 [89,] -0.196 -0.478 [90,] 0.690 0.320 [91,] 0.596 0.101 [92,] 0.329 -0.350 [93,] 0.143 -0.510 [94,] -0.377 -0.289 [95,] 0.817 0.670 [96,] -0.360 -0.311 [97,] 1.313 2.599 [98,] 0.920 0.997 [99,] -0.141 -0.511 [100,] -0.616 0.145 > mlIndFixNm3 <- maxLik( llfInd, start = startValFix, fixed = "mu", + method = "NM" ) Warning message: In optim(par = start[!fixed], fn = logLikFunc, control = control, : one-dimensional optimization by Nelder-Mead is unreliable: use "Brent" or optimize() directly > all.equal( mlIndFixNm, mlIndFixNm3, tolerance = 1e-3 ) [1] TRUE > mlIndFixNm4 <- maxLik( llfInd, start = startValFix, fixed = 1, + method = "NM" ) Warning message: In optim(par = start[!fixed], fn = logLikFunc, control = control, : one-dimensional optimization by Nelder-Mead is unreliable: use 
"Brent" or optimize() directly > all.equal( mlIndFixNm, mlIndFixNm4, tolerance = 1e-3 ) [1] TRUE > nObs( mlIndFixNm ) [1] 100 > > # with analytical gradients > mlgFixNm <- maxLik( llf, gf, start = startValFix, fixed = isFixed, + method = "NM" ) Warning message: In optim(par = start[!fixed], fn = logLikFunc, control = control, : one-dimensional optimization by Nelder-Mead is unreliable: use "Brent" or optimize() directly > mlgFixNm3 <- maxLik( llf, gf, start = startValFix, fixed = "mu", + method = "NM" ) Warning message: In optim(par = start[!fixed], fn = logLikFunc, control = control, : one-dimensional optimization by Nelder-Mead is unreliable: use "Brent" or optimize() directly > all.equal( mlgFixNm, mlgFixNm3, tolerance = 1e-3 ) [1] TRUE > mlgFixNm4 <- maxLik( llf, gf, start = startValFix, fixed = 1, + method = "NM" ) Warning message: In optim(par = start[!fixed], fn = logLikFunc, control = control, : one-dimensional optimization by Nelder-Mead is unreliable: use "Brent" or optimize() directly > all.equal( mlgFixNm, mlgFixNm4, tolerance = 1e-3 ) [1] TRUE > summary( mlgFixNm ) -------------------------------------------- Maximum Likelihood estimation Nelder-Mead maximisation, 28 iterations Return code 0: successful convergence Log-Likelihood: -202.1 1 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.000 0.000 NA NA sigma 1.825 0.129 14.1 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 -------------------------------------------- > all.equal( mlFixNm, mlgFixNm, tolerance = 1e-3 ) [1] TRUE > mlgIndFixNm <- maxLik( llfInd, gfInd, start = startValFix, fixed = isFixed, + method = "NM") Warning message: In optim(par = start[!fixed], fn = logLikFunc, control = control, : one-dimensional optimization by Nelder-Mead is unreliable: use "Brent" or optimize() directly > all.equal( mlgFixNm[ ], mlgIndFixNm[ -12 ], tolerance = 1e-3 ) [1] TRUE > round( mlgIndFixNm[[ 12 ]], 3 ) mu sigma [1,] -0.336 -0.341 [2,] -0.138 -0.513 [3,] 0.936 1.050 [4,] 0.042 -0.545 [5,] 0.078 -0.537 [6,] 1.029 1.387 [7,] 0.277 -0.408 [8,] -0.759 0.505 [9,] -0.412 -0.238 [10,] -0.268 -0.417 [11,] 0.735 0.438 [12,] 0.216 -0.463 [13,] 0.241 -0.442 [14,] 0.066 -0.540 [15,] -0.334 -0.345 [16,] 1.073 1.552 [17,] 0.299 -0.385 [18,] -1.180 1.996 [19,] 0.421 -0.224 [20,] -0.284 -0.401 [21,] -0.641 0.202 [22,] -0.131 -0.517 [23,] -0.616 0.144 [24,] -0.438 -0.198 [25,] -0.375 -0.291 [26,] -1.012 1.323 [27,] 0.503 -0.086 [28,] 0.092 -0.532 [29,] -0.683 0.304 [30,] 0.753 0.486 [31,] 0.256 -0.428 [32,] -0.177 -0.491 [33,] 0.537 -0.021 [34,] 0.527 -0.041 [35,] 0.493 -0.104 [36,] 0.413 -0.236 [37,] 0.332 -0.346 [38,] -0.037 -0.545 [39,] -0.184 -0.486 [40,] -0.228 -0.453 [41,] -0.417 -0.230 [42,] -0.125 -0.519 [43,] -0.760 0.505 [44,] 1.302 2.546 [45,] 0.725 0.412 [46,] -0.674 0.282 [47,] -0.242 -0.441 [48,] -0.280 -0.405 [49,] 0.468 -0.148 [50,] -0.050 -0.543 [51,] 0.152 -0.506 [52,] -0.017 -0.547 [53,] -0.026 -0.547 [54,] 0.821 0.684 [55,] -0.136 -0.514 [56,] 0.910 0.965 [57,] -0.930 1.030 [58,] 0.351 -0.323 [59,] 0.074 -0.538 [60,] 0.130 -0.517 [61,] 0.228 -0.453 [62,] -0.302 -0.382 [63,] -0.200 -0.475 [64,] -0.611 0.134 [65,] -0.643 0.208 [66,] 0.182 -0.487 [67,] 0.269 -0.416 [68,] 0.032 -0.546 [69,] 0.554 0.012 [70,] 1.231 2.216 [71,] -0.295 -0.389 [72,] -1.386 2.959 [73,] 0.604 0.117 [74,] -0.426 -0.217 [75,] -0.413 -0.237 [76,] 0.616 0.144 [77,] -0.171 -0.494 [78,] -0.733 0.432 [79,] 0.109 -0.526 [80,] -0.083 -0.535 [81,] 0.003 -0.548 [82,] 0.231 -0.450 [83,] -0.222 -0.457 [84,] 0.387 
-0.275 [85,] -0.132 -0.516 [86,] 0.199 -0.475 [87,] 0.658 0.243 [88,] 0.261 -0.423 [89,] -0.196 -0.478 [90,] 0.690 0.320 [91,] 0.596 0.101 [92,] 0.329 -0.350 [93,] 0.143 -0.510 [94,] -0.377 -0.289 [95,] 0.817 0.670 [96,] -0.360 -0.311 [97,] 1.313 2.599 [98,] 0.920 0.997 [99,] -0.141 -0.511 [100,] -0.616 0.145 > > # with unused Hessians > mlghFixNm <- maxLik( llf, gf, hf, start = startValFix, fixed = isFixed, + method = "NM" ) Warning message: In optim(par = start[!fixed], fn = logLikFunc, control = control, : one-dimensional optimization by Nelder-Mead is unreliable: use "Brent" or optimize() directly > all.equal( mlgFixNm, mlghFixNm, tolerance = 1e-3 ) [1] TRUE > mlghFixNm3 <- maxLik( llf, gf, hf, start = startValFix, fixed = "mu", + method = "NM" ) Warning message: In optim(par = start[!fixed], fn = logLikFunc, control = control, : one-dimensional optimization by Nelder-Mead is unreliable: use "Brent" or optimize() directly > all.equal( mlghFixNm, mlghFixNm3, tolerance = 1e-3 ) [1] TRUE > mlghFixNm4 <- maxLik( llf, gf, hf, start = startValFix, fixed = 1, + method = "NM" ) Warning message: In optim(par = start[!fixed], fn = logLikFunc, control = control, : one-dimensional optimization by Nelder-Mead is unreliable: use "Brent" or optimize() directly > all.equal( mlghFixNm, mlghFixNm4, tolerance = 1e-3 ) [1] TRUE > > ## SANN method with fixed parameters > mlFixSann <- maxLik( llf, start = startValFix, fixed = isFixed, + method = "SANN" ) > mlFixSann3 <- maxLik( llf, start = startValFix, fixed = "mu", + method = "SANN" ) > all.equal( mlFixSann, mlFixSann3, tolerance = 1e-3 ) [1] TRUE > mlFixSann4 <- maxLik( llf, start = startValFix, fixed = 1, + method = "SANN" ) > all.equal( mlFixSann, mlFixSann4, tolerance = 1e-3 ) [1] TRUE > print( mlFixSann ) Maximum Likelihood estimation SANN maximisation, 10000 iterations Return code 0: successful convergence Log-Likelihood: -202.1 (1 free parameter(s)) Estimate(s): 1 1.825 > summary( mlFixSann ) -------------------------------------------- Maximum Likelihood estimation SANN maximisation, 10000 iterations Return code 0: successful convergence Log-Likelihood: -202.1 1 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.000 0.000 NA NA sigma 1.825 0.129 14.1 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 -------------------------------------------- > activePar( mlFixSann ) mu sigma FALSE TRUE > AIC( mlFixSann ) [1] 406.2 > coef( mlFixSann ) mu sigma 1.000 1.825 > condiNumber( mlFixSann ) sigma 1 > round( hessian( mlFixSann ), 2 ) mu sigma mu -29.98 -5.94 sigma -5.94 -60.03 > logLik( mlFixSann ) [1] -202.1 > maximType( mlFixSann ) [1] "SANN maximisation" > nIter( mlFixSann ) function 10000 > nParam( mlFixSann ) [1] 2 > returnCode( mlFixSann ) [1] 0 > returnMessage( mlFixSann ) [1] "successful convergence " > round( vcov( mlFixSann ), 4 ) mu sigma mu 0 0.0000 sigma 0 0.0167 > logLik( summary( mlFixSann ) ) [1] -202.1 > all.equal( mlFixBfgs[ -c(4,9,10) ], mlFixSann[ -c(4,9,10) ], + tolerance = 1e-3 ) [1] TRUE > mlIndFixSann <- maxLik( llfInd, start = startValFix, fixed = isFixed, + method = "SANN" ) > all.equal( mlFixSann[ ], mlIndFixSann[ -12 ], tolerance = 1e-3 ) [1] TRUE > round( mlIndFixSann[[ 12 ]], 3 ) mu sigma [1,] -0.336 -0.341 [2,] -0.138 -0.513 [3,] 0.936 1.050 [4,] 0.042 -0.545 [5,] 0.078 -0.537 [6,] 1.029 1.386 [7,] 0.277 -0.408 [8,] -0.759 0.505 [9,] -0.412 -0.238 [10,] -0.267 -0.417 [11,] 0.735 0.438 [12,] 0.216 -0.463 [13,] 0.241 -0.442 [14,] 0.066 -0.540 [15,] -0.334 -0.345 [16,] 1.073 1.552 [17,] 0.299 -0.385 [18,] -1.180 1.995 [19,] 0.421 -0.224 [20,] -0.284 -0.401 [21,] -0.641 0.202 [22,] -0.131 -0.517 [23,] -0.616 0.144 [24,] -0.437 -0.198 [25,] -0.375 -0.291 [26,] -1.012 1.323 [27,] 0.503 -0.086 [28,] 0.092 -0.532 [29,] -0.683 0.304 [30,] 0.753 0.486 [31,] 0.256 -0.428 [32,] -0.177 -0.491 [33,] 0.537 -0.021 [34,] 0.527 -0.041 [35,] 0.493 -0.104 [36,] 0.413 -0.236 [37,] 0.332 -0.346 [38,] -0.037 -0.545 [39,] -0.184 -0.486 [40,] -0.228 -0.453 [41,] -0.417 -0.230 [42,] -0.125 -0.519 [43,] -0.759 0.505 [44,] 1.302 2.546 [45,] 0.725 0.412 [46,] -0.674 0.282 [47,] -0.242 -0.441 [48,] -0.280 -0.405 [49,] 0.468 -0.148 [50,] -0.050 -0.543 [51,] 0.152 -0.506 [52,] -0.017 -0.547 [53,] -0.026 -0.547 [54,] 0.821 0.684 [55,] -0.136 -0.514 [56,] 0.910 0.964 [57,] -0.930 1.030 [58,] 0.351 -0.323 [59,] 0.074 -0.538 [60,] 0.130 -0.517 [61,] 0.228 -0.453 [62,] -0.301 -0.382 [63,] -0.200 -0.475 [64,] -0.611 0.134 [65,] -0.643 0.208 [66,] 0.182 -0.487 [67,] 0.269 -0.416 [68,] 0.032 -0.546 [69,] 0.554 0.012 [70,] 1.230 2.216 [71,] -0.295 -0.389 [72,] -1.386 2.959 [73,] 0.604 0.117 [74,] -0.426 -0.217 [75,] -0.413 -0.237 [76,] 0.616 0.144 [77,] -0.171 -0.494 [78,] -0.733 0.432 [79,] 0.109 -0.526 [80,] -0.083 -0.535 [81,] 0.003 -0.548 [82,] 0.231 -0.450 [83,] -0.222 -0.457 [84,] 0.387 -0.275 [85,] -0.132 -0.516 [86,] 0.199 -0.475 [87,] 0.658 0.243 [88,] 0.261 -0.423 [89,] -0.196 -0.478 [90,] 0.690 0.320 [91,] 0.596 0.101 [92,] 0.329 -0.350 [93,] 0.143 -0.510 [94,] -0.377 -0.289 [95,] 0.817 0.670 [96,] -0.360 -0.311 [97,] 1.313 2.598 [98,] 0.920 0.997 [99,] -0.141 -0.511 [100,] -0.616 0.145 > nObs( mlIndFixSann ) [1] 100 > > # with analytical gradients > mlgFixSann <- maxLik( llf, gf, start = startValFix, fixed = isFixed, + method = "SANN" ) > summary( mlgFixSann ) -------------------------------------------- Maximum Likelihood estimation SANN maximisation, 10000 iterations Return code 0: successful convergence Log-Likelihood: -202.1 1 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.000 0.000 NA NA sigma 1.825 0.129 14.1 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 -------------------------------------------- > all.equal( mlFixSann[-4], mlgFixSann[-4], tolerance = 1e-3 ) [1] TRUE > mlgIndFixSann <- maxLik( llfInd, gfInd, start = startValFix, fixed = isFixed, + method = "SANN") > all.equal( mlgFixSann[ ], mlgIndFixSann[ -12 ], tolerance = 1e-3 ) [1] TRUE > round( mlgIndFixSann[[ 12 ]], 3 ) mu sigma [1,] -0.336 -0.341 [2,] -0.138 -0.513 [3,] 0.936 1.050 [4,] 0.042 -0.545 [5,] 0.078 -0.537 [6,] 1.029 1.386 [7,] 0.277 -0.408 [8,] -0.759 0.505 [9,] -0.412 -0.238 [10,] -0.267 -0.417 [11,] 0.735 0.438 [12,] 0.216 -0.463 [13,] 0.241 -0.442 [14,] 0.066 -0.540 [15,] -0.334 -0.345 [16,] 1.073 1.552 [17,] 0.299 -0.385 [18,] -1.180 1.995 [19,] 0.421 -0.224 [20,] -0.284 -0.401 [21,] -0.641 0.202 [22,] -0.131 -0.517 [23,] -0.616 0.144 [24,] -0.437 -0.198 [25,] -0.375 -0.291 [26,] -1.012 1.323 [27,] 0.503 -0.086 [28,] 0.092 -0.532 [29,] -0.683 0.304 [30,] 0.753 0.486 [31,] 0.256 -0.428 [32,] -0.177 -0.491 [33,] 0.537 -0.021 [34,] 0.527 -0.041 [35,] 0.493 -0.104 [36,] 0.413 -0.236 [37,] 0.332 -0.346 [38,] -0.037 -0.545 [39,] -0.184 -0.486 [40,] -0.228 -0.453 [41,] -0.417 -0.230 [42,] -0.125 -0.519 [43,] -0.759 0.505 [44,] 1.302 2.546 [45,] 0.725 0.412 [46,] -0.674 0.282 [47,] -0.242 -0.441 [48,] -0.280 -0.405 [49,] 0.468 -0.148 [50,] -0.050 -0.543 [51,] 0.152 -0.506 [52,] -0.017 -0.547 [53,] -0.026 -0.547 [54,] 0.821 0.684 [55,] -0.136 -0.514 [56,] 0.910 0.964 [57,] -0.930 1.030 [58,] 0.351 -0.323 [59,] 0.074 -0.538 [60,] 0.130 -0.517 [61,] 0.228 -0.453 [62,] -0.301 -0.382 [63,] -0.200 -0.475 [64,] -0.611 0.134 [65,] -0.643 0.208 [66,] 0.182 -0.487 [67,] 0.269 -0.416 [68,] 0.032 -0.546 [69,] 0.554 0.012 [70,] 1.230 2.216 [71,] -0.295 -0.389 [72,] -1.386 2.959 [73,] 0.604 0.117 [74,] -0.426 -0.217 [75,] -0.413 -0.237 [76,] 0.616 0.144 [77,] -0.171 -0.494 [78,] -0.733 0.432 [79,] 0.109 -0.526 [80,] -0.083 -0.535 [81,] 0.003 -0.548 [82,] 0.231 -0.450 [83,] -0.222 -0.457 [84,] 0.387 -0.275 [85,] -0.132 -0.516 [86,] 0.199 -0.475 [87,] 0.658 0.243 [88,] 0.261 -0.423 [89,] -0.196 -0.478 [90,] 0.690 0.320 [91,] 0.596 0.101 [92,] 0.329 -0.350 [93,] 0.143 -0.510 [94,] -0.377 -0.289 [95,] 0.817 0.670 [96,] -0.360 -0.311 [97,] 1.313 2.598 [98,] 0.920 0.997 [99,] -0.141 -0.511 [100,] -0.616 0.145 > > # with unused Hessians > mlghFixSann <- maxLik( llf, gf, hf, start = startValFix, fixed = isFixed, + method = "SANN" ) > all.equal( mlgFixSann, mlghFixSann, tolerance = 1e-3 ) [1] TRUE > > > ############### with parameter constraints ############### > A <- matrix( -1, nrow = 1, ncol = 2 ) > > > ############### inequality constraints ############### > inEq <- list( ineqA = A, ineqB = 2.5 ) > > ## NR method with inequality constraints > try( maxLik( llf, start = startVal, constraints = inEq, method = "NR" ) ) Error in maxRoutine(fn = logLik, grad = grad, hess = hess, start = start, : Inequality constraints not implemented for maxNR > > ## BHHH method with inequality constraints > try( maxLik( llf, start = startVal, constraints = inEq, method = "BHHH" ) ) Error in maxNR(fn = fn, grad = grad, hess = hess, start = start, iterlim = iterlim, : Inequality constraints not implemented for maxNR > > ## BFGS method with inequality constraints > mlBfgsInEq <- maxLik( llf, start = startVal, constraints = inEq, + method = "BFGS" ) > print( mlBfgsInEq ) Maximum Likelihood estimation BFGS maximisation, 130 iterations Return code 0: successful convergence Log-Likelihood: -204.5 (2 free parameter(s)) Estimate(s): 0.8197 1.68 > summary( mlBfgsInEq ) -------------------------------------------- 
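## The fixed-parameter tests above exercise the 'fixed' argument of maxLik():
## the same parameter can be held fixed via a logical vector (isFixed), by
## name ("mu"), or by position (1), and the all.equal() comparisons confirm
## that all three forms yield the same fit.
##
## The constraint objects used in this part of the test follow the maxLik()
## convention that inequality constraints, constraints = list(ineqA = A,
## ineqB = B), impose A %*% theta + B >= 0, while equality constraints,
## list(eqA = A, eqB = B), impose A %*% theta + B = 0 (handled via constrOptim
## and SUMT, respectively, as the summaries report).  With
## A = matrix(-1, nrow = 1, ncol = 2) and B = 2.5, the restriction on
## theta = c(mu, sigma) is mu + sigma <= 2.5 (or = 2.5 for the equality case),
## which is why the constrained estimates here come out near mu = 0.82,
## sigma = 1.68.  A minimal sketch, assuming the llf and startVal objects
## defined earlier in this test file:
##
##   ineqCon <- list( ineqA = matrix( -1, nrow = 1, ncol = 2 ), ineqB = 2.5 )
##   fitInEq <- maxLik( llf, start = startVal, constraints = ineqCon,
##                      method = "BFGS" )
##   sum( coef( fitInEq ) )   # should not exceed 2.5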
Maximum Likelihood estimation BFGS maximisation, 130 iterations Return code 0: successful convergence Log-Likelihood: -204.5 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 0.820 0.174 4.71 2.5e-06 *** sigma 1.680 0.107 15.69 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. Inference is probably wrong Constrained optimization based on constrOptim 1 outer iterations, barrier value -0.00145 -------------------------------------------- > activePar( mlBfgsInEq ) mu sigma TRUE TRUE > AIC( mlBfgsInEq ) [1] 413.1 > coef( mlBfgsInEq ) mu sigma 0.8197 1.6803 > condiNumber( mlBfgsInEq ) mu 1 sigma 3.611 > round( hessian( mlBfgsInEq ), 2 ) mu sigma mu -35.44 -15.23 sigma -15.23 -93.71 > logLik( mlBfgsInEq ) [1] -204.5 > maximType( mlBfgsInEq ) [1] "BFGS maximisation" > nIter( mlBfgsInEq ) function 130 > nParam( mlBfgsInEq ) [1] 2 > returnCode( mlBfgsInEq ) [1] 0 > returnMessage( mlBfgsInEq ) [1] "successful convergence " > round( vcov( mlBfgsInEq ), 4 ) mu sigma mu 0.0303 -0.0049 sigma -0.0049 0.0115 > logLik( summary( mlBfgsInEq ) ) [1] -204.5 > mlBfgsInEqInd <- maxLik( llfInd, start = startVal, constraints = inEq, + method = "BFGS" ) > summary( mlBfgsInEqInd ) -------------------------------------------- Maximum Likelihood estimation BFGS maximisation, 130 iterations Return code 0: successful convergence Log-Likelihood: -204.5 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 0.820 0.174 4.71 2.5e-06 *** sigma 1.680 0.107 15.69 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. Inference is probably wrong Constrained optimization based on constrOptim 1 outer iterations, barrier value -0.00145 -------------------------------------------- > all.equal( mlBfgsInEq[ ], mlBfgsInEqInd[ -12 ], tolerance = 1e-3 ) [1] TRUE > round( mlBfgsInEqInd[[ 12 ]], 3 ) mu sigma [1,] -0.333 -0.409 [2,] -0.099 -0.579 [3,] 1.168 1.697 [4,] 0.114 -0.573 [5,] 0.155 -0.555 [6,] 1.279 2.153 [7,] 0.390 -0.339 [8,] -0.832 0.569 [9,] -0.423 -0.295 [10,] -0.252 -0.489 [11,] 0.931 0.861 [12,] 0.319 -0.424 [13,] 0.348 -0.392 [14,] 0.142 -0.561 [15,] -0.330 -0.412 [16,] 1.330 2.376 [17,] 0.417 -0.304 [18,] -1.329 2.374 [19,] 0.561 -0.067 [20,] -0.271 -0.472 [21,] -0.693 0.211 [22,] -0.091 -0.581 [23,] -0.663 0.143 [24,] -0.452 -0.251 [25,] -0.379 -0.354 [26,] -1.131 1.554 [27,] 0.657 0.131 [28,] 0.173 -0.545 [29,] -0.742 0.331 [30,] 0.952 0.928 [31,] 0.366 -0.370 [32,] -0.145 -0.560 [33,] 0.698 0.223 [34,] 0.686 0.195 [35,] 0.646 0.106 [36,] 0.552 -0.084 [37,] 0.456 -0.245 [38,] 0.020 -0.594 [39,] -0.153 -0.556 [40,] -0.206 -0.524 [41,] -0.428 -0.287 [42,] -0.083 -0.583 [43,] -0.833 0.569 [44,] 1.600 3.708 [45,] 0.920 0.826 [46,] -0.732 0.305 [47,] -0.222 -0.513 [48,] -0.267 -0.476 [49,] 0.616 0.043 [50,] 0.005 -0.595 [51,] 0.243 -0.496 [52,] 0.044 -0.592 [53,] 0.033 -0.593 [54,] 1.033 1.199 [55,] -0.096 -0.580 [56,] 1.138 1.581 [57,] -1.033 1.199 [58,] 0.478 -0.211 [59,] 0.152 -0.557 [60,] 0.217 -0.516 [61,] 0.333 -0.409 [62,] -0.292 -0.452 [63,] -0.172 -0.545 [64,] -0.658 0.132 [65,] -0.695 0.217 [66,] 0.279 -0.464 [67,] 0.381 -0.351 [68,] 0.101 -0.578 [69,] 0.717 0.269 [70,] 1.516 3.267 [71,] -0.284 -0.460 [72,] -1.572 3.557 [73,] 0.776 0.417 [74,] -0.439 -0.272 [75,] -0.424 -0.294 [76,] 0.790 0.454 [77,] -0.138 -0.563 [78,] -0.801 0.483 [79,] 0.192 -0.533 [80,] -0.035 -0.593 [81,] 0.068 -0.587 [82,] 0.337 -0.405 [83,] -0.199 -0.529 [84,] 0.520 -0.140 
[85,] -0.092 -0.581 [86,] 0.299 -0.445 [87,] 0.841 0.593 [88,] 0.372 -0.362 [89,] -0.167 -0.548 [90,] 0.878 0.699 [91,] 0.768 0.395 [92,] 0.452 -0.251 [93,] 0.233 -0.504 [94,] -0.381 -0.351 [95,] 1.028 1.180 [96,] -0.361 -0.376 [97,] 1.613 3.778 [98,] 1.150 1.625 [99,] -0.103 -0.577 [100,] -0.663 0.144 > nObs( mlBfgsInEqInd ) [1] 100 > > # with analytical gradients > mlgBfgsInEq <- maxLik( llf, gf, start = startVal, constraints = inEq, + method = "BFGS" ) > all.equal( mlBfgsInEq, mlgBfgsInEq, tolerance = 1e-3 ) [1] TRUE > mlgBfgsInEqInd <- maxLik( llfInd, gfInd, start = startVal, constraints = inEq, + method = "BFGS" ) > all.equal( mlgBfgsInEqInd[ -12 ], mlgBfgsInEq[ ], tolerance = 1e-3 ) [1] TRUE > round( mlgBfgsInEqInd[[ 12 ]], 3 ) mu sigma [1,] -0.333 -0.409 [2,] -0.099 -0.579 [3,] 1.168 1.697 [4,] 0.114 -0.573 [5,] 0.155 -0.555 [6,] 1.279 2.153 [7,] 0.390 -0.339 [8,] -0.832 0.569 [9,] -0.423 -0.295 [10,] -0.252 -0.489 [11,] 0.931 0.861 [12,] 0.319 -0.424 [13,] 0.348 -0.392 [14,] 0.142 -0.561 [15,] -0.330 -0.412 [16,] 1.330 2.376 [17,] 0.417 -0.304 [18,] -1.329 2.374 [19,] 0.561 -0.067 [20,] -0.271 -0.472 [21,] -0.693 0.211 [22,] -0.091 -0.581 [23,] -0.663 0.143 [24,] -0.452 -0.251 [25,] -0.379 -0.354 [26,] -1.131 1.554 [27,] 0.657 0.131 [28,] 0.173 -0.545 [29,] -0.742 0.331 [30,] 0.952 0.928 [31,] 0.366 -0.370 [32,] -0.145 -0.560 [33,] 0.698 0.223 [34,] 0.686 0.195 [35,] 0.646 0.106 [36,] 0.552 -0.084 [37,] 0.456 -0.245 [38,] 0.020 -0.594 [39,] -0.153 -0.556 [40,] -0.206 -0.524 [41,] -0.428 -0.287 [42,] -0.083 -0.583 [43,] -0.833 0.569 [44,] 1.600 3.708 [45,] 0.920 0.826 [46,] -0.732 0.305 [47,] -0.222 -0.513 [48,] -0.267 -0.476 [49,] 0.616 0.043 [50,] 0.005 -0.595 [51,] 0.243 -0.496 [52,] 0.044 -0.592 [53,] 0.033 -0.593 [54,] 1.033 1.199 [55,] -0.096 -0.580 [56,] 1.138 1.581 [57,] -1.033 1.199 [58,] 0.478 -0.211 [59,] 0.152 -0.557 [60,] 0.217 -0.516 [61,] 0.333 -0.409 [62,] -0.292 -0.452 [63,] -0.172 -0.545 [64,] -0.658 0.132 [65,] -0.695 0.217 [66,] 0.279 -0.464 [67,] 0.381 -0.351 [68,] 0.101 -0.578 [69,] 0.717 0.269 [70,] 1.516 3.267 [71,] -0.284 -0.460 [72,] -1.572 3.557 [73,] 0.776 0.417 [74,] -0.439 -0.272 [75,] -0.424 -0.294 [76,] 0.790 0.454 [77,] -0.138 -0.563 [78,] -0.801 0.483 [79,] 0.192 -0.533 [80,] -0.035 -0.593 [81,] 0.068 -0.587 [82,] 0.337 -0.405 [83,] -0.199 -0.529 [84,] 0.520 -0.140 [85,] -0.092 -0.581 [86,] 0.299 -0.445 [87,] 0.841 0.593 [88,] 0.372 -0.362 [89,] -0.167 -0.548 [90,] 0.878 0.699 [91,] 0.768 0.395 [92,] 0.452 -0.251 [93,] 0.233 -0.504 [94,] -0.381 -0.351 [95,] 1.028 1.180 [96,] -0.361 -0.376 [97,] 1.613 3.778 [98,] 1.150 1.625 [99,] -0.103 -0.577 [100,] -0.663 0.144 > mlgBfgsInEqInd2 <- maxLik( llf, gfInd, start = startVal, constraints = inEq, + method = "BFGS" ) > all.equal( mlgBfgsInEqInd, mlgBfgsInEqInd2, tolerance = 1e-3 ) [1] TRUE > > # with unused Hessian > mlghBfgsInEq <- maxLik( llf, gf, hf, start = startVal, constraints = inEq, + method = "BFGS" ) > all.equal( mlgBfgsInEq, mlghBfgsInEq, tolerance = 1e-3 ) [1] TRUE > > ## NM method with inequality constraints > mlNmInEq <- maxLik( llf, start = startVal, constraints = inEq, method = "NM" ) > print( mlNmInEq ) Maximum Likelihood estimation Nelder-Mead maximisation, 103 iterations Return code 0: successful convergence Log-Likelihood: -204.5 (2 free parameter(s)) Estimate(s): 0.8197 1.68 > summary( mlNmInEq ) -------------------------------------------- Maximum Likelihood estimation Nelder-Mead maximisation, 103 iterations Return code 0: successful convergence Log-Likelihood: -204.5 2 free 
parameters Estimates: Estimate Std. error t value Pr(> t) mu 0.820 0.174 4.71 2.5e-06 *** sigma 1.680 0.107 15.68 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. Inference is probably wrong Constrained optimization based on constrOptim 1 outer iterations, barrier value -0.001468 -------------------------------------------- > activePar( mlNmInEq ) mu sigma TRUE TRUE > AIC( mlNmInEq ) [1] 413.1 > coef( mlNmInEq ) mu sigma 0.8197 1.6803 > condiNumber( mlNmInEq ) mu 1 sigma 3.61 > round( hessian( mlNmInEq ), 2 ) mu sigma mu -35.44 -15.23 sigma -15.23 -93.68 > logLik( mlNmInEq ) [1] -204.5 > maximType( mlNmInEq ) [1] "Nelder-Mead maximisation" > nIter( mlNmInEq ) function 103 > nParam( mlNmInEq ) [1] 2 > returnCode( mlNmInEq ) [1] 0 > returnMessage( mlNmInEq ) [1] "successful convergence " > round( vcov( mlNmInEq ), 4 ) mu sigma mu 0.0303 -0.0049 sigma -0.0049 0.0115 > logLik( summary( mlNmInEq ) ) [1] -204.5 > all.equal( mlBfgsInEq[-c(9,10,11)], mlNmInEq[-c(9,10,11)], tolerance = 1e-3 ) [1] TRUE > mlNmInEqInd <- maxLik( llfInd, start = startVal, constraints = inEq, + method = "NM" ) > summary( mlNmInEqInd ) -------------------------------------------- Maximum Likelihood estimation Nelder-Mead maximisation, 103 iterations Return code 0: successful convergence Log-Likelihood: -204.5 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 0.820 0.174 4.71 2.5e-06 *** sigma 1.680 0.107 15.69 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. Inference is probably wrong Constrained optimization based on constrOptim 1 outer iterations, barrier value -0.001468 -------------------------------------------- > all.equal( mlNmInEq[-4], mlNmInEqInd[-c(4,12)], tolerance = 1e-3 ) [1] TRUE > round( mlNmInEqInd[[ 12 ]], 3 ) mu sigma [1,] -0.333 -0.409 [2,] -0.099 -0.579 [3,] 1.168 1.697 [4,] 0.114 -0.573 [5,] 0.155 -0.555 [6,] 1.279 2.153 [7,] 0.390 -0.339 [8,] -0.832 0.569 [9,] -0.423 -0.295 [10,] -0.252 -0.489 [11,] 0.931 0.861 [12,] 0.319 -0.424 [13,] 0.348 -0.392 [14,] 0.142 -0.561 [15,] -0.330 -0.412 [16,] 1.330 2.376 [17,] 0.417 -0.304 [18,] -1.329 2.374 [19,] 0.561 -0.067 [20,] -0.271 -0.472 [21,] -0.693 0.211 [22,] -0.091 -0.581 [23,] -0.663 0.143 [24,] -0.452 -0.251 [25,] -0.379 -0.354 [26,] -1.131 1.554 [27,] 0.657 0.131 [28,] 0.173 -0.545 [29,] -0.742 0.331 [30,] 0.952 0.928 [31,] 0.366 -0.370 [32,] -0.145 -0.560 [33,] 0.698 0.223 [34,] 0.686 0.195 [35,] 0.646 0.106 [36,] 0.552 -0.084 [37,] 0.456 -0.245 [38,] 0.020 -0.594 [39,] -0.153 -0.556 [40,] -0.206 -0.524 [41,] -0.428 -0.287 [42,] -0.083 -0.583 [43,] -0.833 0.569 [44,] 1.600 3.708 [45,] 0.920 0.826 [46,] -0.732 0.305 [47,] -0.222 -0.513 [48,] -0.267 -0.476 [49,] 0.616 0.043 [50,] 0.005 -0.595 [51,] 0.243 -0.496 [52,] 0.044 -0.592 [53,] 0.033 -0.593 [54,] 1.033 1.199 [55,] -0.096 -0.580 [56,] 1.138 1.581 [57,] -1.033 1.199 [58,] 0.478 -0.211 [59,] 0.152 -0.557 [60,] 0.217 -0.516 [61,] 0.333 -0.409 [62,] -0.292 -0.452 [63,] -0.172 -0.545 [64,] -0.658 0.132 [65,] -0.695 0.217 [66,] 0.279 -0.464 [67,] 0.381 -0.351 [68,] 0.101 -0.578 [69,] 0.717 0.269 [70,] 1.516 3.267 [71,] -0.284 -0.460 [72,] -1.572 3.557 [73,] 0.776 0.417 [74,] -0.439 -0.272 [75,] -0.424 -0.294 [76,] 0.790 0.454 [77,] -0.138 -0.563 [78,] -0.801 0.483 [79,] 0.192 -0.533 [80,] -0.035 -0.593 [81,] 0.068 -0.587 [82,] 0.337 -0.405 [83,] -0.199 -0.529 [84,] 0.520 -0.140 [85,] -0.092 -0.581 [86,] 0.299 -0.445 [87,] 0.841 0.593 
[88,] 0.372 -0.362 [89,] -0.167 -0.548 [90,] 0.878 0.699 [91,] 0.768 0.395 [92,] 0.452 -0.251 [93,] 0.233 -0.504 [94,] -0.381 -0.351 [95,] 1.028 1.180 [96,] -0.361 -0.376 [97,] 1.613 3.778 [98,] 1.150 1.625 [99,] -0.103 -0.577 [100,] -0.663 0.144 > nObs( mlNmInEqInd ) [1] 100 > > # with unused analytical gradients > mlgNmInEq <- maxLik( llf, gf, start = startVal, constraints = inEq, + method = "NM" ) > all.equal( mlNmInEq, mlgNmInEq, tolerance = 1e-3 ) [1] TRUE > > # with unused analytical gradients and Hessians > mlghNmInEq <- maxLik( llf, gf, hf, start = startVal, constraints = inEq, + method = "NM" ) > all.equal( mlgNmInEq, mlghNmInEq, tolerance = 1e-3 ) [1] TRUE > > ## SANN method with inequality constraints > mlSannInEq <- maxLik( llf, start = startVal, constraints = inEq, + method = "SANN" ) > print( mlSannInEq ) Maximum Likelihood estimation SANN maximisation, 10000 iterations Return code 0: successful convergence Log-Likelihood: -204.5 (2 free parameter(s)) Estimate(s): 0.8297 1.67 > summary( mlSannInEq ) -------------------------------------------- Maximum Likelihood estimation SANN maximisation, 10000 iterations Return code 0: successful convergence Log-Likelihood: -204.5 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 0.830 0.173 4.8 1.6e-06 *** sigma 1.670 0.106 15.8 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. Inference is probably wrong Constrained optimization based on constrOptim 1 outer iterations, barrier value -0.001043 -------------------------------------------- > activePar( mlSannInEq ) mu sigma TRUE TRUE > AIC( mlSannInEq ) [1] 413.1 > coef( mlSannInEq ) mu sigma 0.8297 1.6702 > condiNumber( mlSannInEq ) mu 1 sigma 3.601 > round( hessian( mlSannInEq ), 2 ) mu sigma mu -35.84 -15.06 sigma -15.06 -96.12 > logLik( mlSannInEq ) [1] -204.5 > maximType( mlSannInEq ) [1] "SANN maximisation" > nIter( mlSannInEq ) function 10000 > nParam( mlSannInEq ) [1] 2 > returnCode( mlSannInEq ) [1] 0 > returnMessage( mlSannInEq ) [1] "successful convergence " > round( vcov( mlSannInEq ), 4 ) mu sigma mu 0.0299 -0.0047 sigma -0.0047 0.0111 > logLik( summary( mlSannInEq ) ) [1] -204.5 > all.equal( mlBfgsInEq[-c(2,3,4,9,10,11)], mlSannInEq[-c(2,3,4,9,10,11)], + tolerance = 1e-3 ) [1] TRUE > all.equal( mlBfgsInEq[-c(3,4,9,10,11)], mlSannInEq[-c(3,4,9,10,11)], + tolerance = 1e-2 ) [1] TRUE > # with unused analytical gradients > mlgSannInEq <- maxLik( llf, gf, start = startVal, constraints = inEq, + method = "SANN" ) > all.equal( mlSannInEq, mlgSannInEq, tolerance = 1e-3 ) [1] TRUE > > # with a user-specified function to generate a new candidate point > mlSannInEqCand <- maxLik( llf, start = startVal, constraints = inEq, + method = "SANN", cand = function(x)c(rnorm(1,x[1]),rnorm(1,x[2])) ) > summary( mlSannInEqCand ) -------------------------------------------- Maximum Likelihood estimation SANN maximisation, 10000 iterations Return code 0: successful convergence Log-Likelihood: -204.6 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 0.812 0.175 4.65 3.4e-06 *** sigma 1.682 0.108 15.65 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. 
Inference is probably wrong Constrained optimization based on constrOptim 1 outer iterations, barrier value -0.0005163 -------------------------------------------- > all.equal( mlSannInEqCand[-c(2,3,4)], mlSannInEq[-c(2,3,4)], tolerance = 1e-3 ) [1] TRUE > all.equal( mlSannInEqCand, mlSannInEq, tolerance = 1e-1 ) [1] TRUE > > ############### equality constraints ############### > eqCon <- list( eqA = A, eqB = 2.5 ) > > ## NR method with equality constraints > mlCon <- maxLik( llf, start = startVal, constraints = eqCon ) > print( mlCon ) Maximum Likelihood estimation Newton-Raphson maximisation, 2 iterations Return code 1: gradient close to zero Log-Likelihood: -204.5 (2 free parameter(s)) Estimate(s): 0.8198 1.68 > summary( mlCon ) -------------------------------------------- Maximum Likelihood estimation Newton-Raphson maximisation, 2 iterations Return code 1: gradient close to zero Log-Likelihood: -204.5 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 0.820 0.174 4.71 2.5e-06 *** sigma 1.680 0.107 15.68 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. Inference is probably wrong Constrained optimization based on SUMT Return code: 1 penalty close to zero 10 outer iterations, barrier value 2.07e-10 -------------------------------------------- > activePar( mlCon ) mu sigma TRUE TRUE > AIC( mlCon ) [1] 413.1 > coef( mlCon ) mu sigma 0.8198 1.6803 > condiNumber( mlCon ) mu 1 sigma 3.614 > round( hessian( mlCon ), 2 ) mu sigma mu -35.44 -15.26 sigma -15.26 -93.71 > logLik( mlCon ) [1] -204.5 > maximType( mlCon ) [1] "Newton-Raphson maximisation" > nIter( mlCon ) [1] 2 > nParam( mlCon ) [1] 2 > returnCode( mlCon ) [1] 1 > returnMessage( mlCon ) [1] "gradient close to zero" > round( vcov( mlCon ), 4 ) mu sigma mu 0.0303 -0.0049 sigma -0.0049 0.0115 > logLik( summary( mlCon ) ) [1] -204.5 > mlConInd <- maxLik( llfInd, start = startVal, constraints = eqCon ) > summary( mlConInd ) -------------------------------------------- Maximum Likelihood estimation Newton-Raphson maximisation, 2 iterations Return code 1: gradient close to zero Log-Likelihood: -204.5 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 0.820 0.174 4.71 2.5e-06 *** sigma 1.680 0.107 15.69 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. 
Inference is probably wrong Constrained optimization based on SUMT Return code: 1 penalty close to zero 10 outer iterations, barrier value 2.07e-10 -------------------------------------------- > all.equal( mlCon[-4], mlConInd[-c(4,11)], tolerance = 1e-3 ) [1] TRUE > mlConInd[11] $gradientObs mu sigma [1,] -0.333193 -0.40861 [2,] -0.099212 -0.57861 [3,] 1.168023 1.69720 [4,] 0.113792 -0.57339 [5,] 0.155431 -0.55455 [6,] 1.278785 2.15258 [7,] 0.390354 -0.33911 [8,] -0.832318 0.56886 [9,] -0.422718 -0.29490 [10,] -0.251860 -0.48856 [11,] 0.930976 0.86117 [12,] 0.318734 -0.42444 [13,] 0.347748 -0.39195 [14,] 0.142251 -0.56114 [15,] -0.329910 -0.41226 [16,] 1.329682 2.37565 [17,] 0.416518 -0.30364 [18,] -1.329295 2.37393 [19,] 0.560680 -0.06693 [20,] -0.271079 -0.47167 [21,] -0.692596 0.21086 [22,] -0.090568 -0.58136 [23,] -0.662971 0.14338 [24,] -0.452498 -0.25110 [25,] -0.378930 -0.35388 [26,] -1.130999 1.55418 [27,] 0.657327 0.13086 [28,] 0.172493 -0.54515 [29,] -0.742405 0.33096 [30,] 0.952039 0.92781 [31,] 0.365949 -0.37013 [32,] -0.145183 -0.55973 [33,] 0.697945 0.22336 [34,] 0.685908 0.19537 [35,] 0.645847 0.10572 [36,] 0.551672 -0.08377 [37,] 0.456236 -0.24540 [38,] 0.019986 -0.59447 [39,] -0.152898 -0.55586 [40,] -0.205679 -0.52406 [41,] -0.428282 -0.28694 [42,] -0.083443 -0.58345 [43,] -0.832555 0.56953 [44,] 1.600319 3.70805 [45,] 0.919557 0.82566 [46,] -0.731759 0.30459 [47,] -0.221557 -0.51266 [48,] -0.266732 -0.47560 [49,] 0.616366 0.04320 [50,] 0.004786 -0.59511 [51,] 0.243293 -0.49569 [52,] 0.043622 -0.59195 [53,] 0.033475 -0.59326 [54,] 1.033353 1.19907 [55,] -0.096091 -0.57963 [56,] 1.138102 1.58126 [57,] -1.033283 1.19883 [58,] 0.477981 -0.21126 [59,] 0.151582 -0.55654 [60,] 0.216816 -0.51616 [61,] 0.332778 -0.40907 [62,] -0.291999 -0.45188 [63,] -0.172198 -0.54532 [64,] -0.657709 0.13171 [65,] -0.695406 0.21741 [66,] 0.278862 -0.46448 [67,] 0.381353 -0.35078 [68,] 0.101392 -0.57787 [69,] 0.717173 0.26908 [70,] 1.516111 3.26710 [71,] -0.283999 -0.45962 [72,] -1.571957 3.55687 [73,] 0.776303 0.41746 [74,] -0.438549 -0.27199 [75,] -0.423537 -0.29373 [76,] 0.790352 0.45444 [77,] -0.137887 -0.56320 [78,] -0.800905 0.48266 [79,] 0.192278 -0.53302 [80,] -0.034546 -0.59314 [81,] 0.067927 -0.58739 [82,] 0.336774 -0.40457 [83,] -0.198729 -0.52879 [84,] 0.520316 -0.14025 [85,] -0.092347 -0.58082 [86,] 0.298876 -0.44505 [87,] 0.840838 0.59282 [88,] 0.372124 -0.36247 [89,] -0.167044 -0.54826 [90,] 0.877652 0.69912 [91,] 0.767636 0.39498 [92,] 0.452325 -0.25137 [93,] 0.232960 -0.50396 [94,] -0.380961 -0.35129 [95,] 1.027722 1.17957 [96,] -0.361376 -0.37571 [97,] 1.613337 3.77834 [98,] 1.149536 1.62521 [99,] -0.103125 -0.57728 [100,] -0.663266 0.14404 > nObs( mlConInd ) [1] 100 > > # with analytical gradients > mlgCon <- maxLik( llf, gf, start = startVal, constraints = eqCon ) > summary( mlgCon ) -------------------------------------------- Maximum Likelihood estimation Newton-Raphson maximisation, 1 iterations Return code 3: Last step could not find a value above the current. Boundary of parameter space? Consider switching to a more robust optimisation method temporarily. Log-Likelihood: -204.5 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 0.842 0.172 4.88 1e-06 *** sigma 1.670 0.105 15.84 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. 
Inference is probably wrong Constrained optimization based on SUMT Return code: 2 successive function values within tolerance limit 7 outer iterations, barrier value 0.0001299 -------------------------------------------- > all.equal( mlCon[ -c(2,3,4,5,6,7,9,11) ], mlgCon[ -c(2,3,4,5,6,7,9,11) ], + tolerance = 1e-3 ) [1] TRUE > all.equal( mlCon[ -c( 5, 6, 7, 9, 11 ) ], mlgCon[ -c( 5, 6, 7, 9, 11 ) ], + tolerance = 1e-1 ) [1] TRUE > mlgConInd <- maxLik( llfInd, gfInd, start = startVal, constraints = eqCon ) > all.equal( mlConInd[ -c(2,3,4,5,6,7,9,11,12) ], mlgConInd[ -c(2,3,4,5,6,7,9,11,12) ], + tolerance = 1e-3 ) [1] TRUE > all.equal( mlConInd[ -c(5,6,7,9,12) ], mlgConInd[ -c(5,6,7,9,12) ], + tolerance = 1e-1 ) [1] TRUE > all.equal( mlgCon[], mlgConInd[-11], tolerance = 1e-3 ) [1] TRUE > mlgConInd[11] $gradientObs mu sigma [1,] -0.345249 -0.39986 [2,] -0.108315 -0.57930 [3,] 1.174915 1.70609 [4,] 0.107378 -0.57964 [5,] 0.149542 -0.56155 [6,] 1.287075 2.16717 [7,] 0.387431 -0.34825 [8,] -0.850673 0.60942 [9,] -0.435904 -0.28161 [10,] -0.262889 -0.48349 [11,] 0.934876 0.86047 [12,] 0.314907 -0.43330 [13,] 0.344287 -0.40097 [14,] 0.136196 -0.56792 [15,] -0.341925 -0.40367 [16,] 1.338615 2.39314 [17,] 0.413925 -0.31280 [18,] -1.353923 2.46196 [19,] 0.559907 -0.07543 [20,] -0.282350 -0.46577 [21,] -0.709187 0.24091 [22,] -0.099561 -0.58234 [23,] -0.679189 0.17137 [24,] -0.466059 -0.23620 [25,] -0.391563 -0.34288 [26,] -1.153124 1.62138 [27,] 0.657773 0.12356 [28,] 0.166819 -0.55242 [29,] -0.759626 0.36461 [30,] 0.956205 0.92782 [31,] 0.362717 -0.37921 [32,] -0.154865 -0.55884 [33,] 0.698904 0.21673 [34,] 0.686715 0.18853 [35,] 0.646148 0.09825 [36,] 0.550785 -0.09234 [37,] 0.454144 -0.25451 [38,] 0.012388 -0.59863 [39,] -0.162678 -0.55470 [40,] -0.216125 -0.52089 [41,] -0.441538 -0.27336 [42,] -0.092347 -0.58465 [43,] -0.850913 0.61011 [44,] 1.612667 3.74365 [45,] 0.923313 0.82459 [46,] -0.748845 0.33746 [47,] -0.232204 -0.50886 [48,] -0.277948 -0.46989 [49,] 0.616296 0.03532 [50,] -0.003004 -0.59887 [51,] 0.238514 -0.50390 [52,] 0.036322 -0.59669 [53,] 0.026047 -0.59776 [54,] 1.038546 1.20208 [55,] -0.105154 -0.58043 [56,] 1.144617 1.58874 [57,] -1.054175 1.25669 [58,] 0.476163 -0.22030 [59,] 0.145645 -0.56347 [60,] 0.211702 -0.52405 [61,] 0.329128 -0.41801 [62,] -0.303534 -0.44505 [63,] -0.182222 -0.54344 [64,] -0.673860 0.15933 [65,] -0.712033 0.24767 [66,] 0.274531 -0.47304 [67,] 0.378316 -0.35991 [68,] 0.094821 -0.58388 [69,] 0.718374 0.26281 [70,] 1.527397 3.29656 [71,] -0.295434 -0.45315 [72,] -1.599648 3.67381 [73,] 0.778251 0.41244 [74,] -0.451935 -0.25785 [75,] -0.436733 -0.28041 [76,] 0.792478 0.44976 [77,] -0.147478 -0.56257 [78,] -0.818864 0.52075 [79,] 0.186855 -0.54059 [80,] -0.042832 -0.59583 [81,] 0.060934 -0.59269 [82,] 0.333175 -0.41354 [83,] -0.209088 -0.52589 [84,] 0.519033 -0.14906 [85,] -0.101363 -0.58173 [86,] 0.294798 -0.45378 [87,] 0.843600 0.58942 [88,] 0.368970 -0.37157 [89,] -0.177002 -0.54658 [90,] 0.880879 0.69676 [91,] 0.769474 0.38976 [92,] 0.450184 -0.26049 [93,] 0.228050 -0.51205 [94,] -0.393619 -0.34018 [95,] 1.032843 1.18235 [96,] -0.373787 -0.36559 [97,] 1.625850 3.81493 [98,] 1.156195 1.63322 [99,] -0.112276 -0.57784 [100,] -0.679488 0.17205 > > # with analytical gradients as attribute > mlGCon <- maxLik( llfGrad, start = startVal, constraints = eqCon ) > all.equal( mlGCon, mlgCon, tolerance = 1e-3 ) [1] TRUE > all.equal( mlGCon[-c(2,3,4,5,6,7,9,11)], mlCon[-c(2,3,4,5,6,7,9,11)], + tolerance = 1e-3 ) [1] TRUE > all.equal( mlGCon[-c(5,6,7,9,11)], 
mlCon[-c(5,6,7,9,11)], + tolerance = 1e-1 ) [1] TRUE > > # with analytical gradients and Hessians > mlghCon <- maxLik( llf, gf, hf, start = startVal, constraints = eqCon ) > all.equal( mlgCon, mlghCon, tolerance = 1e-3 ) [1] "Component 7: Component 2: Attributes: < Component 2: Attributes: < Length mismatch: comparison on first 1 components > >" > > # with analytical gradients and Hessians as attributes > mlGHCon <- maxLik( llfGradHess, start = startVal, constraints = eqCon ) > all.equal( mlGHCon, mlghCon, tolerance = 1e-3 ) [1] TRUE > all.equal( mlGHCon[-c(2,3,4,5,6,7,9,11)], mlCon[-c(2,3,4,5,6,7,9,11)], + tolerance = 1e-3 ) [1] TRUE > all.equal( mlGHCon[-c(5,6,7,9,11)], mlCon[-c(5,6,7,9,11)], + tolerance = 1e-1 ) [1] TRUE > > > ## BHHH method with equality constraints > mlBhhhCon <- maxLik( llfInd, start = startVal, constraints = eqCon, + method = "BHHH" ) > print( mlBhhhCon ) Maximum Likelihood estimation BHHH maximisation, 8 iterations Return code 2: successive function values within tolerance limit Log-Likelihood: -204.5 (2 free parameter(s)) Estimate(s): 0.8199 1.68 > summary( mlBhhhCon ) -------------------------------------------- Maximum Likelihood estimation BHHH maximisation, 8 iterations Return code 2: successive function values within tolerance limit Log-Likelihood: -204.5 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 0.820 0.174 4.71 2.5e-06 *** sigma 1.680 0.107 15.69 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. Inference is probably wrong Constrained optimization based on SUMT Return code: 1 penalty close to zero 10 outer iterations, barrier value 2.085e-10 -------------------------------------------- > activePar( mlBhhhCon ) mu sigma TRUE TRUE > AIC( mlBhhhCon ) [1] 413.1 > coef( mlBhhhCon ) mu sigma 0.8199 1.6801 > condiNumber( mlBhhhCon ) mu 1 sigma 3.615 > round( hessian( mlBhhhCon ), 2 ) mu sigma mu -35.41 -15.23 sigma -15.23 -93.76 > logLik( mlBhhhCon ) [1] -204.5 > maximType( mlBhhhCon ) [1] "BHHH maximisation" > nIter( mlBhhhCon ) [1] 8 > nParam( mlBhhhCon ) [1] 2 > returnCode( mlBhhhCon ) [1] 2 > returnMessage( mlBhhhCon ) [1] "successive function values within tolerance limit" > round( vcov( mlBhhhCon ), 4 ) mu sigma mu 0.0304 -0.0049 sigma -0.0049 0.0115 > logLik( summary( mlBhhhCon ) ) [1] -204.5 > all.equal( mlCon[ -c( 5, 6, 7, 9, 10 ) ], mlBhhhCon[ -c( 5, 6, 7, 9, 10, 11 ) ], + tolerance = 1e-3 ) [1] TRUE > mlBhhhCon[11] $gradientObs mu sigma [1,] -0.333308 -0.40855 [2,] -0.099285 -0.57864 [3,] 1.168182 1.69756 [4,] 0.113758 -0.57346 [5,] 0.155405 -0.55462 [6,] 1.278964 2.15304 [7,] 0.390371 -0.33917 [8,] -0.832523 0.56928 [9,] -0.422849 -0.29479 [10,] -0.251960 -0.48854 [11,] 0.931091 0.86134 [12,] 0.318738 -0.42451 [13,] 0.347757 -0.39201 [14,] 0.142223 -0.56121 [15,] -0.330025 -0.41221 [16,] 1.329870 2.37617 [17,] 0.416540 -0.30369 [18,] -1.329591 2.37493 [19,] 0.560728 -0.06694 [20,] -0.271182 -0.47164 [21,] -0.692776 0.21115 [22,] -0.090639 -0.58140 [23,] -0.663146 0.14365 [24,] -0.452635 -0.25098 [25,] -0.379053 -0.35380 [26,] -1.131259 1.55492 [27,] 0.657393 0.13089 [28,] 0.172470 -0.54522 [29,] -0.742594 0.33129 [30,] 0.952158 0.92800 [31,] 0.365961 -0.37019 [32,] -0.145263 -0.55975 [33,] 0.698018 0.22340 [34,] 0.685979 0.19541 [35,] 0.645910 0.10574 [36,] 0.551719 -0.08378 [37,] 0.456265 -0.24544 [38,] 0.019936 -0.59453 [39,] -0.152980 -0.55588 [40,] -0.205771 -0.52406 [41,] -0.428414 -0.28683 [42,] -0.083513 -0.58348 [43,] -0.832761 0.56994 [44,] 
1.600556 3.70888 [45,] 0.919670 0.82583 [46,] -0.731946 0.30491 [47,] -0.221652 -0.51266 [48,] -0.266834 -0.47557 [49,] 0.616424 0.04321 [50,] 0.004733 -0.59516 [51,] 0.243283 -0.49576 [52,] 0.043575 -0.59201 [53,] 0.033427 -0.59332 [54,] 1.033487 1.19932 [55,] -0.096162 -0.57966 [56,] 1.138255 1.58160 [57,] -1.033525 1.19945 [58,] 0.478014 -0.21130 [59,] 0.151555 -0.55661 [60,] 0.216801 -0.51623 [61,] 0.332785 -0.40913 [62,] -0.292106 -0.45184 [63,] -0.172284 -0.54533 [64,] -0.657882 0.13197 [65,] -0.695587 0.21771 [66,] 0.278858 -0.46455 [67,] 0.381368 -0.35084 [68,] 0.101356 -0.57794 [69,] 0.717249 0.26913 [70,] 1.516333 3.26782 [71,] -0.284105 -0.45959 [72,] -1.572297 3.55824 [73,] 0.776390 0.41754 [74,] -0.438683 -0.27187 [75,] -0.423668 -0.29363 [76,] 0.790442 0.45453 [77,] -0.137967 -0.56322 [78,] -0.801105 0.48304 [79,] 0.192259 -0.53310 [80,] -0.034606 -0.59319 [81,] 0.067885 -0.58746 [82,] 0.336781 -0.40464 [83,] -0.198820 -0.52879 [84,] 0.520357 -0.14027 [85,] -0.092418 -0.58085 [86,] 0.298876 -0.44512 [87,] 0.840937 0.59293 [88,] 0.372137 -0.36253 [89,] -0.167128 -0.54827 [90,] 0.877758 0.69926 [91,] 0.767721 0.39505 [92,] 0.452353 -0.25141 [93,] 0.232948 -0.50403 [94,] -0.381084 -0.35120 [95,] 1.027855 1.17981 [96,] -0.361496 -0.37564 [97,] 1.613577 3.77919 [98,] 1.149691 1.62555 [99,] -0.103198 -0.57731 [100,] -0.663441 0.14431 > nObs( mlBhhhCon ) [1] 100 > > # with analytical gradients > mlgBhhhCon <- maxLik( llf, gfInd, start = startVal, constraints = eqCon, + method = "BHHH" ) > summary( mlgBhhhCon ) -------------------------------------------- Maximum Likelihood estimation BHHH maximisation, 7 iterations Return code 3: Last step could not find a value above the current. Boundary of parameter space? Consider switching to a more robust optimisation method temporarily. Log-Likelihood: -204.5 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 0.834 0.172 4.84 1.3e-06 *** sigma 1.666 0.105 15.88 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. Inference is probably wrong Constrained optimization based on SUMT Return code: 1 penalty close to zero 9 outer iterations, barrier value 1.178e-08 -------------------------------------------- > all.equal( mlBhhhCon[-c(2,3,4,5,6,7,9,11,12)], mlgBhhhCon[-c(2,3,4,5,6,7,9,11,12)], + tolerance = 1e-3 ) [1] TRUE > all.equal( mlBhhhCon[-c(5,6,7,9,12)], mlgBhhhCon[-c(5,6,7,9,12)], + tolerance = 1e-1 ) [1] TRUE > mlgBhhhConInd <- maxLik( llfInd, gfInd, start = startVal, constraints = eqCon, + method = "BHHH" ) > all.equal( mlgBhhhCon, mlgBhhhConInd, tolerance = 1e-3 ) [1] TRUE > > # with analytical gradients as attribute > mlGBhhhCon <- maxLik( llfGradInd, start = startVal, constraints = eqCon, + method = "BHHH" ) > summary( mlGBhhhCon ) -------------------------------------------- Maximum Likelihood estimation BHHH maximisation, 7 iterations Return code 3: Last step could not find a value above the current. Boundary of parameter space? Consider switching to a more robust optimisation method temporarily. Log-Likelihood: -204.5 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 0.834 0.172 4.84 1.3e-06 *** sigma 1.666 0.105 15.88 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. 
Inference is probably wrong Constrained optimization based on SUMT Return code: 1 penalty close to zero 9 outer iterations, barrier value 1.178e-08 -------------------------------------------- > all.equal( mlGBhhhCon, mlgBhhhCon, tolerance = 1e-3 ) [1] TRUE > all.equal( mlGBhhhCon[-c(2,3,4,5,6,7,9,11,12)], mlBhhhCon[-c(2,3,4,5,6,7,9,11,12)], + tolerance = 1e-3 ) [1] TRUE > all.equal( mlGBhhhCon[-c(5,6,7,9,12)], mlBhhhCon[-c(5,6,7,9,12)], + tolerance = 1e-1 ) [1] TRUE > > # with analytical gradients and unused Hessians > mlghBhhhCon <- maxLik( llf, gfInd, hf, start = startVal, constraints = eqCon, + method = "BHHH" ) > all.equal( mlgBhhhCon, mlghBhhhCon, tolerance = 1e-3 ) [1] TRUE > > # with analytical gradients and unused Hessians as attributes > mlGHBhhhCon <- maxLik( llfGradHessInd, start = startVal, constraints = eqCon, + method = "BHHH" ) > all.equal( mlGHBhhhCon, mlghBhhhCon, tolerance = 1e-3 ) [1] TRUE > all.equal( mlGHBhhhCon, mlGBhhhCon, tolerance = 1e-3 ) [1] TRUE > > > ## BFGS method with equality constraints > mlBfgsCon <- maxLik( llf, start = startVal, constraints = eqCon, + method = "BFGS" ) > print( mlBfgsCon ) Maximum Likelihood estimation BFGS maximisation, 31 iterations Return code 0: successful convergence Log-Likelihood: -204.5 (2 free parameter(s)) Estimate(s): 0.8198 1.68 > summary( mlBfgsCon ) -------------------------------------------- Maximum Likelihood estimation BFGS maximisation, 31 iterations Return code 0: successful convergence Log-Likelihood: -204.5 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 0.820 0.174 4.71 2.5e-06 *** sigma 1.680 0.107 15.68 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. Inference is probably wrong Constrained optimization based on SUMT Return code: 1 penalty close to zero 10 outer iterations, barrier value 2.07e-10 -------------------------------------------- > activePar( mlBfgsCon ) mu sigma TRUE TRUE > AIC( mlBfgsCon ) [1] 413.1 > coef( mlBfgsCon ) mu sigma 0.8198 1.6803 > condiNumber( mlBfgsCon ) mu 1 sigma 3.609 > round( hessian( mlBfgsCon ), 2 ) mu sigma mu -35.41 -15.21 sigma -15.21 -93.65 > logLik( mlBfgsCon ) [1] -204.5 > maximType( mlBfgsCon ) [1] "BFGS maximisation" > nIter( mlBfgsCon ) function 31 > nParam( mlBfgsCon ) [1] 2 > returnCode( mlBfgsCon ) [1] 0 > returnMessage( mlBfgsCon ) [1] "successful convergence " > round( vcov( mlBfgsCon ), 4 ) mu sigma mu 0.0304 -0.0049 sigma -0.0049 0.0115 > logLik( summary( mlBfgsCon ) ) [1] -204.5 > all.equal( mlBfgsCon[ -c( 4, 5, 6, 9, 10 ) ], mlCon[ -c( 4, 5, 6, 9, 10 ) ], + tolerance = 1e-3 ) [1] TRUE > mlBfgsConInd <- maxLik( llfInd, start = startVal, constraints = eqCon, + method = "BFGS" ) > summary( mlBfgsConInd ) -------------------------------------------- Maximum Likelihood estimation BFGS maximisation, 31 iterations Return code 0: successful convergence Log-Likelihood: -204.5 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 0.820 0.174 4.7 2.5e-06 *** sigma 1.680 0.107 15.7 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. 
Inference is probably wrong Constrained optimization based on SUMT Return code: 1 penalty close to zero 10 outer iterations, barrier value 2.07e-10 -------------------------------------------- > all.equal( mlBfgsCon[-4], mlBfgsConInd[-c(4,12)], tolerance = 1e-3 ) [1] TRUE > mlBfgsConInd[12] $gradientObs mu sigma [1,] -0.333193 -0.40861 [2,] -0.099212 -0.57861 [3,] 1.168023 1.69720 [4,] 0.113792 -0.57339 [5,] 0.155431 -0.55455 [6,] 1.278785 2.15258 [7,] 0.390354 -0.33911 [8,] -0.832318 0.56886 [9,] -0.422718 -0.29490 [10,] -0.251860 -0.48856 [11,] 0.930976 0.86117 [12,] 0.318734 -0.42444 [13,] 0.347748 -0.39195 [14,] 0.142251 -0.56114 [15,] -0.329910 -0.41226 [16,] 1.329682 2.37565 [17,] 0.416518 -0.30364 [18,] -1.329295 2.37393 [19,] 0.560680 -0.06693 [20,] -0.271079 -0.47167 [21,] -0.692596 0.21086 [22,] -0.090568 -0.58136 [23,] -0.662971 0.14338 [24,] -0.452498 -0.25110 [25,] -0.378930 -0.35388 [26,] -1.130999 1.55418 [27,] 0.657327 0.13086 [28,] 0.172493 -0.54515 [29,] -0.742405 0.33096 [30,] 0.952039 0.92781 [31,] 0.365949 -0.37013 [32,] -0.145183 -0.55973 [33,] 0.697945 0.22336 [34,] 0.685908 0.19537 [35,] 0.645847 0.10572 [36,] 0.551672 -0.08377 [37,] 0.456236 -0.24540 [38,] 0.019986 -0.59447 [39,] -0.152898 -0.55586 [40,] -0.205679 -0.52406 [41,] -0.428282 -0.28694 [42,] -0.083443 -0.58345 [43,] -0.832555 0.56953 [44,] 1.600319 3.70805 [45,] 0.919557 0.82566 [46,] -0.731759 0.30459 [47,] -0.221557 -0.51266 [48,] -0.266732 -0.47560 [49,] 0.616366 0.04320 [50,] 0.004786 -0.59511 [51,] 0.243293 -0.49569 [52,] 0.043622 -0.59195 [53,] 0.033475 -0.59326 [54,] 1.033353 1.19907 [55,] -0.096091 -0.57963 [56,] 1.138102 1.58126 [57,] -1.033283 1.19883 [58,] 0.477981 -0.21126 [59,] 0.151582 -0.55654 [60,] 0.216816 -0.51616 [61,] 0.332778 -0.40907 [62,] -0.291999 -0.45188 [63,] -0.172198 -0.54532 [64,] -0.657709 0.13171 [65,] -0.695406 0.21741 [66,] 0.278862 -0.46448 [67,] 0.381353 -0.35078 [68,] 0.101392 -0.57787 [69,] 0.717173 0.26908 [70,] 1.516111 3.26710 [71,] -0.283999 -0.45962 [72,] -1.571957 3.55687 [73,] 0.776303 0.41746 [74,] -0.438549 -0.27199 [75,] -0.423537 -0.29373 [76,] 0.790352 0.45444 [77,] -0.137887 -0.56320 [78,] -0.800905 0.48266 [79,] 0.192278 -0.53302 [80,] -0.034546 -0.59314 [81,] 0.067927 -0.58739 [82,] 0.336774 -0.40457 [83,] -0.198729 -0.52879 [84,] 0.520316 -0.14025 [85,] -0.092347 -0.58082 [86,] 0.298876 -0.44505 [87,] 0.840838 0.59282 [88,] 0.372124 -0.36247 [89,] -0.167044 -0.54826 [90,] 0.877652 0.69912 [91,] 0.767636 0.39498 [92,] 0.452325 -0.25137 [93,] 0.232960 -0.50396 [94,] -0.380961 -0.35129 [95,] 1.027722 1.17957 [96,] -0.361376 -0.37571 [97,] 1.613337 3.77834 [98,] 1.149536 1.62521 [99,] -0.103125 -0.57728 [100,] -0.663266 0.14404 > nObs( mlBfgsConInd ) [1] 100 > > # with analytical gradients > mlgBfgsCon <- maxLik( llf, gf, start = startVal, constraints = eqCon, + method = "BFGS" ) > summary( mlgBfgsCon ) -------------------------------------------- Maximum Likelihood estimation BFGS maximisation, 30 iterations Return code 0: successful convergence Log-Likelihood: -204.9 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 0.814 0.173 4.7 2.7e-06 *** sigma 1.670 0.106 15.8 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. 
Inference is probably wrong Constrained optimization based on SUMT Return code: 2 successive function values within tolerance limit 7 outer iterations, barrier value 0.0002481 -------------------------------------------- > all.equal( mlBfgsCon[-c(3,4,9,11)], mlgBfgsCon[-c(3,4,9,11)], tolerance = 1e-2 ) [1] TRUE > mlgBfgsConInd <- maxLik( llfInd, gfInd, start = startVal, constraints = eqCon, + method = "BFGS" ) > all.equal( mlgBfgsCon[], mlgBfgsConInd[-12], tolerance = 1e-3 ) [1] TRUE > mlgBfgsConInd[12] $gradientObs mu sigma [1,] -0.335092 -0.41114 [2,] -0.098314 -0.58255 [3,] 1.184065 1.74311 [4,] 0.117235 -0.57573 [5,] 0.159372 -0.55627 [6,] 1.296151 2.20745 [7,] 0.397103 -0.33530 [8,] -0.840181 0.58039 [9,] -0.425686 -0.29601 [10,] -0.252786 -0.49196 [11,] 0.944185 0.89037 [12,] 0.324627 -0.42267 [13,] 0.353987 -0.38939 [14,] 0.146035 -0.56307 [15,] -0.331769 -0.41484 [16,] 1.347656 2.43490 [17,] 0.423579 -0.29900 [18,] -1.343098 2.41441 [19,] 0.569464 -0.05702 [20,] -0.272234 -0.47490 [21,] -0.698789 0.21693 [22,] -0.089567 -0.58529 [23,] -0.668810 0.14845 [24,] -0.455822 -0.25164 [25,] -0.381375 -0.35575 [26,] -1.142431 1.58132 [27,] 0.667266 0.14501 [28,] 0.176638 -0.54657 [29,] -0.749194 0.33884 [30,] 0.965500 0.95836 [31,] 0.372406 -0.36704 [32,] -0.144834 -0.56365 [33,] 0.708370 0.23945 [34,] 0.696189 0.21088 [35,] 0.655649 0.11934 [36,] 0.560349 -0.07423 [37,] 0.463772 -0.23943 [38,] 0.022309 -0.59786 [39,] -0.152642 -0.55977 [40,] -0.206054 -0.52777 [41,] -0.431317 -0.28795 [42,] -0.082357 -0.58736 [43,] -0.840421 0.58106 [44,] 1.621527 3.79315 [45,] 0.932630 0.85415 [46,] -0.738420 0.31207 [47,] -0.222121 -0.51628 [48,] -0.267836 -0.47887 [49,] 0.625816 0.05548 [50,] 0.006927 -0.59861 [51,] 0.248284 -0.49572 [52,] 0.046227 -0.59512 [53,] 0.035959 -0.59653 [54,] 1.047786 1.23507 [55,] -0.095155 -0.58357 [56,] 1.153787 1.62487 [57,] -1.043548 1.22027 [58,] 0.485777 -0.20453 [59,] 0.155477 -0.55831 [60,] 0.221490 -0.51675 [61,] 0.338839 -0.40692 [62,] -0.293405 -0.45490 [63,] -0.172172 -0.54918 [64,] -0.663485 0.13660 [65,] -0.701633 0.22359 [66,] 0.284278 -0.46371 [67,] 0.387994 -0.34724 [68,] 0.104687 -0.58038 [69,] 0.727827 0.28613 [70,] 1.536313 3.34368 [71,] -0.285310 -0.46272 [72,] -1.588659 3.61691 [73,] 0.787664 0.43760 [74,] -0.441707 -0.27280 [75,] -0.426515 -0.29484 [76,] 0.801881 0.47534 [77,] -0.137452 -0.56713 [78,] -0.808392 0.49286 [79,] 0.196660 -0.53409 [80,] -0.032875 -0.59688 [81,] 0.070823 -0.59031 [82,] 0.342883 -0.40231 [83,] -0.199021 -0.53253 [84,] 0.528618 -0.13194 [85,] -0.091367 -0.58475 [86,] 0.304532 -0.44379 [87,] 0.852970 0.61656 [88,] 0.378655 -0.35920 [89,] -0.166957 -0.55213 [90,] 0.890224 0.72503 [91,] 0.778893 0.41465 [92,] 0.459814 -0.24554 [93,] 0.237828 -0.50421 [94,] -0.383430 -0.35312 [95,] 1.042087 1.21518 [96,] -0.363611 -0.37785 [97,] 1.634701 3.86480 [98,] 1.165357 1.66969 [99,] -0.102273 -0.58122 [100,] -0.669109 0.14912 > > # with analytical gradients and unused Hessians > mlghBfgsCon <- maxLik( llf, gf, hf, start = startVal, constraints = eqCon, + method = "BFGS" ) > all.equal( mlgBfgsCon, mlghBfgsCon, tolerance = 1e-3 ) [1] TRUE > > ## NM method with equality constraints > mlNmCon <- maxLik( llf, start = startVal, constraints = eqCon, method = "NM", SUMTTol=0) > print( mlNmCon ) Maximum Likelihood estimation Nelder-Mead maximisation, 57 iterations Return code 0: successful convergence Log-Likelihood: -204.5 (2 free parameter(s)) Estimate(s): 0.8197 1.68 > summary( mlNmCon ) -------------------------------------------- Maximum 
Likelihood estimation Nelder-Mead maximisation, 57 iterations Return code 0: successful convergence Log-Likelihood: -204.5 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 0.820 0.174 4.7 2.5e-06 *** sigma 1.680 0.107 15.7 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. Inference is probably wrong Constrained optimization based on SUMT Return code: 1 penalty close to zero 10 outer iterations, barrier value 2.418e-10 -------------------------------------------- > activePar( mlNmCon ) mu sigma TRUE TRUE > AIC( mlNmCon ) [1] 413.1 > coef( mlNmCon ) mu sigma 0.8197 1.6803 > condiNumber( mlNmCon ) mu 1 sigma 3.609 > round( hessian( mlNmCon ), 2 ) mu sigma mu -35.41 -15.21 sigma -15.21 -93.65 > logLik( mlNmCon ) [1] -204.5 > maximType( mlNmCon ) [1] "Nelder-Mead maximisation" > nIter( mlNmCon ) function 57 > nParam( mlNmCon ) [1] 2 > returnCode( mlNmCon ) [1] 0 > returnMessage( mlNmCon ) [1] "successful convergence " > round( vcov( mlNmCon ), 4 ) mu sigma mu 0.0304 -0.0049 sigma -0.0049 0.0115 > logLik( summary( mlNmCon ) ) [1] -204.5 > all.equal( mlNmCon[ -c( 4, 5, 6, 9, 10 ) ], mlCon[ -c( 4, 5, 6, 9, 10 ) ], + tolerance = 1e-3 ) [1] TRUE > mlNmConInd <- maxLik( llfInd, start = startVal, constraints = eqCon, + method = "NM", SUMTTol=0) > summary( mlNmConInd ) -------------------------------------------- Maximum Likelihood estimation Nelder-Mead maximisation, 57 iterations Return code 0: successful convergence Log-Likelihood: -204.5 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 0.820 0.174 4.7 2.5e-06 *** sigma 1.680 0.107 15.7 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. 
Inference is probably wrong Constrained optimization based on SUMT Return code: 1 penalty close to zero 10 outer iterations, barrier value 2.418e-10 -------------------------------------------- > all.equal( mlNmCon[], mlNmConInd[-12], tolerance = 1e-3 ) [1] TRUE > mlNmConInd[12] $gradientObs mu sigma [1,] -0.333153 -0.40862 [2,] -0.099187 -0.57859 [3,] 1.167967 1.69707 [4,] 0.113803 -0.57336 [5,] 0.155439 -0.55453 [6,] 1.278722 2.15241 [7,] 0.390348 -0.33909 [8,] -0.832245 0.56872 [9,] -0.422672 -0.29493 [10,] -0.251825 -0.48857 [11,] 0.930935 0.86110 [12,] 0.318732 -0.42442 [13,] 0.347744 -0.39193 [14,] 0.142261 -0.56112 [15,] -0.329870 -0.41228 [16,] 1.329615 2.37547 [17,] 0.416510 -0.30362 [18,] -1.329191 2.37357 [19,] 0.560663 -0.06693 [20,] -0.271042 -0.47168 [21,] -0.692532 0.21076 [22,] -0.090543 -0.58135 [23,] -0.662910 0.14329 [24,] -0.452450 -0.25115 [25,] -0.378887 -0.35391 [26,] -1.130907 1.55392 [27,] 0.657303 0.13085 [28,] 0.172500 -0.54513 [29,] -0.742338 0.33084 [30,] 0.951996 0.92774 [31,] 0.365944 -0.37011 [32,] -0.145155 -0.55972 [33,] 0.697919 0.22334 [34,] 0.685883 0.19536 [35,] 0.645824 0.10572 [36,] 0.551656 -0.08376 [37,] 0.456225 -0.24538 [38,] 0.020004 -0.59445 [39,] -0.152869 -0.55586 [40,] -0.205647 -0.52406 [41,] -0.428236 -0.28698 [42,] -0.083419 -0.58343 [43,] -0.832482 0.56938 [44,] 1.600234 3.70775 [45,] 0.919516 0.82560 [46,] -0.731693 0.30447 [47,] -0.221524 -0.51267 [48,] -0.266696 -0.47561 [49,] 0.616345 0.04320 [50,] 0.004804 -0.59509 [51,] 0.243296 -0.49566 [52,] 0.043638 -0.59193 [53,] 0.033491 -0.59324 [54,] 1.033305 1.19898 [55,] -0.096066 -0.57962 [56,] 1.138048 1.58114 [57,] -1.033197 1.19861 [58,] 0.477969 -0.21125 [59,] 0.151591 -0.55651 [60,] 0.216820 -0.51613 [61,] 0.332776 -0.40905 [62,] -0.291961 -0.45189 [63,] -0.172168 -0.54532 [64,] -0.657647 0.13161 [65,] -0.695343 0.21731 [66,] 0.278863 -0.46446 [67,] 0.381347 -0.35076 [68,] 0.101404 -0.57785 [69,] 0.717145 0.26906 [70,] 1.516032 3.26684 [71,] -0.283962 -0.45963 [72,] -1.571837 3.55639 [73,] 0.776272 0.41743 [74,] -0.438502 -0.27203 [75,] -0.423491 -0.29377 [76,] 0.790320 0.45441 [77,] -0.137860 -0.56319 [78,] -0.800834 0.48252 [79,] 0.192285 -0.53300 [80,] -0.034525 -0.59312 [81,] 0.067942 -0.58737 [82,] 0.336771 -0.40455 [83,] -0.198698 -0.52878 [84,] 0.520302 -0.14024 [85,] -0.092323 -0.58080 [86,] 0.298876 -0.44503 [87,] 0.840802 0.59277 [88,] 0.372119 -0.36245 [89,] -0.167014 -0.54825 [90,] 0.877614 0.69907 [91,] 0.767605 0.39495 [92,] 0.452315 -0.25135 [93,] 0.232964 -0.50393 [94,] -0.380917 -0.35131 [95,] 1.027674 1.17948 [96,] -0.361334 -0.37574 [97,] 1.613252 3.77804 [98,] 1.149480 1.62509 [99,] -0.103099 -0.57726 [100,] -0.663205 0.14395 > nObs( mlNmConInd ) [1] 100 > > # with unused analytical gradients > mlgNmCon <- maxLik( llf, gf, start = startVal, constraints = eqCon, + method = "NM", SUMTTol=0) > all.equal( mlNmCon, mlgNmCon, tolerance = 1e-3 ) [1] TRUE > mlgNmConInd <- maxLik( llfInd, gfInd, start = startVal, constraints = eqCon, + method = "NM", SUMTTol=0) > all.equal( mlgNmCon[], mlgNmConInd[-12], tolerance = 1e-3 ) [1] TRUE > mlgNmConInd[12] $gradientObs mu sigma [1,] -0.333153 -0.40862 [2,] -0.099187 -0.57859 [3,] 1.167967 1.69707 [4,] 0.113803 -0.57336 [5,] 0.155439 -0.55453 [6,] 1.278722 2.15241 [7,] 0.390348 -0.33909 [8,] -0.832245 0.56872 [9,] -0.422672 -0.29493 [10,] -0.251825 -0.48857 [11,] 0.930935 0.86110 [12,] 0.318732 -0.42442 [13,] 0.347744 -0.39193 [14,] 0.142261 -0.56112 [15,] -0.329870 -0.41228 [16,] 1.329615 2.37547 [17,] 0.416510 -0.30362 [18,] 
-1.329191 2.37357 [19,] 0.560663 -0.06693 [20,] -0.271042 -0.47168 [21,] -0.692532 0.21076 [22,] -0.090543 -0.58135 [23,] -0.662910 0.14329 [24,] -0.452450 -0.25115 [25,] -0.378887 -0.35391 [26,] -1.130907 1.55392 [27,] 0.657303 0.13085 [28,] 0.172500 -0.54513 [29,] -0.742338 0.33084 [30,] 0.951996 0.92774 [31,] 0.365944 -0.37011 [32,] -0.145155 -0.55972 [33,] 0.697919 0.22334 [34,] 0.685883 0.19536 [35,] 0.645824 0.10572 [36,] 0.551656 -0.08376 [37,] 0.456225 -0.24538 [38,] 0.020004 -0.59445 [39,] -0.152869 -0.55586 [40,] -0.205647 -0.52406 [41,] -0.428236 -0.28698 [42,] -0.083419 -0.58343 [43,] -0.832482 0.56938 [44,] 1.600234 3.70775 [45,] 0.919516 0.82560 [46,] -0.731693 0.30447 [47,] -0.221524 -0.51267 [48,] -0.266696 -0.47561 [49,] 0.616345 0.04320 [50,] 0.004804 -0.59509 [51,] 0.243296 -0.49566 [52,] 0.043638 -0.59193 [53,] 0.033491 -0.59324 [54,] 1.033305 1.19898 [55,] -0.096066 -0.57962 [56,] 1.138048 1.58114 [57,] -1.033197 1.19861 [58,] 0.477969 -0.21125 [59,] 0.151591 -0.55651 [60,] 0.216820 -0.51613 [61,] 0.332776 -0.40905 [62,] -0.291961 -0.45189 [63,] -0.172168 -0.54532 [64,] -0.657647 0.13161 [65,] -0.695343 0.21731 [66,] 0.278863 -0.46446 [67,] 0.381347 -0.35076 [68,] 0.101404 -0.57785 [69,] 0.717145 0.26906 [70,] 1.516032 3.26684 [71,] -0.283962 -0.45963 [72,] -1.571837 3.55639 [73,] 0.776272 0.41743 [74,] -0.438502 -0.27203 [75,] -0.423491 -0.29377 [76,] 0.790320 0.45441 [77,] -0.137860 -0.56319 [78,] -0.800834 0.48252 [79,] 0.192285 -0.53300 [80,] -0.034525 -0.59312 [81,] 0.067942 -0.58737 [82,] 0.336771 -0.40455 [83,] -0.198698 -0.52878 [84,] 0.520302 -0.14024 [85,] -0.092323 -0.58080 [86,] 0.298876 -0.44503 [87,] 0.840802 0.59277 [88,] 0.372119 -0.36245 [89,] -0.167014 -0.54825 [90,] 0.877614 0.69907 [91,] 0.767605 0.39495 [92,] 0.452315 -0.25135 [93,] 0.232964 -0.50393 [94,] -0.380917 -0.35131 [95,] 1.027674 1.17948 [96,] -0.361334 -0.37574 [97,] 1.613252 3.77804 [98,] 1.149480 1.62509 [99,] -0.103099 -0.57726 [100,] -0.663205 0.14395 > > # with unused analytical gradients and Hessians > mlghNmCon <- maxLik( llf, gf, hf, start = startVal, constraints = eqCon, + method = "NM", SUMTTol=0) > all.equal( mlgNmCon, mlghNmCon, tolerance = 1e-3 ) [1] TRUE > > ## SANN method with equality constraints > mlSannCon <- maxLik( llf, start = startVal, constraints = eqCon, + method = "SANN", SUMTTol=0) > print( mlSannCon ) Maximum Likelihood estimation SANN maximisation, 10000 iterations Return code 0: successful convergence Log-Likelihood: -204.5 (2 free parameter(s)) Estimate(s): 0.816 1.684 > summary( mlSannCon ) -------------------------------------------- Maximum Likelihood estimation SANN maximisation, 10000 iterations Return code 0: successful convergence Log-Likelihood: -204.5 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 0.816 0.175 4.67 3e-06 *** sigma 1.684 0.108 15.63 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. 
Inference is probably wrong Constrained optimization based on SUMT Return code: 1 penalty close to zero 9 outer iterations, barrier value 1.234e-09 -------------------------------------------- > activePar( mlSannCon ) mu sigma TRUE TRUE > AIC( mlSannCon ) [1] 413.1 > coef( mlSannCon ) mu sigma 0.816 1.684 > condiNumber( mlSannCon ) mu 1 sigma 3.616 > round( hessian( mlSannCon ), 2 ) mu sigma mu -35.27 -15.29 sigma -15.29 -92.80 > logLik( mlSannCon ) [1] -204.5 > maximType( mlSannCon ) [1] "SANN maximisation" > nIter( mlSannCon ) function 10000 > nParam( mlSannCon ) [1] 2 > returnCode( mlSannCon ) [1] 0 > returnMessage( mlSannCon ) [1] "successful convergence " > round( vcov( mlSannCon ), 4 ) mu sigma mu 0.0305 -0.0050 sigma -0.0050 0.0116 > logLik( summary( mlSannCon ) ) [1] -204.5 > all.equal( mlSannCon[ -c(2,3,4,5,6,9,10,11) ], mlBfgsCon[ -c(2,3,4,5,6,9,10,11) ], + tolerance = 1e-3 ) [1] TRUE > all.equal( mlSannCon[ -c(3,4,5,6,9,10,11) ], mlBfgsCon[ -c(3,4,5,6,9,10,11) ], + tolerance = 1e-2 ) [1] TRUE > > # with unused analytical gradients > mlgSannCon <- maxLik( llf, gf, start = startVal, constraints = eqCon, + method = "SANN", SUMTTol=0) > all.equal( mlSannCon, mlgSannCon, tolerance = 1e-3 ) [1] TRUE > > # with a user-specified function to generate a new candidate point > mlSannConCand <- maxLik( llf, start = startVal, constraints = eqCon, + method = "SANN", cand = function(x)c(rnorm(1,x[1]),rnorm(1,x[2])) ) Warning message: In (function (fn, grad = NULL, hess = NULL, start, maxRoutine, constraints, : problem in imposing equality constraints: the constraints are not satisfied (barrier value = 0.254780368286163). Try setting 'SUMTTol' to 0 > summary( mlSannConCand ) -------------------------------------------- Maximum Likelihood estimation SANN maximisation, 10000 iterations Return code 0: successful convergence Log-Likelihood: -201.6 2 free parameters Estimates: Estimate Std. error t value Pr(> t) mu 1.183 0.182 6.5 8.2e-11 *** sigma 1.822 0.129 14.1 < 2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. 
Inference is probably wrong Constrained optimization based on SUMT Return code: 2 successive function values within tolerance limit 2 outer iterations, barrier value 0.2548 -------------------------------------------- > all.equal( mlSannConCand[-c(1,2,3,4,11)], mlSannCon[-c(1,2,3,4,11)], + tolerance = 1e-3 ) [1] TRUE > all.equal( mlSannConCand[-c(2,3,4,11)], mlSannCon[-c(2,3,4,11)], + tolerance = 1e-1 ) [1] TRUE > > > ## test for method "estfun" > library( sandwich ) > try( estfun( ml ) ) Error in estfun.maxLik(ml) : cannot return the gradients of the log-likelihood function evaluated at each observation: please re-run 'maxLik' and provide a gradient function using argument 'grad' or (if no gradient function is specified) a log-likelihood function using argument 'logLik' that return the gradients or log-likelihood values, respectively, at each observation > estfun( mlInd )[ 1:5, ] mu sigma [1,] -0.39452 -0.2678 [2,] -0.19432 -0.4819 [3,] 0.88999 0.8883 [4,] -0.01206 -0.5503 [5,] 0.02357 -0.5495 > estfun( mlgInd )[ 1:5, ] mu sigma [1,] -0.39452 -0.2678 [2,] -0.19432 -0.4819 [3,] 0.88999 0.8883 [4,] -0.01206 -0.5503 [5,] 0.02357 -0.5495 > estfun( mlBHHH )[ 1:5, ] mu sigma [1,] -0.39452 -0.2678 [2,] -0.19432 -0.4819 [3,] 0.88999 0.8883 [4,] -0.01206 -0.5503 [5,] 0.02357 -0.5495 > estfun( mlgBHHH )[ 1:5, ] mu sigma [1,] -0.39452 -0.2678 [2,] -0.19432 -0.4819 [3,] 0.88999 0.8883 [4,] -0.01206 -0.5503 [5,] 0.02357 -0.5495 > estfun( mlIndBFGS )[ 1:5, ] mu sigma [1,] -0.39452 -0.2678 [2,] -0.19432 -0.4819 [3,] 0.88999 0.8883 [4,] -0.01206 -0.5503 [5,] 0.02357 -0.5495 > estfun( mlgIndBFGS )[ 1:5, ] mu sigma [1,] -0.39452 -0.2678 [2,] -0.19432 -0.4819 [3,] 0.88999 0.8883 [4,] -0.01206 -0.5503 [5,] 0.02357 -0.5495 > estfun( mlIndNM )[ 1:5, ] mu sigma [1,] -0.39439 -0.2679 [2,] -0.19422 -0.4819 [3,] 0.88990 0.8882 [4,] -0.01200 -0.5502 [5,] 0.02363 -0.5495 > estfun( mlgIndNM )[ 1:5, ] mu sigma [1,] -0.39439 -0.2679 [2,] -0.19422 -0.4819 [3,] 0.88990 0.8882 [4,] -0.01200 -0.5502 [5,] 0.02363 -0.5495 > estfun( mlIndSANN )[ 1:5, ] mu sigma [1,] -0.39480 -0.2674 [2,] -0.19460 -0.4817 [3,] 0.88966 0.8873 [4,] -0.01235 -0.5502 [5,] 0.02327 -0.5495 > estfun( mlgIndSANN )[ 1:5, ] mu sigma [1,] -0.39480 -0.2674 [2,] -0.19460 -0.4817 [3,] 0.88966 0.8873 [4,] -0.01235 -0.5502 [5,] 0.02327 -0.5495 > estfun( mlIndFix )[ 1:5, ] mu sigma [1,] NA -0.3412 [2,] NA -0.5130 [3,] NA 1.0498 [4,] NA -0.5445 [5,] NA -0.5368 > estfun( mlgIndFix )[ 1:5, ] mu sigma [1,] NA -0.3412 [2,] NA -0.5130 [3,] NA 1.0498 [4,] NA -0.5445 [5,] NA -0.5368 > estfun( mlFixBHHH )[ 1:5, ] mu sigma [1,] NA -0.3412 [2,] NA -0.5130 [3,] NA 1.0498 [4,] NA -0.5445 [5,] NA -0.5368 > estfun( mlgFixBHHH )[ 1:5, ] mu sigma [1,] NA -0.3412 [2,] NA -0.5130 [3,] NA 1.0498 [4,] NA -0.5445 [5,] NA -0.5368 > estfun( mlIndFixBfgs )[ 1:5, ] mu sigma [1,] -0.33639 -0.3412 [2,] -0.13815 -0.5130 [3,] 0.93552 1.0498 [4,] 0.04232 -0.5445 [5,] 0.07760 -0.5368 > estfun( mlgIndFixBfgs )[ 1:5, ] mu sigma [1,] -0.33639 -0.3412 [2,] -0.13815 -0.5130 [3,] 0.93552 1.0498 [4,] 0.04232 -0.5445 [5,] 0.07760 -0.5368 > estfun( mlIndFixNm )[ 1:5, ] mu sigma [1,] -0.33641 -0.3412 [2,] -0.13816 -0.5130 [3,] 0.93558 1.0500 [4,] 0.04232 -0.5446 [5,] 0.07760 -0.5368 > estfun( mlgIndFixNm )[ 1:5, ] mu sigma [1,] -0.33641 -0.3412 [2,] -0.13816 -0.5130 [3,] 0.93558 1.0500 [4,] 0.04232 -0.5446 [5,] 0.07760 -0.5368 > estfun( mlIndFixSann )[ 1:5, ] mu sigma [1,] -0.33640 -0.3412 [2,] -0.13815 -0.5130 [3,] 0.93553 1.0499 [4,] 0.04232 -0.5445 [5,] 0.07760 -0.5368 > estfun( mlgIndFixSann )[ 
1:5, ] mu sigma [1,] -0.33640 -0.3412 [2,] -0.13815 -0.5130 [3,] 0.93553 1.0499 [4,] 0.04232 -0.5445 [5,] 0.07760 -0.5368 > estfun( mlBfgsInEqInd )[ 1:5, ] mu sigma [1,] -0.3332 -0.4086 [2,] -0.0992 -0.5786 [3,] 1.1680 1.6972 [4,] 0.1138 -0.5734 [5,] 0.1554 -0.5545 > estfun( mlgBfgsInEqInd )[ 1:5, ] mu sigma [1,] -0.3332 -0.4086 [2,] -0.0992 -0.5786 [3,] 1.1680 1.6972 [4,] 0.1138 -0.5734 [5,] 0.1554 -0.5545 > estfun( mlNmInEqInd )[ 1:5, ] mu sigma [1,] -0.3332 -0.4086 [2,] -0.0992 -0.5786 [3,] 1.1680 1.6972 [4,] 0.1138 -0.5734 [5,] 0.1554 -0.5545 > estfun( mlConInd )[ 1:5, ] mu sigma [1,] -0.33319 -0.4086 [2,] -0.09921 -0.5786 [3,] 1.16802 1.6972 [4,] 0.11379 -0.5734 [5,] 0.15543 -0.5546 > estfun( mlgConInd )[ 1:5, ] mu sigma [1,] -0.3452 -0.3999 [2,] -0.1083 -0.5793 [3,] 1.1749 1.7061 [4,] 0.1074 -0.5796 [5,] 0.1495 -0.5615 > estfun( mlBhhhCon )[ 1:5, ] mu sigma [1,] -0.33331 -0.4085 [2,] -0.09928 -0.5786 [3,] 1.16818 1.6976 [4,] 0.11376 -0.5735 [5,] 0.15540 -0.5546 > estfun( mlgBhhhCon )[ 1:5, ] mu sigma [1,] -0.3440 -0.4030 [2,] -0.1060 -0.5815 [3,] 1.1828 1.7307 [4,] 0.1106 -0.5798 [5,] 0.1530 -0.5612 > estfun( mlBfgsConInd )[ 1:5, ] mu sigma [1,] -0.33319 -0.4086 [2,] -0.09921 -0.5786 [3,] 1.16802 1.6972 [4,] 0.11379 -0.5734 [5,] 0.15543 -0.5546 > estfun( mlgBfgsConInd )[ 1:5, ] mu sigma [1,] -0.33509 -0.4111 [2,] -0.09831 -0.5825 [3,] 1.18407 1.7431 [4,] 0.11724 -0.5757 [5,] 0.15937 -0.5563 > estfun( mlNmConInd )[ 1:5, ] mu sigma [1,] -0.33315 -0.4086 [2,] -0.09919 -0.5786 [3,] 1.16797 1.6971 [4,] 0.11380 -0.5734 [5,] 0.15544 -0.5545 > estfun( mlgNmConInd )[ 1:5, ] mu sigma [1,] -0.33315 -0.4086 [2,] -0.09919 -0.5786 [3,] 1.16797 1.6971 [4,] 0.11380 -0.5734 [5,] 0.15544 -0.5545 > > > ## test for method "bread" > try( bread( ml ) ) Error in nObs.maxLik(x) : cannot return the number of observations: please re-run 'maxLik' and provide a gradient function using argument 'grad' or (if no gradient function is specified) a log-likelihood function using argument 'logLik' that return the gradients or log-likelihood values, respectively, at each observation > round( bread( mlInd ), 3 ) mu sigma mu 3.301 0.000 sigma 0.000 1.649 > round( bread( mlgInd ), 3 ) mu sigma mu 3.3 0.00 sigma 0.0 1.65 > round( bread( mlBHHH ), 3 ) mu sigma mu 3.306 -0.109 sigma -0.109 1.798 > round( bread( mlgBHHH ), 3 ) mu sigma mu 3.306 -0.109 sigma -0.109 1.798 > round( bread( mlIndBFGS ), 3 ) mu sigma mu 3.301 0.000 sigma 0.000 1.649 > round( bread( mlgIndBFGS ), 3 ) mu sigma mu 3.3 0.00 sigma 0.0 1.65 > round( bread( mlIndNM ), 3 ) mu sigma mu 3.298 -0.002 sigma -0.002 1.650 > round( bread( mlgIndNM ), 3 ) mu sigma mu 3.3 0.000 sigma 0.0 1.651 > round( bread( mlIndSANN ), 3 ) mu sigma mu 3.298 0.000 sigma 0.000 1.649 > round( bread( mlgIndSANN ), 3 ) mu sigma mu 3.300 0.002 sigma 0.002 1.650 > round( bread( mlIndFix ), 3 ) mu sigma mu 0 0.000 sigma 0 1.667 > round( bread( mlgIndFix ), 3 ) mu sigma mu 0 0.000 sigma 0 1.666 > round( bread( mlFixBHHH ), 3 ) mu sigma mu 0 0.000 sigma 0 1.786 > round( bread( mlgFixBHHH ), 3 ) mu sigma mu 0 0.000 sigma 0 1.786 > round( bread( mlIndFixBfgs ), 3 ) mu sigma mu 0 0.000 sigma 0 1.667 > round( bread( mlgIndFixBfgs ), 3 ) mu sigma mu 0 0.000 sigma 0 1.666 > round( bread( mlIndFixNm ), 3 ) mu sigma mu 0 0.000 sigma 0 1.665 > round( bread( mlgIndFixNm ), 3 ) mu sigma mu 0 0.000 sigma 0 1.666 > round( bread( mlIndFixSann ), 3 ) mu sigma mu 0 0.000 sigma 0 1.667 > round( bread( mlgIndFixSann ), 3 ) mu sigma mu 0 0.000 sigma 0 1.666 > round( bread( mlBfgsInEqInd ), 3 ) mu sigma mu 
3.035 -0.493 sigma -0.493 1.147 > round( bread( mlgBfgsInEqInd ), 3 ) mu sigma mu 3.035 -0.493 sigma -0.493 1.148 > round( bread( mlNmInEqInd ), 3 ) mu sigma mu 3.034 -0.494 sigma -0.494 1.147 > round( bread( mlConInd ), 3 ) mu sigma mu 3.033 -0.493 sigma -0.493 1.147 > round( bread( mlgConInd ), 3 ) mu sigma mu 2.971 -0.451 sigma -0.451 1.111 > round( bread( mlBhhhCon ), 3 ) mu sigma mu 3.036 -0.493 sigma -0.493 1.147 > round( bread( mlgBhhhCon ), 3 ) mu sigma mu 2.967 -0.458 sigma -0.458 1.101 > round( bread( mlBfgsConInd ), 3 ) mu sigma mu 3.036 -0.494 sigma -0.494 1.148 > round( bread( mlgBfgsConInd ), 3 ) mu sigma mu 3.005 -0.490 sigma -0.490 1.116 > round( bread( mlNmConInd ), 3 ) mu sigma mu 3.036 -0.494 sigma -0.494 1.147 > round( bread( mlgNmConInd ), 3 ) mu sigma mu 3.036 -0.493 sigma -0.493 1.148 > > > ## test for method "sandwich" > try( sandwich( ml ) ) Error in nObs.maxLik(x) : cannot return the number of observations: please re-run 'maxLik' and provide a gradient function using argument 'grad' or (if no gradient function is specified) a log-likelihood function using argument 'logLik' that return the gradients or log-likelihood values, respectively, at each observation > printSandwich <- function( x ) { + print( round( sandwich( x ), 3 ) ) + tmp <- all.equal( sandwich( x ), vcov( x ) ) + if( isTRUE( tmp ) ) { + print( tmp ) + } + } > printSandwich( mlInd ) mu sigma mu 0.033 0.001 sigma 0.001 0.015 > printSandwich( mlgInd ) mu sigma mu 0.033 0.001 sigma 0.001 0.015 > printSandwich( mlBHHH ) mu sigma mu 0.033 -0.001 sigma -0.001 0.018 [1] TRUE > printSandwich( mlgBHHH ) mu sigma mu 0.033 -0.001 sigma -0.001 0.018 [1] TRUE > printSandwich( mlIndBFGS ) mu sigma mu 0.033 0.001 sigma 0.001 0.015 > printSandwich( mlgIndBFGS ) mu sigma mu 0.033 0.001 sigma 0.001 0.015 > printSandwich( mlIndNM ) mu sigma mu 0.033 0.001 sigma 0.001 0.015 > printSandwich( mlgIndNM ) mu sigma mu 0.033 0.001 sigma 0.001 0.015 > printSandwich( mlIndSANN ) mu sigma mu 0.033 0.001 sigma 0.001 0.015 > printSandwich( mlgIndSANN ) mu sigma mu 0.033 0.001 sigma 0.001 0.015 > printSandwich( mlIndFix ) mu sigma mu NA NA sigma NA NA > printSandwich( mlgIndFix ) mu sigma mu NA NA sigma NA NA > printSandwich( mlFixBHHH ) mu sigma mu NA NA sigma NA NA > printSandwich( mlgFixBHHH ) mu sigma mu NA NA sigma NA NA > printSandwich( mlIndFixBfgs ) mu sigma mu 0 0.000 sigma 0 0.016 > printSandwich( mlgIndFixBfgs ) mu sigma mu 0 0.000 sigma 0 0.016 > printSandwich( mlIndFixNm ) mu sigma mu 0 0.000 sigma 0 0.016 > printSandwich( mlgIndFixNm ) mu sigma mu 0 0.000 sigma 0 0.016 > printSandwich( mlIndFixSann ) mu sigma mu 0 0.000 sigma 0 0.016 > printSandwich( mlgIndFixSann ) mu sigma mu 0 0.000 sigma 0 0.016 > printSandwich( mlBfgsInEqInd ) mu sigma mu 0.035 -0.004 sigma -0.004 0.012 > printSandwich( mlgBfgsInEqInd ) mu sigma mu 0.035 -0.004 sigma -0.004 0.012 > printSandwich( mlNmInEqInd ) mu sigma mu 0.035 -0.004 sigma -0.004 0.012 > printSandwich( mlConInd ) mu sigma mu 0.035 -0.004 sigma -0.004 0.012 > printSandwich( mlgConInd ) mu sigma mu 0.035 -0.003 sigma -0.003 0.011 > printSandwich( mlBhhhCon ) mu sigma mu 0.035 -0.004 sigma -0.004 0.012 > printSandwich( mlgBhhhCon ) mu sigma mu 0.035 -0.004 sigma -0.004 0.011 > printSandwich( mlBfgsConInd ) mu sigma mu 0.035 -0.004 sigma -0.004 0.012 > printSandwich( mlgBfgsConInd ) mu sigma mu 0.036 -0.004 sigma -0.004 0.012 > printSandwich( mlNmConInd ) mu sigma mu 0.035 -0.004 sigma -0.004 0.012 > printSandwich( mlgNmConInd ) mu sigma mu 0.035 -0.004 sigma -0.004 0.012 > > 
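## --- Illustrative sketch (editor's addition, not part of the saved test output) ---
## The "sandwich" tests above combine two building blocks from the 'sandwich'
## package: bread() (for maxLik objects typically nObs(x) * vcov(x)) and
## estfun() (the n x k matrix of observation-wise score contributions).
## A minimal sketch of how sandwich() puts them together, assuming 'mlInd'
## is one of the maxLik fits above that stores observation-wise gradients
## and that library(sandwich) is already attached:
g <- estfun( mlInd )                 # n x 2 matrix of per-observation scores (mu, sigma)
B <- bread( mlInd )                  # 2 x 2 "bread" matrix
M <- crossprod( g ) / nrow( g )      # "meat": average outer product of the scores
V <- B %*% M %*% B / nrow( g )       # robust (sandwich) covariance estimate
all.equal( V, sandwich( mlInd ) )    # should agree up to numerical noise
## ---------------------------------------------------------------------------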
proc.time() user system elapsed 29.92 0.06 30.00 maxLik/tests/methods.R0000644000176000001440000000022211711073173014454 0ustar ripleyuserslibrary(maxLik) set.seed(0) ## Test standard methods for "lm" x <- runif(100) y <- x + rnorm(100) m <- lm(y ~ x) print(nObs(m)) print(stdEr(m)) maxLik/tests/BFGSR.Rout.save0000644000176000001440000001033212222631714015343 0ustar ripleyusers R version 3.0.2 (2013-09-25) -- "Frisbee Sailing" Copyright (C) 2013 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > ### BFGSR-related tests > > ## 1. Test maximization algorithm for convex regions > ## > ## Optimize quadratic form t(D) %*% W %*% D with p.d. weight matrix > ## (ie unbounded problems). > ## All solutions should go to large values with a message about successful convergence > set.seed(0) > options(digits=4) > quadForm <- function(D) { + return( - t(D - (1:N) ) %*% W %*% ( D - (1:N) ) ) + } > N <- 3 > # 3-dimensional case > ## a) test quadratic function t(D) %*% D > W <- diag(N) > library(maxLik) Loading required package: miscTools Please cite the 'maxLik' package as: Henningsen, Arne and Toomet, Ott (2011). maxLik: A package for maximum likelihood estimation in R. Computational Statistics 26(3), 443-458. DOI 10.1007/s00180-010-0217-1. If you have questions, suggestions, or comments regarding the 'maxLik' package, please use a forum or 'tracker' at maxLik's R-Forge site: https://r-forge.r-project.org/projects/maxlik/ > D <- rep(1/N, N) > res <- maxBFGSR(quadForm, start=D) > summary(res) -------------------------------------------- BFGSR maximization Number of iterations: 3 Return code: 2 successive function values within tolerance limit Function value: -2.306e-08 Estimates: estimate gradient [1,] 1 6.299e-05 [2,] 2 1.575e-04 [3,] 3 2.520e-04 -------------------------------------------- > > ## b) add noice to > W <- diag(N) + matrix(runif(N*N), N, N) > # diagonal weight matrix with some noise > D <- rep(1/N, N) > res <- maxBFGSR(quadForm, start=D) > summary(res) -------------------------------------------- BFGSR maximization Number of iterations: 27 Return code: 2 successive function values within tolerance limit Function value: -2.4e-06 Estimates: estimate gradient [1,] 1.001 -0.0027577 [2,] 1.999 0.0025758 [3,] 3.000 -0.0001638 -------------------------------------------- > > ## Next, optimize hat function in non-concave region. Does not work well. > hat <- function(param) { + ## Hat function. Hessian negative definite if sqrt(x^2 + y^2) < 0.5 + x <- param[1] + y <- param[2] + exp(-(x-2)^2 - (y-2)^2) + } > > summary(hatNC <- maxBFGSR(hat, start=c(1,1), tol=0, reltol=0)) -------------------------------------------- BFGSR maximization Number of iterations: 26 Return code: 1 gradient close to zero Function value: 1 Estimates: estimate gradient [1,] 2 3.708e-07 [2,] 2 3.708e-07 -------------------------------------------- > # should converge to c(0,0). > > ## Test BFGSR with fixed parameters and equality constraints > ## Optimize 3D hat with one parameter fixed (== 2D hat). 
> ## Add an equality constraint on that > hat3 <- function(param) { + ## Hat function. Hessian negative definite if sqrt((x-2)^2 + (y-2)^2) < 0.5 + x <- param[1] + y <- param[2] + z <- param[3] + exp(-(x-2)^2-(y-2)^2-(z-2)^2) + } > sv <- c(1,1,1) > ## constraints: x + y + z = 8 > A <- matrix(c(1,1,1), 1, 3) > B <- -8 > constraints <- list(eqA=A, eqB=B) > summary(hat3CF <- maxBFGSR(hat3, start=sv, constraints=constraints, fixed=3)) -------------------------------------------- BFGSR maximization Number of iterations: 3 Return code: 2 successive function values within tolerance limit Function value: 0.004087 Estimates: estimate gradient [1,] 3.5 -0.012262 [2,] 3.5 -0.012262 [3,] 1.0 0.008175 Constrained optimization based on SUMT Return code: 1 penalty close to zero 5 outer iterations, barrier value 2.127e-09 -------------------------------------------- > > > proc.time() user system elapsed 0.316 0.020 0.326 maxLik/tests/methods.Rout.save0000644000176000001440000000250312215600672016145 0ustar ripleyusers R version 3.0.1 (2013-05-16) -- "Good Sport" Copyright (C) 2013 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > library(maxLik) Loading required package: miscTools Please cite the 'maxLik' package as: Henningsen, Arne and Toomet, Ott (2011). maxLik: A package for maximum likelihood estimation in R. Computational Statistics 26(3), 443-458. DOI 10.1007/s00180-010-0217-1. If you have questions, suggestions, or comments regarding the 'maxLik' package, please use a forum or 'tracker' at maxLik's R-Forge site: https://r-forge.r-project.org/projects/maxlik/ > set.seed(0) > > ## Test standard methods for "lm" > x <- runif(100) > y <- x + rnorm(100) > m <- lm(y ~ x) > print(nObs(m)) [1] 100 > print(stdEr(m)) (Intercept) x 0.1942235 0.3314218 > > > proc.time() user system elapsed 0.164 0.036 0.189 maxLik/tests/numericGradient.R0000644000176000001440000000052411716501626016142 0ustar ripleyusers ### test numeric methods, in particular handling of unequal ### function lengths library(maxLik) f <- function(x) { if(x[1] <= 0) return(NA) # support of x[1] is (0, Inf) return(c(log(x[1]),x[2])) } ng <- numericGradient(f, c(0.01,1), eps=0.1) nh <- try(numericHessian(f, t0=c(0.01,1), eps=0.1)) maxLik/tests/examples.R0000644000176000001440000002131512222552210014625 0ustar ripleyuserslibrary( maxLik ) options(digits=4) printRounded <- function( x ) { for( i in names( x ) ) { cat ( "$", i, "\n", sep = "" ) if( is.numeric( x[[i]] ) ) { print( round( x[[i]], 4 ) ) } else { print( x[[i]] ) } cat( "\n" ) } cat( "attr(,\"class\")\n" ) print( class( x ) ) } ### activePar # a simple two-dimensional exponential hat f <- function(a) exp(-(a[1]-2)^2 - (a[2]-4)^2) # # maximize wrt. 
both parameters free <- maxNR(f, start=1:2) printRounded( free ) summary(free) # results should be close to (2,4) activePar(free) # allow only the second parameter to vary cons <- maxNR(f, start=1:2, activePar=c(FALSE,TRUE)) printRounded( cons ) summary(cons) # result should be around (1,4) activePar(cons) # specify fixed par in different ways cons2 <- maxNR(f, start=1:2, fixed=1) all.equal( cons, cons2 ) cons3 <- maxNR(f, start=1:2, fixed=c(TRUE,FALSE)) all.equal( cons, cons3 ) cons4 <- maxNR(f, start=c(a=1, b=2), fixed="a") print(summary(cons4)) all.equal( cons, cons4 ) ### compareDerivatives set.seed( 2 ) ## A simple example with sin(x)' = cos(x) f <- sin compareDerivatives(f, cos, t0=1) ## ## Example of log-likelihood of normal density. Two-parameter ## function. x <- rnorm(100, 1, 2) # generate rnorm x l <- function(b) sum(log(dnorm((x-b[1])/b[2])/b[2])) # b[1] - mu, b[2] - sigma gradl <- function(b) { c(sum(x - b[1])/b[2]^2, sum((x - b[1])^2/b[2]^3 - 1/b[2])) } compareDerivatives(l, gradl, t0=c(1,2)) ### hessian set.seed( 3 ) # log-likelihood for normal density # a[1] - mean # a[2] - standard deviation ll <- function(a) sum(-log(a[2]) - (x - a[1])^2/(2*a[2]^2)) x <- rnorm(1000) # sample from standard normal ml <- maxLik(ll, start=c(1,1)) # ignore eventual warnings "NaNs produced in: log(x)" printRounded( ml ) print( ml ) summary(ml) # result should be close to c(0,1) hessian(ml) # How the Hessian looks like sqrt(-solve(hessian(ml))) # Note: standard deviations are on the diagonal print(stdEr(ml)) # test vector of stdEr-s # # Now run the same example while fixing a[2] = 1 mlf <- maxLik(ll, start=c(1,1), activePar=c(TRUE, FALSE)) printRounded( mlf ) print( mlf ) summary(mlf) # first parameter close to 0, the second exactly 1.0 hessian(mlf) # now invert only the free parameter part of the Hessian sqrt(-solve(hessian(mlf)[activePar(mlf), activePar(mlf)])) # gives the standard deviation for the mean print(stdEr(mlf)) # test standard errors with fixed par ### maxBFGS set.seed( 5 ) # Maximum Likelihood estimation of the parameter of Poissonian distribution n <- rpois(100, 3) loglik <- function(l) n*log(l) - l - lfactorial(n) # we use numeric gradient a <- maxBFGS(loglik, start=1) print( a ) summary( a ) # you would probably prefer mean(n) instead of that ;-) # Note also that maxLik is better suited for Maximum Likelihood ### logLik.maxLik set.seed( 4 ) ## ML estimation of exponential duration model: t <- rexp(100, 2) loglik <- function(theta) log(theta) - theta*t gradlik <- function(theta) 1/theta - t hesslik <- function(theta) -100/theta^2 ## Estimate with analytic gradient and hessian a <- maxLik(loglik, gradlik, hesslik, start=1) printRounded( a ) print( a ) ## print log likelihood value logLik( a ) ## compare with log likelihood value of summary object all.equal( logLik( a ), logLik( summary( a ) ) ) ### maxBHHH set.seed( 6 ) ## ML estimation of exponential duration model: t <- rexp(100, 2) ## Estimate with numeric gradient and hessian a <- maxBHHH(loglik, start=1, print.level=2) print( a ) summary(a) ## Estimate with analytic gradient a <- maxBHHH(loglik, gradlik, start=1) print( a ) summary(a) ### maxLik set.seed( 7 ) ## ML estimation of exponential duration model: t <- rexp(100, 2) ## Estimate with numeric gradient and hessian a <- maxLik(loglik, start=1, print.level=2) printRounded( a ) print( a ) summary(a) ## Estimate with analytic gradient and hessian a <- maxLik(loglik, gradlik, hesslik, start=1) printRounded( a ) print( a ) summary(a) ### maxNR set.seed( 8 ) ## ML estimation of 
exponential duration model: t <- rexp(100, 2) loglikSum <- function(theta) sum(log(theta) - theta*t) ## Note the log-likelihood and gradient are summed over observations gradlikSum <- function(theta) sum(1/theta - t) ## Estimate with numeric gradient and Hessian a <- maxNR(loglikSum, start=1, print.level=2) print( a ) summary(a) ## You would probably prefer 1/mean(t) instead ;-) ## Estimate with analytic gradient and Hessian a <- maxNR(loglikSum, gradlikSum, hesslik, start=1) print( a ) summary(a) ### maximType ## maximise two-dimensional exponential hat. Maximum is at c(2,1): f <- function(a) exp(-(a[1] - 2)^2 - (a[2] - 1)^2) m <- maxNR(f, start=c(0,0)) print( m ) summary(m) maximType(m) ## Now use BFGS maximisation. m <- maxBFGS(f, start=c(0,0)) print( m ) summary(m) maximType(m) ### Test maxNR with 0 iterations. Should perform no iterations ### Request by Yves Croissant f <- function(a) exp(-(a[1] - 2)^2 - (a[2] - 1)^2) m0 <- maxNR(f, start=c(1.1, 2.1), iterlim=0) summary(m0) ### nObs set.seed( 10 ) # Construct a simple OLS regression: x1 <- runif(100) x2 <- runif(100) y <- 3 + 4*x1 + 5*x2 + rnorm(100) m <- lm(y~x1+x2) # estimate it nObs(m) ### nParam set.seed( 11 ) # Construct a simple OLS regression: x1 <- runif(100) x2 <- runif(100) y <- 3 + 4*x1 + 5*x2 + rnorm(100) m <- lm(y~x1+x2) # estimate it summary(m) nParam(m) # you get 3 ### numericGradient # A simple example with Gaussian bell f0 <- function(t0) exp(-t0[1]^2 - t0[2]^2) numericGradient(f0, c(1,2)) numericHessian(f0, t0=c(1,2)) # An example with the analytic gradient gradf0 <- function(t0) -2*t0*f0(t0) numericHessian(f0, gradf0, t0=c(1,2)) # The results should be similar as in the previous case # The central numeric derivatives have usually quite a high precision compareDerivatives(f0, gradf0, t0=1:2) # The differenc is around 1e-10 ### returnCode ## maximise the exponential bell f1 <- function(x) exp(-x^2) a <- maxNR(f1, start=2) print( a ) returnCode(a) # should be success (1 or 2) ## Now try to maximise log() function f2 <- function(x) log(x) a <- maxNR(f2, start=2) print( a ) returnCode(a) # should give a failure (4) ### returnMessage ## maximise the exponential bell f1 <- function(x) exp(-x^2) a <- maxNR(f1, start=2) print( a ) returnMessage(a) # should be success (1 or 2) ## Now try to maximise log() function f2 <- function(x) log(x) a <- maxNR(f2, start=2) print( a ) returnMessage(a) # should give a failure (4) ### summary.maxLik set.seed( 15 ) ## ML estimation of exponential duration model: t <- rexp(100, 2) loglik <- function(theta) log(theta) - theta*t gradlik <- function(theta) 1/theta - t hesslik <- function(theta) -100/theta^2 ## Estimate with numeric gradient and hessian a <- maxLik(loglik, start=1, print.level=2) printRounded( a ) print( a ) summary(a) ## Estimate with analytic gradient and hessian a <- maxLik(loglik, gradlik, hesslik, start=1) printRounded( a ) print( a ) summary(a) ### summary.maxim and for "gradient"/"hessian" attributes ### Test for infinity ## maximize a 2D quadratic function: f <- function(b) { x <- b[1]; y <- b[2]; val <- (x - 2)^2 + (y - 3)^2 attr(val, "gradient") <- c(2*x - 4, 2*y - 6) attr(val, "hessian") <- matrix(c(2, 0, 0, 2), 2, 2) val } ## Use c(0,0) as initial value. result1 <- maxNR( f, start = c(0,0) ) print( result1 ) summary( result1 ) ## Now use c(1000000, -777777) as initial value and ask for hessian result2 <- maxNR( f, start = c( 1000000, -777777)) print( result2 ) summary( result2 ) ### Test for "gradient"/"hessian" attributes. A case which converges. 
hub <- function(x) { v <- exp(-sum(x*x)) val <- v attr(val, "gradient") <- -2*x*v attr(val, "hessian") <- 4*(x %*% t(x))*v - diag(2*c(v, v)) val } summary(a <- maxNR(hub, start=c(2,1))) ## Now test "gradient" attribute for BHHH/3-parameter probit N <- 1000 loglikProbit <- function( beta) { xb <- x %*% beta loglik <- ifelse(y == 0, pnorm( xb, log=TRUE, lower.tail=FALSE), pnorm( xb, log.p=TRUE)) grad <- ifelse(y == 0, -dnorm(xb)/pnorm(xb, lower.tail=FALSE), dnorm(xb)/pnorm(xb)) grad <- grad*x attr(loglik, "gradient") <- grad loglik } x <- runif(N) x <- cbind(x, x - runif(N), x - runif(N)) y <- x[,1] + 2*x[,2] - x[,3] + rnorm(N) > 0 summary(maxLik(loglikProbit, start=c(0,0,0), method="bhhh")) ### vcov.maxLik set.seed( 17 ) ## ML estimation of exponential duration model: t <- rexp(100, 2) ## Estimate with numeric gradient and hessian a <- maxLik(loglik, start=1, print.level=2) printRounded( a ) print( a ) vcov(a) ## Estimate with analytic gradient and hessian a <- maxLik(loglik, gradlik, hesslik, start=1) printRounded( a ) print( a ) vcov(a) print(stdEr(a)) # test single stdEr maxLik/tests/constraints.Rout.save0000644000176000001440000013537312230721772017067 0ustar ripleyusers R version 3.0.2 (2013-09-25) -- "Frisbee Sailing" Copyright (C) 2013 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > ### Various tests for constrained optimization > ### > options(digits=4) > > logLikMix <- function(param) { + rho <- param[1] + if(rho < 0 || rho > 1) + return(NA) + mu1 <- param[2] + mu2 <- param[3] + ll <- log(rho*dnorm(x - mu1) + (1 - rho)*dnorm(x - mu2)) + # ll <- sum(ll) + ll + } > > gradLikMix <- function(param) { + rho <- param[1] + if(rho < 0 || rho > 1) + return(NA) + mu1 <- param[2] + mu2 <- param[3] + f1 <- dnorm(x - mu1) + f2 <- dnorm(x - mu2) + L <- rho*f1 + (1 - rho)*f2 + g <- matrix(0, length(x), 3) + g[,1] <- (f1 - f2)/L + g[,2] <- rho*(x - mu1)*f1/L + g[,3] <- (1 - rho)*(x - mu2)*f2/L + # colSums(g) + g + } > > hessLikMix <- function(param) { + rho <- param[1] + if(rho < 0 || rho > 1) + return(NA) + mu1 <- param[2] + mu2 <- param[3] + f1 <- dnorm(x - mu1) + f2 <- dnorm(x - mu2) + L <- rho*f1 + (1 - rho)*f2 + dldrho <- (f1 - f2)/L + dldmu1 <- rho*(x - mu1)*f1/L + dldmu2 <- (1 - rho)*(x - mu2)*f2/L + h <- matrix(0, 3, 3) + h[1,1] <- -sum(dldrho*(f1 - f2)/L) + h[2,1] <- h[1,2] <- sum((x - mu1)*f1/L - dldmu1*dldrho) + h[3,1] <- h[1,3] <- sum(-(x - mu2)*f2/L - dldmu2*dldrho) + h[2,2] <- sum(rho*(-f1 + (x - mu1)^2*f1)/L - dldmu1^2) + h[2,3] <- h[3,2] <- -sum(dldmu1*dldmu2) + h[3,3] <- sum((1 - rho)*(-f2 + (x - mu2)^2*f2)/L - dldmu2^2) + h + } > ### -------------------------- > library(maxLik) Loading required package: miscTools Please cite the 'maxLik' package as: Henningsen, Arne and Toomet, Ott (2011). maxLik: A package for maximum likelihood estimation in R. Computational Statistics 26(3), 443-458. DOI 10.1007/s00180-010-0217-1. 
If you have questions, suggestions, or comments regarding the 'maxLik' package, please use a forum or 'tracker' at maxLik's R-Forge site: https://r-forge.r-project.org/projects/maxlik/ > ## mixed normal > set.seed(1) > x <- c(rnorm(1000, mean=-1), rnorm(1000, mean=1)) > > cat("Test for inequality constraints\n") Test for inequality constraints > ## Inequality constraints: x + y + z < 0.5 > A <- matrix(c(-1, 0, 0, + 0, -1, 0, + 0, 0, 1), 3, 3, byrow=TRUE) > B <- rep(0.5, 3) > start <- c(0.4, 0, 0.9) > ## analytic gradient > a <- maxLik(logLikMix, grad=gradLikMix, hess=hessLikMix, + start=start, + constraints=list(ineqA=A, ineqB=B), + print.level=1) initial value 4001.919470 iter 2 value 3924.195358 iter 3 value 3761.072283 iter 4 value 3740.857616 iter 5 value 3739.836172 iter 6 value 3739.669432 iter 7 value 3739.505309 iter 8 value 3739.342676 iter 9 value 3739.271657 iter 10 value 3674.195874 iter 11 value 3671.068664 iter 12 value 3626.168499 iter 13 value 3589.952102 iter 14 value 3552.974446 iter 15 value 3551.811868 iter 16 value 3551.806291 iter 17 value 3551.796762 iter 18 value 3551.796515 iter 19 value 3551.782574 iter 20 value 3551.776359 iter 21 value 3551.776203 iter 22 value 3551.775817 iter 23 value 3551.774828 iter 24 value 3551.773893 iter 25 value 3551.773735 iter 26 value 3551.773660 iter 27 value 3551.773589 iter 27 value 3551.773574 iter 27 value 3551.773567 final value 3551.773567 converged > summary(a) -------------------------------------------- Maximum Likelihood estimation BFGS maximisation, 135 iterations Return code 0: successful convergence Log-Likelihood: -3552 3 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] 0.5000 0.0256 19.5 <2e-16 *** [2,] -1.0358 0.0581 -17.8 <2e-16 *** [3,] 1.0111 0.0584 17.3 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. 
Inference is probably wrong Constrained optimization based on constrOptim 1 outer iterations, barrier value -0.0002019 -------------------------------------------- > ## No analytic gradient > a <- maxLik(logLikMix, + start=start, + constraints=list(ineqA=A, ineqB=B), + print.level=1) Nelder-Mead direct search function minimizer function value for initial parameters = 4001.919470 Scaled convergence tolerance is 4.00192e-05 Stepsize computed as 0.090000 BUILD 4 4070.076564 3935.045316 EXTENSION 6 4019.701151 3817.775378 HI-REDUCTION 8 4001.919470 3817.775378 HI-REDUCTION 10 3964.166942 3817.775378 HI-REDUCTION 12 3951.510291 3817.775378 HI-REDUCTION 14 3935.045316 3817.775378 LO-REDUCTION 16 3930.566819 3817.775378 EXTENSION 18 3921.548166 3738.446677 REFLECTION 20 3849.017731 3697.331824 HI-REDUCTION 22 3817.775378 3697.331824 EXTENSION 24 3796.154073 3640.588562 REFLECTION 26 3738.446677 3610.867576 REFLECTION 28 3697.331824 3599.961403 LO-REDUCTION 30 3640.588562 3598.894864 REFLECTION 32 3610.867576 3582.230855 LO-REDUCTION 34 3599.961403 3582.230855 LO-REDUCTION 36 3598.894864 3582.230855 LO-REDUCTION 38 3596.438166 3582.230855 HI-REDUCTION 40 3591.172140 3582.230855 REFLECTION 42 3589.835538 3581.931156 HI-REDUCTION 44 3588.467471 3581.931156 EXTENSION 46 3586.686697 3580.648495 HI-REDUCTION 48 3583.294588 3580.648495 HI-REDUCTION 50 3582.230855 3580.648495 EXTENSION 52 3581.931156 3577.586010 HI-REDUCTION 54 3581.845218 3577.586010 HI-REDUCTION 56 3580.648495 3577.586010 LO-REDUCTION 58 3580.218134 3577.586010 REFLECTION 60 3580.058076 3577.484332 HI-REDUCTION 62 3579.426189 3577.484332 HI-REDUCTION 64 3578.982021 3577.484332 HI-REDUCTION 66 3578.579385 3577.484332 HI-REDUCTION 68 3578.345785 3577.484332 REFLECTION 70 3578.107164 3576.925164 HI-REDUCTION 72 3577.646064 3576.925164 HI-REDUCTION 74 3577.586010 3576.925164 LO-REDUCTION 76 3577.484332 3576.925164 LO-REDUCTION 78 3577.429210 3576.925164 HI-REDUCTION 80 3577.265184 3576.925164 LO-REDUCTION 82 3577.219661 3576.925164 HI-REDUCTION 84 3577.099850 3576.925164 HI-REDUCTION 86 3577.094624 3576.925164 LO-REDUCTION 88 3577.049724 3576.925164 REFLECTION 90 3577.022908 3576.878815 HI-REDUCTION 92 3576.964251 3576.878815 HI-REDUCTION 94 3576.959035 3576.878815 HI-REDUCTION 96 3576.939769 3576.878815 LO-REDUCTION 98 3576.934288 3576.878815 HI-REDUCTION 100 3576.925164 3576.878815 REFLECTION 102 3576.914513 3576.868808 HI-REDUCTION 104 3576.895326 3576.868808 HI-REDUCTION 106 3576.885892 3576.868808 HI-REDUCTION 108 3576.884756 3576.868808 EXTENSION 110 3576.881216 3576.860066 EXTENSION 112 3576.878815 3576.846035 LO-REDUCTION 114 3576.868808 3576.846035 EXTENSION 116 3576.860066 3576.826644 LO-REDUCTION 118 3576.850118 3576.826644 EXTENSION 120 3576.846035 3576.795453 LO-REDUCTION 122 3576.830785 3576.795453 EXTENSION 124 3576.826644 3576.770783 EXTENSION 126 3576.813143 3576.731194 EXTENSION 128 3576.795453 3576.670398 EXTENSION 130 3576.770783 3576.579253 EXTENSION 132 3576.731194 3576.442993 EXTENSION 134 3576.670398 3576.223487 EXTENSION 136 3576.579253 3575.955768 EXTENSION 138 3576.442993 3575.458196 EXTENSION 140 3576.223487 3574.737591 EXTENSION 142 3575.955768 3574.113778 EXTENSION 144 3575.458196 3572.467004 EXTENSION 146 3574.737591 3571.094833 LO-REDUCTION 148 3574.113778 3571.094833 EXTENSION 150 3572.467004 3567.948694 LO-REDUCTION 152 3571.223239 3567.948694 EXTENSION 154 3571.094833 3566.372585 LO-REDUCTION 156 3569.258295 3566.372585 EXTENSION 158 3567.948694 3563.385318 LO-REDUCTION 160 3567.462533 3563.385318 
EXTENSION 162 3566.372585 3560.575633 LO-REDUCTION 164 3565.263780 3560.575633 EXTENSION 166 3563.385318 3555.903915 LO-REDUCTION 168 3560.581821 3555.903915 EXTENSION 170 3560.575633 3553.680856 LO-REDUCTION 172 3555.956041 3553.680856 LO-REDUCTION 174 3555.903915 3553.680856 LO-REDUCTION 176 3553.731773 3553.520094 HI-REDUCTION 178 3553.718627 3553.520094 REFLECTION 180 3553.680856 3553.510276 HI-REDUCTION 182 3553.573127 3553.510276 HI-REDUCTION 184 3553.529694 3553.501609 HI-REDUCTION 186 3553.520094 3553.490628 HI-REDUCTION 188 3553.510276 3553.490074 HI-REDUCTION 190 3553.501609 3553.489874 HI-REDUCTION 192 3553.490628 3553.488166 HI-REDUCTION 194 3553.490074 3553.485596 HI-REDUCTION 196 3553.489874 3553.485526 LO-REDUCTION 198 3553.488166 3553.485526 LO-REDUCTION 200 3553.486044 3553.484862 HI-REDUCTION 202 3553.485596 3553.484790 HI-REDUCTION 204 3553.485526 3553.484790 HI-REDUCTION 206 3553.484862 3553.484686 HI-REDUCTION 208 3553.484809 3553.484638 EXTENSION 210 3553.484790 3553.484345 HI-REDUCTION 212 3553.484686 3553.484345 LO-REDUCTION 214 3553.484638 3553.484345 EXTENSION 216 3553.484449 3553.483714 LO-REDUCTION 218 3553.484392 3553.483714 LO-REDUCTION 220 3553.484345 3553.483714 EXTENSION 222 3553.484052 3553.482870 EXTENSION 224 3553.483862 3553.482183 LO-REDUCTION 226 3553.483714 3553.482183 EXTENSION 228 3553.482870 3553.479861 LO-REDUCTION 230 3553.482285 3553.479861 EXTENSION 232 3553.482183 3553.477861 EXTENSION 234 3553.479938 3553.473345 LO-REDUCTION 236 3553.479861 3553.473345 EXTENSION 238 3553.477861 3553.467046 EXTENSION 240 3553.474385 3553.460532 EXTENSION 242 3553.473345 3553.452464 EXTENSION 244 3553.467046 3553.445373 EXTENSION 246 3553.460532 3553.439398 REFLECTION 248 3553.452464 3553.437243 HI-REDUCTION 250 3553.445373 3553.437243 REFLECTION 252 3553.442210 3553.436552 HI-REDUCTION 254 3553.439398 3553.436552 EXTENSION 256 3553.437748 3553.430185 LO-REDUCTION 258 3553.437243 3553.430185 LO-REDUCTION 260 3553.436552 3553.430185 EXTENSION 262 3553.433970 3553.423188 EXTENSION 264 3553.430361 3553.414657 LO-REDUCTION 266 3553.430185 3553.414657 EXTENSION 268 3553.423188 3553.400875 LO-REDUCTION 270 3553.420802 3553.400875 EXTENSION 272 3553.414657 3553.373760 LO-REDUCTION 274 3553.401760 3553.373760 EXTENSION 276 3553.400875 3553.350247 EXTENSION 278 3553.375225 3553.304267 EXTENSION 280 3553.373760 3553.268822 EXTENSION 282 3553.350247 3553.229079 EXTENSION 284 3553.304267 3553.137320 EXTENSION 286 3553.268822 3553.072409 LO-REDUCTION 288 3553.229079 3553.072409 REFLECTION 290 3553.137320 3553.047964 LO-REDUCTION 292 3553.131070 3553.047964 EXTENSION 294 3553.077571 3552.946477 LO-REDUCTION 296 3553.072409 3552.946477 REFLECTION 298 3553.047964 3552.945615 EXTENSION 300 3552.952602 3552.765711 LO-REDUCTION 302 3552.946477 3552.765711 LO-REDUCTION 304 3552.945615 3552.765711 EXTENSION 306 3552.873655 3552.652256 EXTENSION 308 3552.820500 3552.547562 EXTENSION 310 3552.765711 3552.363444 LO-REDUCTION 312 3552.652256 3552.363444 REFLECTION 314 3552.547562 3552.342542 REFLECTION 316 3552.488021 3552.334013 REFLECTION 318 3552.363444 3552.210067 HI-REDUCTION 320 3552.342542 3552.210067 LO-REDUCTION 322 3552.334013 3552.210067 EXTENSION 324 3552.310291 3552.151579 HI-REDUCTION 326 3552.253251 3552.151579 LO-REDUCTION 328 3552.243475 3552.151579 EXTENSION 330 3552.210067 3552.038246 LO-REDUCTION 332 3552.164428 3552.038246 LO-REDUCTION 334 3552.151579 3552.038246 EXTENSION 336 3552.108582 3552.000997 EXTENSION 338 3552.065680 3551.917912 EXTENSION 340 3552.038246 
3551.884966 HI-REDUCTION 342 3552.000997 3551.884966 REFLECTION 344 3551.942773 3551.819101 LO-REDUCTION 346 3551.917912 3551.819101 LO-REDUCTION 348 3551.884966 3551.819101 REFLECTION 350 3551.854207 3551.806548 REFLECTION 352 3551.838194 3551.785401 HI-REDUCTION 354 3551.819101 3551.785401 LO-REDUCTION 356 3551.811870 3551.785401 HI-REDUCTION 358 3551.806548 3551.785401 LO-REDUCTION 360 3551.799075 3551.785401 HI-REDUCTION 362 3551.791224 3551.785401 REFLECTION 364 3551.788895 3551.778850 LO-REDUCTION 366 3551.785680 3551.778850 LO-REDUCTION 368 3551.785401 3551.778850 EXTENSION 370 3551.783330 3551.775262 HI-REDUCTION 372 3551.780951 3551.775262 LO-REDUCTION 374 3551.779833 3551.775262 HI-REDUCTION 376 3551.778850 3551.775262 LO-REDUCTION 378 3551.777855 3551.775262 HI-REDUCTION 380 3551.777322 3551.775262 EXTENSION 382 3551.776790 3551.774636 HI-REDUCTION 384 3551.776184 3551.774636 REFLECTION 386 3551.775872 3551.774140 HI-REDUCTION 388 3551.775262 3551.774140 LO-REDUCTION 390 3551.775146 3551.774140 LO-REDUCTION 392 3551.774636 3551.774140 EXTENSION 394 3551.774549 3551.773588 HI-REDUCTION 396 3551.774180 3551.773588 HI-REDUCTION 398 3551.774167 3551.773588 HI-REDUCTION 400 3551.774140 3551.773588 LO-REDUCTION 402 3551.774001 3551.773588 REFLECTION 404 3551.773936 3551.773536 HI-REDUCTION 406 3551.773772 3551.773536 HI-REDUCTION 408 3551.773745 3551.773536 HI-REDUCTION 410 3551.773672 3551.773536 HI-REDUCTION 412 3551.773648 3551.773536 HI-REDUCTION 414 3551.773612 3551.773536 HI-REDUCTION 416 3551.773595 3551.773536 HI-REDUCTION 418 3551.773588 3551.773536 Exiting from Nelder Mead minimizer 420 function evaluations used > summary(a) -------------------------------------------- Maximum Likelihood estimation Nelder-Mead maximisation, 420 iterations Return code 0: successful convergence Log-Likelihood: -3552 3 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] 0.5000 0.0257 19.5 <2e-16 *** [2,] -1.0360 0.0582 -17.8 <2e-16 *** [3,] 1.0106 0.0585 17.3 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. Inference is probably wrong Constrained optimization based on constrOptim 1 outer iterations, barrier value -0.0002034 -------------------------------------------- > ## No analytic gradient, BFGS > a <- maxLik(logLikMix, + start=start, + method="bfgs", + constraints=list(ineqA=A, ineqB=B), + print.level=1) initial value 4001.919470 iter 2 value 3924.195358 iter 3 value 3761.072284 iter 4 value 3740.857617 iter 5 value 3739.836172 iter 6 value 3739.669432 iter 7 value 3739.505310 iter 8 value 3739.342677 iter 9 value 3739.271657 iter 10 value 3674.195801 iter 11 value 3671.068581 iter 12 value 3626.166671 iter 13 value 3589.951141 iter 14 value 3552.974393 iter 15 value 3551.811864 iter 16 value 3551.806287 iter 17 value 3551.796760 iter 18 value 3551.796512 iter 19 value 3551.782572 iter 20 value 3551.776358 iter 21 value 3551.776202 iter 22 value 3551.775817 iter 23 value 3551.774783 iter 24 value 3551.773837 iter 25 value 3551.773689 iter 26 value 3551.773624 iter 27 value 3551.773583 iter 27 value 3551.773572 iter 27 value 3551.773564 final value 3551.773564 converged > summary(a) -------------------------------------------- Maximum Likelihood estimation BFGS maximisation, 135 iterations Return code 0: successful convergence Log-Likelihood: -3552 3 free parameters Estimates: Estimate Std. 
error t value Pr(> t) [1,] 0.5000 0.0256 19.5 <2e-16 *** [2,] -1.0359 0.0581 -17.8 <2e-16 *** [3,] 1.0111 0.0584 17.3 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. Inference is probably wrong Constrained optimization based on constrOptim 1 outer iterations, barrier value -0.0001999 -------------------------------------------- > ## ---- > cat("Test for equality constraints\n") Test for equality constraints > A <- matrix(c(0, 1, 2), 1, 3) > B <- 0 > ## default, analytic gradient > a <- maxLik(logLikMix, grad=gradLikMix, hess=hessLikMix, + start=start, + constraints=list(eqA=A, eqB=B), + print.level=1) SUMT initial: rho = 0 , function = -3552 , penalty = 1.004 Estimate:[1] 0.5033 -1.0307 1.0163 SUMT iteration 1: rho = 0.0003086, function = -3552, penalty = 1.004 Estimate:[1] 0.5033 -1.0307 1.0163 SUMT iteration 2: rho = 0.003086, function = -3552, penalty = 1.003 Estimate:[1] 0.5033 -1.0307 1.0162 SUMT iteration 3: rho = 0.03086, function = -3552, penalty = 1.001 Estimate:[1] 0.5031 -1.0311 1.0157 SUMT iteration 4: rho = 0.3086, function = -3552, penalty = 0.9731 Estimate:[1] 0.5013 -1.0352 1.0109 SUMT iteration 5: rho = 3.086, function = -3552, penalty = 0.7532 Estimate:[1] 0.4859 -1.0710 0.9694 SUMT iteration 6: rho = 30.86, function = -3559, penalty = 0.1604 Estimate:[1] 0.4241 -1.2225 0.8115 SUMT iteration 7: rho = 308.6, function = -3570, penalty = 0.003967 Estimate:[1] 0.3789 -1.3439 0.7034 SUMT iteration 8: rho = 3086, function = -3572, penalty = 4.458e-05 Estimate:[1] 0.3714 -1.3652 0.6859 SUMT iteration 9: rho = 30864, function = -3572, penalty = 4.512e-07 Estimate:[1] 0.3706 -1.3675 0.6841 SUMT iteration 10: rho = 308642, function = -3572, penalty = 4.517e-09 Estimate:[1] 0.3705 -1.3677 0.6839 > summary(a) -------------------------------------------- Maximum Likelihood estimation Newton-Raphson maximisation, 2 iterations Return code 1: gradient close to zero Log-Likelihood: -3572 3 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] 0.3705 0.0255 14.6 <2e-16 *** [2,] -1.3677 0.0697 -19.6 <2e-16 *** [3,] 0.6839 0.0514 13.3 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. 
Inference is probably wrong Constrained optimization based on SUMT Return code: 1 penalty close to zero 10 outer iterations, barrier value 4.517e-09 -------------------------------------------- > ## BFGS, numeric gradient > a <- maxLik(logLikMix, + start=start, method="bfgs", + constraints=list(eqA=A, eqB=B), + print.level=2, SUMTRho0=1) initial value 4005.159409 iter 2 value 3739.377952 iter 3 value 3557.996031 iter 4 value 3557.243792 iter 5 value 3553.534181 iter 6 value 3552.770637 iter 7 value 3552.720577 iter 8 value 3552.720532 iter 8 value 3552.720532 final value 3552.720532 converged SUMT iteration 1: rho = 1, function = -3552, penalty = 0.9097 Estimate:[1] 0.4971 -1.0450 0.9994 initial value 3560.907808 iter 2 value 3559.815796 iter 3 value 3558.860634 iter 4 value 3558.554874 iter 5 value 3558.470121 iter 5 value 3558.470095 iter 5 value 3558.470095 final value 3558.470095 converged SUMT iteration 2: rho = 10, function = -3554, penalty = 0.4502 Estimate:[1] 0.4600 -1.1326 0.9018 initial value 3598.986588 iter 2 value 3581.662692 iter 3 value 3575.592026 iter 4 value 3574.296760 iter 5 value 3568.837187 iter 6 value 3568.828225 iter 6 value 3568.828218 iter 6 value 3568.828218 final value 3568.828218 converged SUMT iteration 3: rho = 100, function = -3566, penalty = 0.02948 Estimate:[1] 0.3934 -1.3036 0.7376 initial value 3595.363566 iter 2 value 3575.235881 iter 3 value 3574.538052 iter 4 value 3574.332600 iter 5 value 3571.971345 iter 6 value 3571.969966 iter 6 value 3571.969966 iter 6 value 3571.969966 final value 3571.969966 converged SUMT iteration 4: rho = 1000, function = -3572, penalty = 0.0004131 Estimate:[1] 0.3732 -1.3600 0.6902 initial value 3575.687741 iter 2 value 3572.641972 iter 3 value 3572.631516 iter 4 value 3572.627232 iter 5 value 3572.353862 iter 6 value 3572.348648 iter 6 value 3572.348648 iter 6 value 3572.348648 final value 3572.348648 converged SUMT iteration 5: rho = 10000, function = -3572, penalty = 4.286e-06 Estimate:[1] 0.3707 -1.3670 0.6845 initial value 3572.734360 iter 2 value 3572.469952 iter 3 value 3572.469793 iter 4 value 3572.469005 iter 5 value 3572.387435 iter 6 value 3572.387291 iter 6 value 3572.387291 iter 6 value 3572.387291 final value 3572.387291 converged SUMT iteration 6: rho = 1e+05, function = -3572, penalty = 4.302e-08 Estimate:[1] 0.3705 -1.3677 0.6839 initial value 3572.426005 iter 2 value 3572.391185 iter 2 value 3572.391184 iter 2 value 3572.391164 final value 3572.391164 converged SUMT iteration 7: rho = 1e+06, function = -3572, penalty = 4.348e-10 Estimate:[1] 0.3705 -1.3677 0.6839 > summary(a) -------------------------------------------- Maximum Likelihood estimation BFGS maximisation, 29 iterations Return code 0: successful convergence Log-Likelihood: -3572 3 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] 0.3705 0.0255 14.5 <2e-16 *** [2,] -1.3677 0.0697 -19.6 <2e-16 *** [3,] 0.6839 0.0514 13.3 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. Inference is probably wrong Constrained optimization based on SUMT Return code: 1 penalty close to zero 7 outer iterations, barrier value 4.348e-10 -------------------------------------------- > ## BHHH, analytic gradient (numeric does not converge?) 
> try( maxLik(logLikMix, gradLikMix, + start=start, method="bhhh", + constraints=list(eqA=A, eqB=B), + print.level=2, SUMTRho0=1) ) -------------- successive function values within tolerance limit 18 iterations estimate: 0.4902 1.031 -1.013 Function value: -3553 SUMT iteration 1: rho = 1, function = -3552, penalty = 0.9906 Estimate:[1] 0.4902 1.0313 -1.0133 -------------- successive function values within tolerance limit 10 iterations estimate: 0.4516 1.124 -0.9126 Function value: -3559 SUMT iteration 2: rho = 10, function = -3554, penalty = 0.4917 Estimate:[1] 0.4516 1.1240 -0.9126 -------------- successive function values within tolerance limit 18 iterations estimate: 0.3819 1.307 -0.743 Function value: -3570 SUMT iteration 3: rho = 100, function = -3567, penalty = 0.03214 Estimate:[1] 0.3819 1.3068 -0.7430 -------------- successive function values within tolerance limit 88 iterations estimate: 0.3608 1.367 -0.6943 Function value: -3574 SUMT iteration 4: rho = 1000, function = -3573, penalty = 0.0004494 Estimate:[1] 0.3608 1.3674 -0.6943 -------------- Iteration limit exceeded. 100 iterations estimate: 0.3583 1.376 -0.6889 Function value: -3574 SUMT iteration 5: rho = 10000, function = -3574, penalty = 4.694e-06 Estimate:[1] 0.3583 1.3756 -0.6889 -------------- Iteration limit exceeded. 100 iterations estimate: 0.358 1.376 -0.6883 Function value: -3574 SUMT iteration 6: rho = 1e+05, function = -3574, penalty = 8.22e-08 Estimate:[1] 0.3580 1.3764 -0.6883 -------------- successive function values within tolerance limit 65 iterations estimate: 0.358 1.377 -0.6883 Function value: -3574 SUMT iteration 7: rho = 1e+06, function = -3574, penalty = 4.576e-10 Estimate:[1] 0.3580 1.3765 -0.6883 Maximum Likelihood estimation BHHH maximisation, 65 iterations Return code 2: successive function values within tolerance limit Log-Likelihood: -3574 (3 free parameter(s)) Estimate(s): 0.358 1.377 -0.6883 > > ### ------------------ Now test extra parameters for the function ---- > logLikMix2 <- function(param, rho) { + mu1 <- param[1] + mu2 <- param[2] + ll <- log(rho*dnorm(x - mu1) + (1 - rho)*dnorm(x - mu2)) + # ll <- sum(ll) + ll + } > > gradLikMix2 <- function(param, rho) { + mu1 <- param[1] + mu2 <- param[2] + f1 <- dnorm(x - mu1) + f2 <- dnorm(x - mu2) + L <- rho*f1 + (1 - rho)*f2 + g <- matrix(0, length(x), 2) + g[,1] <- rho*(x - mu1)*f1/L + g[,2] <- (1 - rho)*(x - mu2)*f2/L + # colSums(g) + g + } > > hessLikMix2 <- function(param, rho) { + mu1 <- param[1] + mu2 <- param[2] + f1 <- dnorm(x - mu1) + f2 <- dnorm(x - mu2) + L <- rho*f1 + (1 - rho)*f2 + dldrho <- (f1 - f2)/L + dldmu1 <- rho*(x - mu1)*f1/L + dldmu2 <- (1 - rho)*(x - mu2)*f2/L + h <- matrix(0, 2, 2) + h[1,1] <- sum(rho*(-f1 + (x - mu1)^2*f1)/L - dldmu1^2) + h[1,2] <- h[2,1] <- -sum(dldmu1*dldmu2) + h[2,2] <- sum((1 - rho)*(-f2 + (x - mu2)^2*f2)/L - dldmu2^2) + h + } > > ## ---------- > A <- matrix(c(1, 2), 1, 2) > B <- 0 > start <- c(0, 1) > ## nr, numeric gradient > a <- maxLik(logLikMix2, + start=start, method="nr", + constraints=list(eqA=A, eqB=B), + print.level=1, SUMTRho0=1, rho=0.5) SUMT iteration 1: rho = 1, function = -3552, penalty = 0.9371 Estimate:[1] -1.040 1.004 SUMT iteration 2: rho = 10, function = -3553, penalty = 0.7005 Estimate:[1] -1.0715 0.9542 SUMT iteration 3: rho = 100, function = -3574, penalty = 0.1327 Estimate:[1] -1.1949 0.7796 SUMT iteration 4: rho = 1000, function = -3602, penalty = 0.003096 Estimate:[1] -1.2860 0.6708 SUMT iteration 5: rho = 10000, function = -3608, penalty = 3.455e-05 Estimate:[1] -1.3015 
0.6537 SUMT iteration 6: rho = 1e+05, function = -3609, penalty = 3.494e-07 Estimate:[1] -1.3032 0.6519 SUMT iteration 7: rho = 1e+06, function = -3609, penalty = 3.498e-09 Estimate:[1] -1.3034 0.6517 > summary(a) -------------------------------------------- Maximum Likelihood estimation Newton-Raphson maximisation, 2 iterations Return code 1: gradient close to zero Log-Likelihood: -3609 2 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] -1.3034 0.0458 -28.4 <2e-16 *** [2,] 0.6517 0.0354 18.4 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. Inference is probably wrong Constrained optimization based on SUMT Return code: 1 penalty close to zero 7 outer iterations, barrier value 3.498e-09 -------------------------------------------- > ## nr, numeric hessian > a <- maxLik(logLikMix2, gradLikMix2, + start=start, method="nr", + constraints=list(eqA=A, eqB=B), + print.level=1, SUMTRho0=1, rho=0.5) SUMT iteration 1: rho = 1, function = -3552, penalty = 0.9371 Estimate:[1] -1.040 1.004 SUMT iteration 2: rho = 10, function = -3553, penalty = 0.7005 Estimate:[1] -1.0715 0.9542 SUMT iteration 3: rho = 100, function = -3574, penalty = 0.1327 Estimate:[1] -1.1949 0.7796 SUMT iteration 4: rho = 1000, function = -3602, penalty = 0.003096 Estimate:[1] -1.2860 0.6708 SUMT iteration 5: rho = 10000, function = -3608, penalty = 3.455e-05 Estimate:[1] -1.3015 0.6537 SUMT iteration 6: rho = 1e+05, function = -3609, penalty = 3.494e-07 Estimate:[1] -1.3032 0.6519 SUMT iteration 7: rho = 1e+06, function = -3609, penalty = 3.498e-09 Estimate:[1] -1.3034 0.6517 > summary(a) -------------------------------------------- Maximum Likelihood estimation Newton-Raphson maximisation, 2 iterations Return code 1: gradient close to zero Log-Likelihood: -3609 2 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] -1.3034 0.0458 -28.4 <2e-16 *** [2,] 0.6517 0.0354 18.4 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. Inference is probably wrong Constrained optimization based on SUMT Return code: 1 penalty close to zero 7 outer iterations, barrier value 3.498e-09 -------------------------------------------- > ## nr, analytic hessian > a <- maxLik(logLikMix2, gradLikMix2, hessLikMix2, + start=start, method="nr", + constraints=list(eqA=A, eqB=B), + print.level=1, SUMTRho0=1, rho=0.5) SUMT iteration 1: rho = 1, function = -3552, penalty = 0.9371 Estimate:[1] -1.040 1.004 SUMT iteration 2: rho = 10, function = -3553, penalty = 0.7005 Estimate:[1] -1.0715 0.9542 SUMT iteration 3: rho = 100, function = -3574, penalty = 0.1327 Estimate:[1] -1.1949 0.7796 SUMT iteration 4: rho = 1000, function = -3602, penalty = 0.003096 Estimate:[1] -1.2860 0.6708 SUMT iteration 5: rho = 10000, function = -3608, penalty = 3.455e-05 Estimate:[1] -1.3015 0.6537 SUMT iteration 6: rho = 1e+05, function = -3609, penalty = 3.494e-07 Estimate:[1] -1.3032 0.6519 SUMT iteration 7: rho = 1e+06, function = -3609, penalty = 3.498e-09 Estimate:[1] -1.3034 0.6517 > summary(a) -------------------------------------------- Maximum Likelihood estimation Newton-Raphson maximisation, 2 iterations Return code 1: gradient close to zero Log-Likelihood: -3609 2 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] -1.3034 0.0458 -28.4 <2e-16 *** [2,] 0.6517 0.0354 18.4 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. 
Inference is probably wrong Constrained optimization based on SUMT Return code: 1 penalty close to zero 7 outer iterations, barrier value 3.498e-09 -------------------------------------------- > ## BHHH > a <- maxLik(logLikMix2, gradLikMix2, + start=start, method="bhhh", + constraints=list(eqA=A, eqB=B), + print.level=1, SUMTRho0=1, rho=0.5) SUMT iteration 1: rho = 1, function = -3552, penalty = 0.9371 Estimate:[1] -1.040 1.004 SUMT iteration 2: rho = 10, function = -3553, penalty = 0.7005 Estimate:[1] -1.0715 0.9542 SUMT iteration 3: rho = 100, function = -3574, penalty = 0.1327 Estimate:[1] -1.1949 0.7796 SUMT iteration 4: rho = 1000, function = -3602, penalty = 0.003096 Estimate:[1] -1.2860 0.6708 SUMT iteration 5: rho = 10000, function = -3608, penalty = 3.456e-05 Estimate:[1] -1.3016 0.6537 SUMT iteration 6: rho = 1e+05, function = -3609, penalty = 3.48e-07 Estimate:[1] -1.303 0.652 SUMT iteration 7: rho = 1e+06, function = -3609, penalty = 3.515e-09 Estimate:[1] -1.3037 0.6519 > summary(a) -------------------------------------------- Maximum Likelihood estimation BHHH maximisation, 22 iterations Return code 2: successive function values within tolerance limit Log-Likelihood: -3609 2 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] -1.3037 0.0458 -28.5 <2e-16 *** [2,] 0.6519 0.0354 18.4 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. Inference is probably wrong Constrained optimization based on SUMT Return code: 1 penalty close to zero 7 outer iterations, barrier value 3.515e-09 -------------------------------------------- > ## BHHH, analytic > a <- maxLik(logLikMix2, gradLikMix2, + start=start, method="bhhh", + constraints=list(eqA=A, eqB=B), + print.level=1, SUMTRho0=1, rho=0.5) SUMT iteration 1: rho = 1, function = -3552, penalty = 0.9371 Estimate:[1] -1.040 1.004 SUMT iteration 2: rho = 10, function = -3553, penalty = 0.7005 Estimate:[1] -1.0715 0.9542 SUMT iteration 3: rho = 100, function = -3574, penalty = 0.1327 Estimate:[1] -1.1949 0.7796 SUMT iteration 4: rho = 1000, function = -3602, penalty = 0.003096 Estimate:[1] -1.2860 0.6708 SUMT iteration 5: rho = 10000, function = -3608, penalty = 3.456e-05 Estimate:[1] -1.3016 0.6537 SUMT iteration 6: rho = 1e+05, function = -3609, penalty = 3.48e-07 Estimate:[1] -1.303 0.652 SUMT iteration 7: rho = 1e+06, function = -3609, penalty = 3.515e-09 Estimate:[1] -1.3037 0.6519 > summary(a) -------------------------------------------- Maximum Likelihood estimation BHHH maximisation, 22 iterations Return code 2: successive function values within tolerance limit Log-Likelihood: -3609 2 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] -1.3037 0.0458 -28.5 <2e-16 *** [2,] 0.6519 0.0354 18.4 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. 
Inference is probably wrong Constrained optimization based on SUMT Return code: 1 penalty close to zero 7 outer iterations, barrier value 3.515e-09 -------------------------------------------- > ## bfgs, no analytic gradient > a <- maxLik(logLikMix2, + start=start, method="bfgs", + constraints=list(eqA=A, eqB=B), + print.level=2, SUMTRho0=1, rho=0.5) initial value 3940.429579 iter 2 value 3568.484771 iter 3 value 3565.703967 iter 4 value 3553.364145 iter 5 value 3552.730547 iter 6 value 3552.727255 iter 6 value 3552.727251 final value 3552.727251 converged SUMT iteration 1: rho = 1, function = -3552, penalty = 0.9371 Estimate:[1] -1.040 1.004 initial value 3561.161193 iter 2 value 3560.033226 iter 3 value 3560.021863 iter 4 value 3560.015406 iter 4 value 3560.015405 iter 4 value 3560.015405 final value 3560.015405 converged SUMT iteration 2: rho = 10, function = -3553, penalty = 0.7005 Estimate:[1] -1.0715 0.9542 initial value 3623.062827 iter 2 value 3595.991937 iter 3 value 3595.709044 iter 4 value 3587.340552 iter 5 value 3587.300893 iter 5 value 3587.300887 iter 5 value 3587.300887 final value 3587.300887 converged SUMT iteration 3: rho = 100, function = -3574, penalty = 0.1327 Estimate:[1] -1.1949 0.7796 initial value 3706.731799 iter 2 value 3616.033745 iter 3 value 3615.699091 iter 4 value 3605.546681 iter 5 value 3605.514797 iter 5 value 3605.514797 iter 5 value 3605.514797 final value 3605.514797 converged SUMT iteration 4: rho = 1000, function = -3602, penalty = 0.003096 Estimate:[1] -1.2860 0.6708 initial value 3633.375248 iter 2 value 3610.546094 iter 3 value 3610.527290 iter 4 value 3608.619657 iter 5 value 3608.458020 iter 5 value 3608.458020 iter 5 value 3608.458020 final value 3608.458020 converged SUMT iteration 5: rho = 10000, function = -3608, penalty = 3.455e-05 Estimate:[1] -1.3015 0.6537 initial value 3611.567445 iter 2 value 3609.435906 iter 3 value 3609.427333 iter 4 value 3609.093787 iter 5 value 3608.770731 iter 5 value 3608.770731 iter 5 value 3608.770731 final value 3608.770731 converged SUMT iteration 6: rho = 1e+05, function = -3609, penalty = 3.494e-07 Estimate:[1] -1.3032 0.6519 initial value 3609.085218 iter 2 value 3608.802363 iter 2 value 3608.802361 iter 3 value 3608.802198 iter 3 value 3608.802198 iter 3 value 3608.802197 final value 3608.802197 converged SUMT iteration 7: rho = 1e+06, function = -3609, penalty = 3.497e-09 Estimate:[1] -1.3034 0.6517 > summary(a) -------------------------------------------- Maximum Likelihood estimation BFGS maximisation, 44 iterations Return code 0: successful convergence Log-Likelihood: -3609 2 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] -1.3034 0.0458 -28.4 <2e-16 *** [2,] 0.6517 0.0354 18.4 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. 
Inference is probably wrong Constrained optimization based on SUMT Return code: 1 penalty close to zero 7 outer iterations, barrier value 3.497e-09 -------------------------------------------- > ## bfgs, analytic gradient > a <- maxLik(logLikMix2, + start=start, method="bfgs", + constraints=list(eqA=A, eqB=B), + print.level=2, SUMTRho0=1, rho=0.5) initial value 3940.429579 iter 2 value 3568.484771 iter 3 value 3565.703967 iter 4 value 3553.364145 iter 5 value 3552.730547 iter 6 value 3552.727255 iter 6 value 3552.727251 final value 3552.727251 converged SUMT iteration 1: rho = 1, function = -3552, penalty = 0.9371 Estimate:[1] -1.040 1.004 initial value 3561.161193 iter 2 value 3560.033226 iter 3 value 3560.021863 iter 4 value 3560.015406 iter 4 value 3560.015405 iter 4 value 3560.015405 final value 3560.015405 converged SUMT iteration 2: rho = 10, function = -3553, penalty = 0.7005 Estimate:[1] -1.0715 0.9542 initial value 3623.062827 iter 2 value 3595.991937 iter 3 value 3595.709044 iter 4 value 3587.340552 iter 5 value 3587.300893 iter 5 value 3587.300887 iter 5 value 3587.300887 final value 3587.300887 converged SUMT iteration 3: rho = 100, function = -3574, penalty = 0.1327 Estimate:[1] -1.1949 0.7796 initial value 3706.731799 iter 2 value 3616.033745 iter 3 value 3615.699091 iter 4 value 3605.546681 iter 5 value 3605.514797 iter 5 value 3605.514797 iter 5 value 3605.514797 final value 3605.514797 converged SUMT iteration 4: rho = 1000, function = -3602, penalty = 0.003096 Estimate:[1] -1.2860 0.6708 initial value 3633.375248 iter 2 value 3610.546094 iter 3 value 3610.527290 iter 4 value 3608.619657 iter 5 value 3608.458020 iter 5 value 3608.458020 iter 5 value 3608.458020 final value 3608.458020 converged SUMT iteration 5: rho = 10000, function = -3608, penalty = 3.455e-05 Estimate:[1] -1.3015 0.6537 initial value 3611.567445 iter 2 value 3609.435906 iter 3 value 3609.427333 iter 4 value 3609.093787 iter 5 value 3608.770731 iter 5 value 3608.770731 iter 5 value 3608.770731 final value 3608.770731 converged SUMT iteration 6: rho = 1e+05, function = -3609, penalty = 3.494e-07 Estimate:[1] -1.3032 0.6519 initial value 3609.085218 iter 2 value 3608.802363 iter 2 value 3608.802361 iter 3 value 3608.802198 iter 3 value 3608.802198 iter 3 value 3608.802197 final value 3608.802197 converged SUMT iteration 7: rho = 1e+06, function = -3609, penalty = 3.497e-09 Estimate:[1] -1.3034 0.6517 > summary(a) -------------------------------------------- Maximum Likelihood estimation BFGS maximisation, 44 iterations Return code 0: successful convergence Log-Likelihood: -3609 2 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] -1.3034 0.0458 -28.4 <2e-16 *** [2,] 0.6517 0.0354 18.4 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. 
Inference is probably wrong Constrained optimization based on SUMT Return code: 1 penalty close to zero 7 outer iterations, barrier value 3.497e-09 -------------------------------------------- > ## SANN, analytic gradient > a <- maxLik(logLikMix2, gradLikMix2, + start=start, method="SANN", + constraints=list(eqA=A, eqB=B), + print.level=1, SUMTRho0=1, rho=0.5) SUMT iteration 1: rho = 1, function = -3552, penalty = 0.9372 Estimate:[1] -1.041 1.004 SUMT iteration 2: rho = 10, function = -3553, penalty = 0.7039 Estimate:[1] -1.0738 0.9564 SUMT iteration 3: rho = 100, function = -3574, penalty = 0.1346 Estimate:[1] -1.1943 0.7806 SUMT iteration 4: rho = 1000, function = -3602, penalty = 0.003156 Estimate:[1] -1.2906 0.6734 SUMT iteration 5: rho = 10000, function = -3608, penalty = 3.855e-05 Estimate:[1] -1.2995 0.6528 SUMT iteration 6: rho = 1e+05, function = -3609, penalty = 3.59e-07 Estimate:[1] -1.3023 0.6514 SUMT iteration 7: rho = 1e+06, function = -3609, penalty = 6.049e-09 Estimate:[1] -1.3121 0.6561 > summary(a) -------------------------------------------- Maximum Likelihood estimation SANN maximisation, 10000 iterations Return code 0: successful convergence Log-Likelihood: -3609 2 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] -1.3121 0.0459 -28.6 <2e-16 *** [2,] 0.6561 0.0354 18.5 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. Inference is probably wrong Constrained optimization based on SUMT Return code: 1 penalty close to zero 7 outer iterations, barrier value 6.049e-09 -------------------------------------------- > ## NM, numeric > a <- maxLik(logLikMix2, + start=start, method="nm", + constraints=list(eqA=A, eqB=B), + print.level=1, SUMTRho0=1, rho=0.5) SUMT iteration 1: rho = 1, function = -3552, penalty = 0.9383 Estimate:[1] -1.040 1.004 SUMT iteration 2: rho = 10, function = -3553, penalty = 0.7009 Estimate:[1] -1.0715 0.9544 SUMT iteration 3: rho = 100, function = -3574, penalty = 0.1329 Estimate:[1] -1.1948 0.7797 SUMT iteration 4: rho = 1000, function = -3602, penalty = 0.003089 Estimate:[1] -1.2862 0.6709 SUMT iteration 5: rho = 10000, function = -3608, penalty = 3.433e-05 Estimate:[1] -1.3015 0.6537 SUMT iteration 6: rho = 1e+05, function = -3609, penalty = 3.391e-07 Estimate:[1] -1.3031 0.6519 SUMT iteration 7: rho = 1e+06, function = -3609, penalty = 3.755e-09 Estimate:[1] -1.3034 0.6517 > summary(a) -------------------------------------------- Maximum Likelihood estimation Nelder-Mead maximisation, 55 iterations Return code 0: successful convergence Log-Likelihood: -3609 2 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] -1.3034 0.0458 -28.4 <2e-16 *** [2,] 0.6517 0.0354 18.4 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. Inference is probably wrong Constrained optimization based on SUMT Return code: 1 penalty close to zero 7 outer iterations, barrier value 3.755e-09 -------------------------------------------- > ## ----------- inequality ------------- > A <- matrix(c(-1, 0, + 0, 1), 2,2, byrow=TRUE) > B <- c(1,1) > start <- c(0.8, 0.9) > ## > a <- maxLik(logLikMix2, gradLikMix2, + start=start, method="bfgs", + constraints=list(ineqA=A, ineqB=B), + rho=0.5) > summary(a) -------------------------------------------- Maximum Likelihood estimation BFGS maximisation, 38 iterations Return code 0: successful convergence Log-Likelihood: -3552 2 free parameters Estimates: Estimate Std. 
error t value Pr(> t) [1,] -1.0361 0.0398 -26.0 <2e-16 *** [2,] 1.0108 0.0400 25.3 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. Inference is probably wrong Constrained optimization based on constrOptim 1 outer iterations, barrier value -5.775e-05 -------------------------------------------- > ## > a <- maxLik(logLikMix2, + start=start, method="bfgs", + constraints=list(ineqA=A, ineqB=B), + rho=0.5) > summary(a) -------------------------------------------- Maximum Likelihood estimation BFGS maximisation, 38 iterations Return code 0: successful convergence Log-Likelihood: -3552 2 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] -1.0361 0.0399 -26.0 <2e-16 *** [2,] 1.0108 0.0400 25.3 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. Inference is probably wrong Constrained optimization based on constrOptim 1 outer iterations, barrier value -5.775e-05 -------------------------------------------- > ## > a <- maxLik(logLikMix2, gradLikMix2, + start=start, method="nm", + constraints=list(ineqA=A, ineqB=B), + rho=0.5) > summary(a) -------------------------------------------- Maximum Likelihood estimation Nelder-Mead maximisation, 53 iterations Return code 0: successful convergence Log-Likelihood: -3552 2 free parameters Estimates: Estimate Std. error t value Pr(> t) [1,] -1.0362 0.0398 -26.0 <2e-16 *** [2,] 1.0104 0.0399 25.3 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Warning: constrained likelihood estimation. Inference is probably wrong Constrained optimization based on constrOptim 1 outer iterations, barrier value -5.775e-05 -------------------------------------------- > ## ---------- test vector B for inequality -------------- > B1 <- c(1,-2) > a <- maxLik(logLikMix2, gradLikMix2, + start=c(0.5, 2.5), method="bfgs", + constraints=list(ineqA=A, ineqB=B1), + rho=0.5) > coef(a) [1] -0.8019 2.0000 > # components should be larger than > # (-1, -2) > ## ---- Now test error handling: insert wrong A and B forms ---- > A2 <- c(-1, 0, 0, 1) > try(maxLik(logLikMix2, gradLikMix2, + start=start, method="bfgs", + constraints=list(ineqA=A2, ineqB=B), + print.level=1, rho=0.5) + ) Error in maxOptim(fn = fn, grad = grad, hess = hess, start = start, method = "BFGS", : Inequality constraint A must be a matrix Current dimension > # should explain that matrix needed > A2 <- matrix(c(-1, 0, 0, 1), 1, 4) > try(maxLik(logLikMix2, gradLikMix2, + start=start, method="bfgs", + constraints=list(ineqA=A2, ineqB=B), + print.level=1, rho=0.5) + ) Error in maxOptim(fn = fn, grad = grad, hess = hess, start = start, method = "BFGS", : Inequality constraint A must have the same number of columns as length of the parameter. Currently 4 and 2. > # should explain that wrong matrix > # dimension > B2 <- 1:3 > try(maxLik(logLikMix2, gradLikMix2, + start=start, method="bfgs", + constraints=list(ineqA=A, ineqB=B2), + print.level=1, rho=0.5) + ) Error in maxOptim(fn = fn, grad = grad, hess = hess, start = start, method = "BFGS", : Inequality constraints A and B suggest different number of constraints: 2 and 3 > # A & B do not match > B2 <- matrix(1,2,2) > try(maxLik(logLikMix2, gradLikMix2, + start=start, method="bfgs", + constraints=list(ineqA=A, ineqB=B2), + print.level=1, rho=0.5) + ) Error in maxOptim(fn = fn, grad = grad, hess = hess, start = start, method = "BFGS", : Inequality constraint B must be a vector (or Nx1 matrix). 
Currently 2 columns > # B must be a vector > > ## fixed parameters with constrained optimization, BFGS. Thanks to Bob Loos for finding this error. > ## Optimize 3D hat with one parameter fixed (== 2D hat). > ## Add an equality constraint on that > hat3 <- function(param) { + ## Hat function. Hessian negative definite if sqrt(x^2 + y^2) < 0.5 + x <- param[1] + y <- param[2] + z <- param[3] + exp(-x^2-y^2-z^2) + } > library(maxLik) > sv <- c(1,1,1) > ## constraints: x + y + z >= 2.5 > A <- matrix(c(1,1,1), 1, 3) > B <- -2.5 > constraints <- list(ineqA=A, ineqB=B) > res <- maxBFGS(hat3, start=sv, constraints=constraints, fixed=3) > summary(res) -------------------------------------------- BFGS maximisation Number of iterations: 43 Return code: 0 successful convergence Function value: 0.1194 Estimates: estimate gradient [1,] 0.7501 -0.1791 [2,] 0.7501 -0.1791 [3,] 1.0000 -0.2388 Constrained optimization based on constrOptim 1 outer iterations, barrier value -0.0006592 -------------------------------------------- > > proc.time() user system elapsed 75.708 0.084 75.890 maxLik/tests/constraints.R0000644000176000001440000002007112230721772015366 0ustar ripleyusers### Various tests for constrained optimization ### options(digits=4) logLikMix <- function(param) { rho <- param[1] if(rho < 0 || rho > 1) return(NA) mu1 <- param[2] mu2 <- param[3] ll <- log(rho*dnorm(x - mu1) + (1 - rho)*dnorm(x - mu2)) # ll <- sum(ll) ll } gradLikMix <- function(param) { rho <- param[1] if(rho < 0 || rho > 1) return(NA) mu1 <- param[2] mu2 <- param[3] f1 <- dnorm(x - mu1) f2 <- dnorm(x - mu2) L <- rho*f1 + (1 - rho)*f2 g <- matrix(0, length(x), 3) g[,1] <- (f1 - f2)/L g[,2] <- rho*(x - mu1)*f1/L g[,3] <- (1 - rho)*(x - mu2)*f2/L # colSums(g) g } hessLikMix <- function(param) { rho <- param[1] if(rho < 0 || rho > 1) return(NA) mu1 <- param[2] mu2 <- param[3] f1 <- dnorm(x - mu1) f2 <- dnorm(x - mu2) L <- rho*f1 + (1 - rho)*f2 dldrho <- (f1 - f2)/L dldmu1 <- rho*(x - mu1)*f1/L dldmu2 <- (1 - rho)*(x - mu2)*f2/L h <- matrix(0, 3, 3) h[1,1] <- -sum(dldrho*(f1 - f2)/L) h[2,1] <- h[1,2] <- sum((x - mu1)*f1/L - dldmu1*dldrho) h[3,1] <- h[1,3] <- sum(-(x - mu2)*f2/L - dldmu2*dldrho) h[2,2] <- sum(rho*(-f1 + (x - mu1)^2*f1)/L - dldmu1^2) h[2,3] <- h[3,2] <- -sum(dldmu1*dldmu2) h[3,3] <- sum((1 - rho)*(-f2 + (x - mu2)^2*f2)/L - dldmu2^2) h } ### -------------------------- library(maxLik) ## mixed normal set.seed(1) x <- c(rnorm(1000, mean=-1), rnorm(1000, mean=1)) cat("Test for inequality constraints\n") ## Inequality constraints: x + y + z < 0.5 A <- matrix(c(-1, 0, 0, 0, -1, 0, 0, 0, 1), 3, 3, byrow=TRUE) B <- rep(0.5, 3) start <- c(0.4, 0, 0.9) ## analytic gradient a <- maxLik(logLikMix, grad=gradLikMix, hess=hessLikMix, start=start, constraints=list(ineqA=A, ineqB=B), print.level=1) summary(a) ## No analytic gradient a <- maxLik(logLikMix, start=start, constraints=list(ineqA=A, ineqB=B), print.level=1) summary(a) ## No analytic gradient, BFGS a <- maxLik(logLikMix, start=start, method="bfgs", constraints=list(ineqA=A, ineqB=B), print.level=1) summary(a) ## ---- cat("Test for equality constraints\n") A <- matrix(c(0, 1, 2), 1, 3) B <- 0 ## default, analytic gradient a <- maxLik(logLikMix, grad=gradLikMix, hess=hessLikMix, start=start, constraints=list(eqA=A, eqB=B), print.level=1) summary(a) ## BFGS, numeric gradient a <- maxLik(logLikMix, start=start, method="bfgs", constraints=list(eqA=A, eqB=B), print.level=2, SUMTRho0=1) summary(a) ## BHHH, analytic gradient (numeric does not converge?) 
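## (Before the BHHH run below: a quick sanity check on the equality-constrained
## BFGS fit 'a' obtained just above; a sketch using 'A', 'B' and 'a' as defined
## earlier in this script.)  The constraints list(eqA = A, eqB = B) impose
## A %*% theta + B = 0, here mu1 + 2*mu2 = 0, so the constraint evaluated at
## coef(a) should be numerically close to zero.
print( A %*% coef( a ) + B )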
try( maxLik(logLikMix, gradLikMix, start=start, method="bhhh", constraints=list(eqA=A, eqB=B), print.level=2, SUMTRho0=1) ) ### ------------------ Now test extra parameters for the function ---- logLikMix2 <- function(param, rho) { mu1 <- param[1] mu2 <- param[2] ll <- log(rho*dnorm(x - mu1) + (1 - rho)*dnorm(x - mu2)) # ll <- sum(ll) ll } gradLikMix2 <- function(param, rho) { mu1 <- param[1] mu2 <- param[2] f1 <- dnorm(x - mu1) f2 <- dnorm(x - mu2) L <- rho*f1 + (1 - rho)*f2 g <- matrix(0, length(x), 2) g[,1] <- rho*(x - mu1)*f1/L g[,2] <- (1 - rho)*(x - mu2)*f2/L # colSums(g) g } hessLikMix2 <- function(param, rho) { mu1 <- param[1] mu2 <- param[2] f1 <- dnorm(x - mu1) f2 <- dnorm(x - mu2) L <- rho*f1 + (1 - rho)*f2 dldrho <- (f1 - f2)/L dldmu1 <- rho*(x - mu1)*f1/L dldmu2 <- (1 - rho)*(x - mu2)*f2/L h <- matrix(0, 2, 2) h[1,1] <- sum(rho*(-f1 + (x - mu1)^2*f1)/L - dldmu1^2) h[1,2] <- h[2,1] <- -sum(dldmu1*dldmu2) h[2,2] <- sum((1 - rho)*(-f2 + (x - mu2)^2*f2)/L - dldmu2^2) h } ## ---------- A <- matrix(c(1, 2), 1, 2) B <- 0 start <- c(0, 1) ## nr, numeric gradient a <- maxLik(logLikMix2, start=start, method="nr", constraints=list(eqA=A, eqB=B), print.level=1, SUMTRho0=1, rho=0.5) summary(a) ## nr, numeric hessian a <- maxLik(logLikMix2, gradLikMix2, start=start, method="nr", constraints=list(eqA=A, eqB=B), print.level=1, SUMTRho0=1, rho=0.5) summary(a) ## nr, analytic hessian a <- maxLik(logLikMix2, gradLikMix2, hessLikMix2, start=start, method="nr", constraints=list(eqA=A, eqB=B), print.level=1, SUMTRho0=1, rho=0.5) summary(a) ## BHHH a <- maxLik(logLikMix2, gradLikMix2, start=start, method="bhhh", constraints=list(eqA=A, eqB=B), print.level=1, SUMTRho0=1, rho=0.5) summary(a) ## BHHH, analytic a <- maxLik(logLikMix2, gradLikMix2, start=start, method="bhhh", constraints=list(eqA=A, eqB=B), print.level=1, SUMTRho0=1, rho=0.5) summary(a) ## bfgs, no analytic gradient a <- maxLik(logLikMix2, start=start, method="bfgs", constraints=list(eqA=A, eqB=B), print.level=2, SUMTRho0=1, rho=0.5) summary(a) ## bfgs, analytic gradient a <- maxLik(logLikMix2, start=start, method="bfgs", constraints=list(eqA=A, eqB=B), print.level=2, SUMTRho0=1, rho=0.5) summary(a) ## SANN, analytic gradient a <- maxLik(logLikMix2, gradLikMix2, start=start, method="SANN", constraints=list(eqA=A, eqB=B), print.level=1, SUMTRho0=1, rho=0.5) summary(a) ## NM, numeric a <- maxLik(logLikMix2, start=start, method="nm", constraints=list(eqA=A, eqB=B), print.level=1, SUMTRho0=1, rho=0.5) summary(a) ## ----------- inequality ------------- A <- matrix(c(-1, 0, 0, 1), 2,2, byrow=TRUE) B <- c(1,1) start <- c(0.8, 0.9) ## a <- maxLik(logLikMix2, gradLikMix2, start=start, method="bfgs", constraints=list(ineqA=A, ineqB=B), rho=0.5) summary(a) ## a <- maxLik(logLikMix2, start=start, method="bfgs", constraints=list(ineqA=A, ineqB=B), rho=0.5) summary(a) ## a <- maxLik(logLikMix2, gradLikMix2, start=start, method="nm", constraints=list(ineqA=A, ineqB=B), rho=0.5) summary(a) ## ---------- test vector B for inequality -------------- B1 <- c(1,-2) a <- maxLik(logLikMix2, gradLikMix2, start=c(0.5, 2.5), method="bfgs", constraints=list(ineqA=A, ineqB=B1), rho=0.5) coef(a) # components should be larger than # (-1, -2) ## ---- Now test error handling: insert wrong A and B forms ---- A2 <- c(-1, 0, 0, 1) try(maxLik(logLikMix2, gradLikMix2, start=start, method="bfgs", constraints=list(ineqA=A2, ineqB=B), print.level=1, rho=0.5) ) # should explain that matrix needed A2 <- matrix(c(-1, 0, 0, 1), 1, 4) try(maxLik(logLikMix2, gradLikMix2, 
start=start, method="bfgs", constraints=list(ineqA=A2, ineqB=B), print.level=1, rho=0.5) ) # should explain that wrong matrix # dimension B2 <- 1:3 try(maxLik(logLikMix2, gradLikMix2, start=start, method="bfgs", constraints=list(ineqA=A, ineqB=B2), print.level=1, rho=0.5) ) # A & B do not match B2 <- matrix(1,2,2) try(maxLik(logLikMix2, gradLikMix2, start=start, method="bfgs", constraints=list(ineqA=A, ineqB=B2), print.level=1, rho=0.5) ) # B must be a vector ## fixed parameters with constrained optimization, BFGS. Thanks to Bob Loos for finding this error. ## Optimize 3D hat with one parameter fixed (== 2D hat). ## Add an equality constraint on that hat3 <- function(param) { ## Hat function. Hessian negative definite if sqrt(x^2 + y^2) < 0.5 x <- param[1] y <- param[2] z <- param[3] exp(-x^2-y^2-z^2) } library(maxLik) sv <- c(1,1,1) ## constraints: x + y + z >= 2.5 A <- matrix(c(1,1,1), 1, 3) B <- -2.5 constraints <- list(ineqA=A, ineqB=B) res <- maxBFGS(hat3, start=sv, constraints=constraints, fixed=3) summary(res) maxLik/NAMESPACE0000644000176000001440000000303612222533652012752 0ustar ripleyusersexport( "activePar" ) export( "compareDerivatives" ) export( "condiNumber" ) export( "fnSubset" ) export( "hessian" ) export( "maxBFGS" ) export( "maxBFGSR" ) export( "maxBHHH" ) export( "maxCG" ) export( "maximType" ) export( "maxLik" ) export( "maxNM" ) export( "maxNR" ) export( "maxSANN" ) export( "nIter" ) export( "numericGradient" ) export( "numericHessian" ) export( "numericNHessian" ) export( "returnCode" ) export( "returnMessage" ) importFrom( "miscTools", "nObs" ) importFrom( "miscTools", "nParam" ) importFrom( "miscTools", "sumKeepAttr" ) importFrom( "sandwich", "bread" ) importFrom( "sandwich", "estfun" ) S3method( "activePar", "default" ) S3method( "AIC", "maxLik" ) S3method( "bread", "maxLik" ) S3method( "coef", "maxLik" ) S3method( "coef", "summary.maxLik" ) S3method( "condiNumber", "default" ) S3method( "condiNumber", "maxLik" ) S3method( "estfun", "maxLik" ) S3method( "hessian", "default" ) S3method( "logLik", "maxLik" ) S3method( "logLik", "summary.maxLik" ) S3method( "maximType", "default" ) S3method( "maximType", "maxim" ) S3method( "nIter", "default" ) S3method( "nObs", "maxLik" ) S3method( "nParam", "maxim" ) S3method( "print", "maxLik" ) S3method( "print", "summary.maxim" ) S3method( "print", "summary.maxLik" ) S3method( "returnCode", "default" ) S3method( "returnCode", "maxim" ) S3method( "returnCode", "maxLik" ) S3method( "returnMessage", "default" ) S3method( "returnMessage", "maxim" ) S3method( "returnMessage", "maxLik" ) S3method( "summary", "maxim" ) S3method( "summary", "maxLik" ) S3method( "vcov", "maxLik" ) maxLik/NEWS0000644000176000001440000001546112231400452012226 0ustar ripleyusersTHIS IS THE CHANGELOG OF THE "maxLik" PACKAGE Please note that only the most significant changes are reported here. A full ChangeLog is available in the log messages of the SVN repository on R-Forge. CHANGES IN VERSION 1.2-0 (2013-10-22) * Inequality constraints now support multiple constraints (B may be a vector). * Fixed a bug in documentation, inequality constraint requires A %*% theta + B > 0, not >= 0 as stated earlier. 
* function sumKeepAttr() is imported from the miscTools package now (before maxLik() could not be used by another package when this package imported (and not depended on) the maxLik package) (bug reported and solution provided by Martin Becker) CHANGES IN VERSION 1.1-8 (2013-09-17) * fixed bug that could occur in the Newton-Raphson algorithm if the log-likelihood function returns a vector with observation-specific values or if there are NAs in the function values, gradients, or Hessian CHANGES IN VERSION 1.1-4 (2013-09-16) * the package code is byte-compiled * if the log-likelihood function contains NA, the gradient is not calculated; if components of the gradient contain NA, the Hessian is not calculated * slightly improved documentation * improved warning messages and error messages when doing constrained optimisation * added citation information * added start-up message CHANGES IN VERSION 1.1-2 (2012-03-04) * BHHH only considers free parameters when analysing the size of gradient * numericGradient and numericHessian check for the length of vector function CHANGES IN VERSION 1.1-0 (2012-01-...) * Conjugate-gradient (CG) optimization method included. * it is guaranteed now that the variance covariance matrix returned by the vcov() method is always symmetric. * summary.maxLik is guaranteed to use maxLik specific methods, even if corresponding methods for derived classes have higher priority. CHANGES IN VERSION 1.0-2 (2011-10-16) This is mainly bugfix release. * maxBFGSR works with fixed parameters. * maxBFGS and other optim-based routines work with both fixed parameters and inequality constraints. * constrOptim2 removed from API. Names of it's formal arguments are changed. CHANGES IN VERSION 1.0-0 (2010-10-15) * moved the generic function stdEr() including a default method and a method for objects of class "lm" to the "miscTools" package (hence, this package now depends on the version 0.6-8 of the "miscTools" package that includes stdEr() * if argument print.level is 0 (the default) and some parameters are automatically fixed during the estimation, because the returned log-likelihood value has attributes "constPar" and "newVal", the adjusted "starting values" are no longer printed. CHANGES IN VERSION 0.8-0 * fixed bug that occured in maxBFGS(), mxNM(), and maxSANN if the model had only one parameter and the function specified by argument "grad" returned a vector with the analytical gradients at each observation * maxNR() now performs correctly with argument "iterlim" set to 0 * maxNR, maxBHHH(), maxBFGS(), maxNM(), and maxSANN() now use attributes "gradient" and "hessian" of the object returned by the log-likelihood function; if supplied, these are used instead of arguments "grad" and "hess" * added function maxBFGSR() that implements the BFGS algorithm (in R); this function was originally developed by Yves Croissant and placed in the "mlogit" package * maxNR() now has an argument "bhhhHessian" (defaults to FALSE): if this argument is TRUE, the Hessian is approximated by the BHHH method (using information equality), i.e. 
the BHHH optimization algorithm is used * maxLik() now has an argument 'finalHessian'; if it is TRUE, the final Hessian is returned; if it is the character string "BHHH", the BHHH approximation of the Hessian matrix (using information equality) with attribute "type" set to "BHHH" is returned * maxNR(), maxBHHH(), maxBFGS(), maxNM(), and maxSANN() now additionally return a component "gradientObs" that is the matrix of gradients evaluated at each observation if argument "grad" returns a matrix or argument "grad" is not specified and argument "fn" returns a vector * the definitions of the generic functions nObs() and nParam() have been moved to the "miscTools" package * added methods bread() and estfun() for objects of class "maxLik" (see documentation of the generic functions bread() and estfun() defined in package "sandwich") * replaced argument "activePar" of numericGradient(), numericHessian(), and numericNHessian() by argument "fixed" to be consistent with maxLik(), maxNR(), and the other maxXXX() functions * maxNR(), maxBHHH(), maxBFGSYC(), maxBFGS(), maxNM(), maxSANN(), and summary.maxLik() now return component "fixed" instead of component "activePar" CHANGES IN VERSION 0.7-2 * corrected negative definiteness correction of Hessian in maxNR() which led to infinite loops * changed stopping condition in sumt(): instead of checking whether estimates are stimilar, we check for penalty being low now CHANGES IN VERSION 0.7-0 * Holding parameters fixed in maxNR() (and hence, also in maxBHHH()) should now be done by the new (optional) argument "fixed", because it is convenient to use than the "old" argument "activePar" in many situations. However, the "old" argument "activePar" is kept for backward-compatibility. * added (optional) argument "fixed" to functions maxBFGS(), maxNM(), and maxSANN(), which can be used for holding parameters fixed at their starting values * added function constrOptim2(), which is a modified copy of constrOptim() from the "stats" package, but which includes a bug fix * added optional argument "cand" to function maxSANN(), which can be used to specify a function for generating a new candidate point (passed to argument "gr" of optim()) * added argument "random.seed" to maxSANN() to ensure replicability * several mainly smaller improvements in ML estimations with linear equality and inequality constraints (via sumt() and constrOptim2(), respectively) * several internal changes that make the code easier to maintain CHANGES IN VERSION 0.6-0 * maxLik() can perform maximum likelihood estimations under linear equality and inequality constraints on the parameters now (see documentation of the new argument "constraints"). Please note that estimations under constraints are experimental and have not been thoroughly tested yet. * a new method "stdEr" to extract standard errors of the estimates has been introduced * added a "coef" method for objects of class "summary.maxLik" that extracts the matrix of the estimates, standard errors, t-values, and P-values * some minor bugs have been fixed * we did some general polishing of the returned object and under the hood CHANGES IN VERSION 0.5-12 AND BEFORE * please take a look at the log messages of the SVN repository on R-Forge maxLik/R/0000755000176000001440000000000012231400472011723 5ustar ripleyusersmaxLik/R/nIter.R0000644000176000001440000000030211066630507013133 0ustar ripleyusers## Return #of iterations for maxim objects nIter <- function(x, ...) 
## Number of iterations for iterative models UseMethod("nIter") nIter.default <- function(x, ...) x$iterations maxLik/R/compareDerivatives.R0000644000176000001440000000566711742216552015732 0ustar ripleyuserscompareDerivatives <- function(f, grad, hess=NULL, t0, eps=1e-6, print=TRUE, ...) { ### t0 - initial parameter vector ## ## 1. Initial function and grad eval ## if(print)cat("-------- compare derivatives -------- \n") f0 <- f(t0, ...) attributes(f0) <- NULL # keep only array data when printing if(is.function(grad)) analytic <- grad(t0, ...) else if(is.numeric(grad)) analytic = grad else stop("Argument 'grad' must be either gradient function or ", "pre-computed numeric gradient matrix") out <- list(t0=t0, f.t0=f0, compareGrad = list(analytic=analytic)) # if(is.null(dim(analytic))) { if(print)cat("Note: analytic gradient is vector. ", "Transforming into a matrix form\n") if(length(f0) > 1) analytic <- matrix(analytic, length(analytic), 1) # Note: we assume t0 is a simple vector -> hence gradient # will be a column vector else analytic <- matrix(analytic, 1, length(analytic)) # f returns a scalar -> we have row vector along t0 } if(print) { cat("Function value:\n") print(f0) } if(print)cat("Dim of analytic gradient:", dim(analytic), "\n") numeric <- numericGradient(f, t0, eps, ...) out$compareGrad$numeric = numeric if(print)cat(" numeric :", dim(numeric), "\n") # rDiff <- (analytic - numeric)/analytic rDiff <- ((analytic - numeric) / (0.5*(abs(analytic) + abs(numeric))) ) rDiff[(analytic==0) & (numeric==0)] <- 0 rDiff. <- max(abs(rDiff), na.rm=TRUE) out$compareGrad$rel.diff <- rDiff out$maxRelDiffGrad <- rDiff. # if(print){ if(ncol(analytic) < 2) { a <- cbind(t0, analytic, numeric, rDiff) dimnames(a) <- list(param=names(f0), c("theta 0", "analytic", "numeric", "rel.diff")) print(a) } else { cat("t0\n") print(t0) cat("analytic gradient\n") print(analytic) cat("numeric gradient\n") print(numeric) cat(paste("(anal-num)/(0.5*(abs(anal)+abs(num)))\n")) print(rDiff) a=list(t0=t0, analytic=analytic, numeric=numeric, rel.diff=rDiff) } cat("Max relative difference:", rDiff., "\n") } # out <- list(t0=t0, f.t0=f0, compareGrad=a, maxRelDiffGrad=rDiff.) ## ## Hessian? ## if(!is.null(hess)) { if(print)cat("Comparing hessians: relative dfference\n") anHess <- hess(t0, ...) numHess <- numericGradient(grad, t0, eps, ...) rDifHess <- ((anHess-numHess) / (0.5*(abs(anHess)+abs(numHess))) ) rDifHess[(anHess==0) & (numHess==0)] <- 0 rDifHess. <- max(abs(rDifHess), na.rm=TRUE) if(print)print(rDifHess.) out$compareHessian <- list(analytic = anHess, numeric = numHess, rel.diff = rDifHess) out$maxRelDiffHess = rDifHess. } if(print)cat("-------- END of compare derivatives -------- \n") invisible(out) } maxLik/R/vcov.maxLik.R0000755000176000001440000000221111711073173014254 0ustar ripleyusers## maxLik vcov.maxLik <- function(object, eigentol=1e-12, ...) 
{ ## if exists $varcovar, take it if(!is.null(object$varcovar)) return(object$varcovar) ## otherwise invert hessian activePar <- activePar(object) if(!is.null(hess <- hessian(object))) { hess <- hessian(object)[activePar, activePar,drop=FALSE] hessev <- abs(eigen(hess, symmetric=TRUE, only.values=TRUE)$values) varcovar <- matrix(0, nParam.maxim(object), nParam.maxim(object)) # this makes the fixed parameters to 0 rownames( varcovar ) <- colnames(varcovar ) <- names(coef.maxLik(object)) if(min(hessev) > (eigentol*max(hessev))) { ## If hessian is not singular, fill in the free parameter values varcovar[activePar,activePar] <- solve(-hessian(object)[activePar,activePar]) # guarantee that the returned variance covariance matrix is symmetric varcovar <- ( varcovar + t( varcovar ) ) / 2 } else { ## If singular, the free parameter values will be Inf varcovar[activePar,activePar] <- Inf } return(varcovar) } else return(NULL) } maxLik/R/maxNR.R0000644000176000001440000001344611415340427013111 0ustar ripleyusersmaxNR <- function(fn, grad=NULL, hess=NULL, start, print.level=0, tol=1e-8, reltol=sqrt(.Machine$double.eps), gradtol=1e-6, steptol=1e-10, lambdatol=1e-6, qrtol=1e-10, iterlim=150, constraints=NULL, finalHessian=TRUE, bhhhHessian=FALSE, fixed=NULL, activePar=NULL, ...) { ## Newton-Raphson maximisation ## Parameters: ## fn - the function to be minimized. Returns either scalar or ## vector value with possible attributes ## constPar and newVal ## grad - gradient function (numeric used if missing). Must return either ## * vector, length=nParam ## * matrix, dim=c(nObs, 1). Treated as vector ## * matrix, dim=c(M, nParam), where M is arbitrary. In this case the ## rows are simply summed (useful for maxBHHH). ## hess - hessian function (numeric used if missing) ## start - initial parameter vector (eventually w/names) ## steptol - minimum step size ## lambdatol - max lowest eigenvalue when forcing pos. definite H ## qrtol - tolerance for qr decomposition ## ... - extra arguments for fn() ## The stopping criteria ## tol - maximum allowed absolute difference between sequential values ## reltol - maximum allowed reltive difference (stops if < reltol*(abs(fn) + reltol) ## gradtol - maximum allowed norm of gradient vector ## iterlim - maximum # of iterations ## finalHessian include final Hessian? As computing final hessian does not carry any extra penalty for NR method, this option is ## mostly for compatibility reasons with other maxXXX functions. ## TRUE/something else include ## FALSE do not include ## activePar - an index vector -- which parameters are taken as ## variable (free). Other paramters are treated as ## fixed constants ## fixed index vector, which parameters to keep fixed ## ## RESULTS: ## a list of class "maxim": ## maximum function value at maximum ## estimate the parameter value at maximum ## gradient gradient ## hessian Hessian ## code integer code of success: ## 1 - gradient close to zero ## 2 - successive values within tolerance limit ## 3 - could not find a higher point (step error) ## 4 - iteration limit exceeded ## 100 - initial value out of range ## message character message describing the code ## last.step only present if code == 3 (step error). 
A list with following components: ## theta0 - parameter value which led to the error ## f0 - function value at these parameter values ## climb - the difference between theta0 and the new approximated parameter value (theta1) ## activePar - logical vector, which parameters are active (not constant) ## activePar logical vector, which parameters were treated as free (resp fixed) ## iterations number of iterations ## type "Newton-Raphson maximisation" argNames <- c( "fn", "grad", "hess", "start", "print.level", "tol", "reltol", "gradtol", "steptol", "lambdatol", "qrtol", "iterlim", "activePar", "fixed" ) checkFuncArgs( fn, argNames, "fn", "maxNR" ) if( !is.null( grad ) ) { checkFuncArgs( grad, argNames, "grad", "maxNR" ) } if( !is.null( hess ) ) { checkFuncArgs( hess, argNames, "hess", "maxNR" ) } ## establish the active parameters. Internally, we just use 'activePar' fixed <- prepareFixed( start = start, activePar = activePar, fixed = fixed ) if(is.null(constraints)) { result <- maxNRCompute(fn=logLikAttr, fnOrig = fn, gradOrig = grad, hessOrig = hess, start=start, print.level=print.level, tol=tol, reltol=reltol, gradtol=gradtol, steptol=steptol, lambdatol=lambdatol, qrtol=qrtol, finalHessian=finalHessian, bhhhHessian=bhhhHessian, iterlim=iterlim, fixed=fixed, ...) } else { if(identical(names(constraints), c("ineqA", "ineqB"))) { stop("Inequality constraints not implemented for maxNR") } else if(identical(names(constraints), c("eqA", "eqB"))) { # equality constraints: A %*% beta + B = 0 result <- sumt(fn=fn, grad=grad, hess=hess, start=start, maxRoutine=maxNR, constraints=constraints, print.level=print.level, tol=tol, reltol=reltol, gradtol=gradtol, steptol=steptol, lambdatol=lambdatol, qrtol=qrtol, finalHessian=finalHessian, bhhhHessian=bhhhHessian, iterlim=iterlim, fixed=fixed, ...) } else { stop("maxBFGS only supports the following constraints:\n", "constraints=list(ineqA, ineqB)\n", "\tfor A %*% beta + B >= 0 linear inequality constraints\n", "current constraints:", paste(names(constraints), collapse=" ")) } } return( result ) } maxLik/R/maxNRCompute.R0000644000176000001440000003375112230721771014450 0ustar ripleyusersmaxNRCompute <- function(fn, start, print.level=0, tol=1e-8, reltol=sqrt(.Machine$double.eps), gradtol=1e-6, steptol=1e-10, lambdatol=1e-6, qrtol=1e-10, iterlim=150, finalHessian=TRUE, bhhhHessian = FALSE, fixed=NULL, ...) { ## Newton-Raphson maximisation ## Parameters: ## fn - the function to be minimized. Returns either scalar or ## vector value with possible attributes ## constPar and newVal ## fn must return the value with attributes 'gradient' ## and 'hessian' ## fn must have an argument sumObs ## start - initial parameter vector (eventually w/names) ## steptol - minimum step size ## lambdatol - max lowest eigenvalue when forcing pos. definite H ## qrtol - tolerance for qr decomposition ## ... - extra arguments for fn() ## The stopping criteria ## tol - maximum allowed absolute difference between sequential values ## reltol - maximum allowed reltive difference (stops if < reltol*(abs(fn) + reltol) ## gradtol - maximum allowed norm of gradient vector ## ## iterlim - maximum # of iterations ## finalHessian include final Hessian? As computing final hessian does not carry any extra penalty for NR method, this option is ## mostly for compatibility reasons with other maxXXX functions. ## TRUE/something else include ## FALSE do not include ## fixed - a logical vector -- which parameters are taken as fixed. ## Other paramters are treated as variable (free). ## ... 
additional argument to 'fn'. This may include ## 'fnOrig', 'gradOrig', 'hessOrig' if called fromm ## 'maxNR'. ## ## RESULTS: ## a list of class "maxim": ## maximum function value at maximum ## estimate the parameter value at maximum ## gradient gradient ## hessian Hessian ## code integer code of success: ## 1 - gradient close to zero ## 2 - successive values within tolerance limit ## 3 - could not find a higher point (step error) ## 4 - iteration limit exceeded ## 100 - initial value out of range ## message character message describing the code ## last.step only present if code == 3 (step error). A list with following components: ## theta0 - parameter value which led to the error ## f0 - function value at these parameter values ## climb - the difference between theta0 and the new approximated parameter value (theta1) ## fixed - logical vector, which parameters are constant (fixed, inactive, non-free) ## fixed logical vector, which parameters were treated as constant (fixed, inactive, non-free) ## iterations number of iterations ## type "Newton-Raphson maximisation" ## max.eigen <- function( M) { ## return maximal eigenvalue of (symmetric) matrix val <- eigen(M, symmetric=TRUE, only.values=TRUE)$values val[1] ## L - eigenvalues in decreasing order, [1] - biggest in abs value } ## ------------------------------------------------- maxim.type <- "Newton-Raphson maximisation" nimed <- names(start) nParam <- length(start) samm <- NULL I <- diag(rep(1, nParam)) # I is unit matrix start1 <- start iter <- 0 returnHessian <- ifelse( bhhhHessian, "BHHH", TRUE ) f1 <- fn(start1, fixed = fixed, sumObs = TRUE, returnHessian = returnHessian, ...) if(print.level > 2) { cat("Initial function value:", f1, "\n") } if(any(is.na( f1))) { result <- list(code=100, message=maximMessage("100"), iterations=0, type=maxim.type) class(result) <- "maxim" return(result) } if(any(is.infinite( f1)) && sum(f1) > 0) { # we stop at +Inf but not at -Inf result <- list(code=5, message=maximMessage("5"), iterations=0, type=maxim.type) class(result) <- "maxim" return(result) } if( isTRUE( attr( f1, "gradBoth" ) ) ) { warning( "the gradient is provided both as attribute 'gradient' and", " as argument 'grad': ignoring argument 'grad'" ) } if( isTRUE( attr( f1, "hessBoth" ) ) ) { warning( "the Hessian is provided both as attribute 'hessian' and", " as argument 'hess': ignoring argument 'hess'" ) } G1 <- attr( f1, "gradient" ) if(print.level > 2) { cat("Initial gradient value:\n") print(G1) } if(any(is.na(G1[!fixed]))) { stop("NA in the initial gradient") } if(any(is.infinite(G1[!fixed]))) { stop("Infinite initial gradient") } if(length(G1) != nParam) { stop( "length of gradient (", length(G1), ") not equal to the no. 
of parameters (", nParam, ")" ) } H1 <- attr( f1, "hessian" ) if(print.level > 3) { cat("Initial Hessian value:\n") print(H1) } if(length(H1) == 1) { # Allow the user program to return a # single NA in case of out of support or # other problems if(any(is.na(H1))) stop("NA in the initial Hessian") } if(any(is.na(H1[!fixed, !fixed]))) { stop("NA in the initial Hessian") } if(any(is.infinite(H1))) { stop("Infinite initial Hessian") } if( print.level > 1) { cat( "----- Initial parameters: -----\n") cat( "fcn value:", as.vector(f1), "\n") a <- cbind(start, G1, as.integer(!fixed)) dimnames(a) <- list(nimed, c("parameter", "initial gradient", "free")) print(a) cat( "Condition number of the (active) hessian:", kappa( H1[!fixed, !fixed]), "\n") if( print.level > 3) { print( H1) } } repeat { if( iter >= iterlim) { code <- 4; break } iter <- iter + 1 lambda <- 0 start0 <- start1 f0 <- f1 G0 <- G1 if(any(is.na(G0[!fixed]))) { stop("NA in gradient (at the iteration start)") } H0 <- H1 if(any(is.na(H0[!fixed, !fixed]))) { stop("NA in Hessian (at the iteration start)") } step <- 1 H <- H0 ## check whether hessian is positive definite while((me <- max.eigen( H[!fixed,!fixed,drop=FALSE])) >= -lambdatol | (qRank <- qr(H[!fixed,!fixed], tol=qrtol)$rank) < sum(!fixed)) { # maximum eigenvalue -> negative definite # qr()$rank -> singularity lambda <- abs(me) + lambdatol + min(abs(diag(H)[!fixed]))/1e7 # The third term corrects numeric singularity. If diag(H) only contains large values, # (H - (a small number)*I) == H because of finite precision H <- H - lambda*I # how to make it better? } amount <- vector("numeric", nParam) amount[!fixed] <- qr.solve(H[!fixed,!fixed,drop=FALSE], G0[!fixed], tol=qrtol) start1 <- start0 - step*amount f1 <- fn(start1, fixed = fixed, sumObs = TRUE, returnHessian = returnHessian, ...) # The call calculates new function, # gradient, and Hessian values ## Are we requested to fix some of the parameters? constPar <- attr(f1, "constPar") if(!is.null(constPar)) { if(any(is.na(constPar))) { stop("NA in the list of constants") } fixed <- rep(FALSE, nParam) fixed[constPar] <- TRUE } ## Are we asked to write in a new value for some of the parameters? if(is.null(newVal <- attr(f1, "newVal"))) { ## no ... while( any(is.na(f1)) || ( ( sum(f1) < sum(f0) ) && ( step >= steptol))) { # We end up in a NA or a higher value. # try smaller step step <- step/2 start1 <- start0 - step*amount if(print.level > 2) { if(print.level > 3) { cat("Try new parameters:\n") print(start1) } cat("function value difference", f1 - f0, "-> step", step, "\n") } f1 <- fn(start1, fixed = fixed, sumObs = TRUE, returnHessian = returnHessian, ...) # WTF does the 'returnHessian' here ? ## Find out the constant parameters -- these may be other than ## with full step constPar <- attr(f1, "constPar") if(!is.null(constPar)) { if(any(is.na(constPar))) { stop("NA in the list of constants") } fixed[constPar] <- TRUE ## Any new values requested? if(!is.null(newVal <- attr(f1, "newVal"))) { ## Yes. Write them to parameters and go for ## next iteration start1[newVal$index] <- newVal$val break; } } } if(step < steptol) { # we did not find a better place to go... start1 <- start0 f1 <- f0 samm <- list(theta0=start0, f0=f0, climb=amount) } } else { ## Yes, indeed. New values given to some of the params. 
## Note, this may result in a lower function value, ## hence we do not check f1 > f0 start1[newVal$index] <- newVal$val if( print.level > 0 ) { cat( "Keeping parameter(s) ", paste( newVal$index, collapse = ", " ), " at the fixed values ", paste( newVal$val, collapse = ", " ), ", as the log-likelihood function", " returned attributes 'constPar' and 'newVal'\n", sep = "" ) } } G1 <- attr( f1, "gradient" ) if(any(is.na(G1[!fixed]))) { cat("Iteration", iter, "\n") cat("Parameter:\n") print(start1) if(length(G1) < 30) { cat("Gradient:\n") print(G1) } stop("NA in gradient") } if(any(is.infinite(G1))) { code <- 6; break; } H1 <- attr( f1, "hessian" ) if( print.level > 1) { cat( "-----Iteration", iter, "-----\n") } if(any(is.infinite(H1))) { code <- 7; break } if(print.level > 2) { cat( "lambda ", lambda, " step", step, " fcn value:", formatC(as.vector(f1), digits=8, format="f"), "\n") a <- cbind(amount, start1, G1, as.integer(!fixed)) dimnames(a) <- list(names(start0), c("amount", "new param", "new gradient", "active")) print(a) if( print.level > 3) { cat("Hessian\n") print( H1) } if(!any(is.na(H1[!fixed, !fixed]))) { cat( "Condition number of the hessian:", kappa(H1[!fixed,!fixed,drop=FALSE]), "\n") } } if( step < steptol) { code <- 3; break } if( sqrt( crossprod( G1[!fixed] ) ) < gradtol ) { code <-1; break } if(is.null(newVal) && sum(f1) - sum(f0) < tol) { code <- 2; break } if(is.null(newVal) && sum(f1) - sum(f0) < reltol * ( sum(f1) + reltol)) { code <- 2; break } if(any(is.infinite(f1)) && sum(f1) > 0) { code <- 5; break } } if( print.level > 0) { cat( "--------------\n") cat( maximMessage( code), "\n") cat( iter, " iterations\n") cat( "estimate:", start1, "\n") cat( "Function value:", f1, "\n") } names(start1) <- nimed F1 <- fn( start1, fixed = fixed, sumObs = FALSE, returnHessian = ( finalHessian == TRUE ), ... ) G1 <- attr( F1, "gradient" ) if(observationGradient(G1, length(start1))) { gradientObs <- G1 colnames( gradientObs ) <- nimed G1 <- colSums(as.matrix(G1 )) } else { gradientObs <- NULL } names( G1 ) <- nimed ## calculate (final) Hessian if(tolower(finalHessian) == "bhhh") { if(!is.null(gradientObs)) { hessian <- - crossprod( gradientObs ) attr(hessian, "type") <- "BHHH" } else { hessian <- NULL warning("For computing the final Hessian by 'BHHH' method, the log-likelihood or gradient must be supplied by observations") } } else if( finalHessian != FALSE ) { hessian <- attr( F1, "hessian" ) } else { hessian <- NULL } if( !is.null( hessian ) ) { rownames( hessian ) <- colnames( hessian ) <- nimed } ## remove attributes from final value of objective (likelihood) function attributes( f1 )$gradient <- NULL attributes( f1 )$hessian <- NULL attributes( f1 )$gradBoth <- NULL attributes( f1 )$hessBoth <- NULL ## result <-list( maximum = unname( drop( f1 ) ), estimate=start1, gradient=drop(G1), hessian=hessian, code=code, message=maximMessage( code), last.step=samm, # only when could not find a # lower point fixed=fixed, iterations=iter, type=maxim.type) if( exists( "gradientObs" ) ) { result$gradientObs <- gradientObs } class(result) <- c("maxim", class(result)) invisible(result) } returnCode.maxim <- function(x, ...) x$code maxLik/R/fnSubset.R0000644000176000001440000000272011066630507013651 0ustar ripleyusersfnSubset <- function(x, fnFull, xFixed, xFull=c(x, xFixed), ...){ ## ## 1. 
Confirm length(x)+length(xFixed) = length(xFull) ## nx <- length(x) nFixed <- length(xFixed) nFull <- length(xFull) if((nx+nFixed) != nFull) stop("length(x)+length(xFixed) != length(xFull): ", nx, " + ", nFixed, " != ", nFull) ## ## 2. names(xFull)? ## # 2.1. is.null(names(xFull)) if(is.null(names(xFull))) return(fnFull(c(x, xFixed), ...)) # 2.2. xFull[names(xFixed)] <- xFixed, ... { if(is.null(names(xFixed))){ if(is.null(names(x))) xFull <- c(x, xFixed) else { x. <- (names(xFull) %in% names(x)) if(sum(x.) != nx){ print(x) print(xFull) stop("x has names not in xFull.") } xFull[names(x)] <- x xFull[!x.] <- xFixed } } else { Fixed <- (names(xFull) %in% names(xFixed)) if(sum(Fixed) != nFixed){ print(xFixed) print(xFull) stop("xFixed has names not in xFull.") } xFull[names(xFixed)] <- xFixed { if(is.null(names(x))) xFull[!Fixed] <- x else { x. <- (names(xFull) %in% names(x)) if(sum(x.) != nx){ print(x) print(xFull) stop("x has names not in xFull.") } xFull[names(x)] <- x } } } } ## ## 3. fnFull(...) ## fnFull(xFull, ...) } maxLik/R/sumGradients.R0000644000176000001440000000037211413611743014523 0ustar ripleyuserssumGradients <- function( gr, nParam ) { if( !is.null(dim(gr))) { gr <- colSums(gr) } else { ## ... or vector if only one parameter if( nParam == 1 && length( gr ) > 1 ) { gr <- sum(gr) } } return( gr ) }maxLik/R/maxBFGSR.R0000644000176000001440000001270511643562143013436 0ustar ripleyusers maxBFGSR <- function(fn, grad=NULL, hess=NULL, start, print.level=0, tol=1e-8, reltol=sqrt(.Machine$double.eps), gradtol=1e-6, steptol=1e-10, lambdatol=1e-6, qrtol=1e-10, iterlim=150, constraints=NULL, finalHessian=TRUE, fixed=NULL, activePar=NULL, ...) { ## Newton-Raphson maximisation ## Parameters: ## fn - the function to be minimized. Returns either scalar or ## vector value with possible attributes ## constPar and newVal ## grad - gradient function (numeric used if missing). Must return either ## * vector, length=nParam ## * matrix, dim=c(nObs, 1). Treated as vector ## * matrix, dim=c(M, nParam), where M is arbitrary. In this case the ## rows are simply summed (useful for maxBHHH). ## hess - hessian function (numeric used if missing) ## start - initial parameter vector (eventually w/names) ## steptol - minimum step size ## ... - extra arguments for fn() ## The stopping criteria ## tol - maximum allowed absolute difference between sequential values ## reltol - maximum allowed reltive difference (stops if < reltol*(abs(fn) + reltol) ## gradtol - maximum allowed norm of gradient vector ## iterlim - maximum # of iterations ## finalHessian include final Hessian? As computing final hessian does not carry any extra penalty for NR method, this option is ## mostly for compatibility reasons with other maxXXX functions. ## TRUE/something else include ## FALSE do not include ## activePar - an index vector -- which parameters are taken as ## variable (free). Other paramters are treated as ## fixed constants ## fixed index vector, which parameters to keep fixed ## ## RESULTS: ## a list of class "maxim": ## maximum function value at maximum ## estimate the parameter value at maximum ## gradient gradient ## hessian Hessian ## code integer code of success: ## 1 - gradient close to zero ## 2 - successive values within tolerance limit ## 3 - could not find a higher point (step error) ## 4 - iteration limit exceeded ## 100 - initial value out of range ## message character message describing the code ## last.step only present if code == 3 (step error). 
A list with following components: ## theta0 - parameter value which led to the error ## f0 - function value at these parameter values ## climb - the difference between theta0 and the new approximated parameter value (theta1) ## activePar - logical vector, which parameters are active (not constant) ## activePar logical vector, which parameters were treated as free (resp fixed) ## iterations number of iterations ## type "Newton-Raphson maximisation" argNames <- c( "fn", "grad", "hess", "start", "print.level", "tol", "reltol", "gradtol", "steptol", "iterlim", "activePar", "fixed" ) checkFuncArgs( fn, argNames, "fn", "maxNR" ) if( !is.null( grad ) ) { checkFuncArgs( grad, argNames, "grad", "maxNR" ) } if( !is.null( hess ) ) { checkFuncArgs( hess, argNames, "hess", "maxNR" ) } ## establish the active parameters. Internally, we just use 'activePar' fixed <- prepareFixed( start = start, activePar = activePar, fixed = fixed ) if(is.null(constraints)) { result <- maxBFGSRCompute(fn=logLikAttr, fnOrig = fn, gradOrig = grad, hessOrig = hess, start=start, print.level=print.level, tol=tol, reltol=reltol, gradtol=gradtol, steptol=steptol, lambdatol=lambdatol, qrtol=qrtol, iterlim=iterlim, finalHessian=finalHessian, fixed=fixed, ...) } else { if(identical(names(constraints), c("ineqA", "ineqB"))) { stop("Inequality constraints not implemented for maxNR") } else if(identical(names(constraints), c("eqA", "eqB"))) { # equality constraints: A %*% beta + B = 0 result <- sumt(fn=fn, grad=grad, hess=hess, start=start, maxRoutine=maxBFGSR, constraints=constraints, print.level=print.level, tol=tol, reltol=reltol, gradtol=gradtol, steptol=steptol, lambdatol=lambdatol, qrtol=qrtol, iterlim=iterlim, fixed=fixed, ...) } else { stop("maxBFGS only supports the following constraints:\n", "constraints=list(ineqA, ineqB)\n", "\tfor A %*% beta + B >= 0 linear inequality constraints\n", "current constraints:", paste(names(constraints), collapse=" ")) } } return( result ) } maxLik/R/nParam.R0000644000176000001440000000040211711073172013265 0ustar ripleyusers## Return the #of parameters of model nParam.maxim <- function(x, free=FALSE, ...) { if(!inherits(x, "maxim")) { stop("'nParam.maxim' called on non-'maxim' object") } if(free) sum( activePar( x ) ) else length( x$estimate ) } maxLik/R/AIC.R0000644000176000001440000000021211066630507012446 0ustar ripleyusers## Akaike (and other) information criteria AIC.maxLik <- function(object, ..., k = 2) -2*logLik(object) + k*nParam(object, free=TRUE) maxLik/R/maxBHHH.R0000644000176000001440000000176411415363066013307 0ustar ripleyusersmaxBHHH <- function(fn, grad=NULL, hess=NULL, start, print.level=0, iterlim=100, finalHessian="BHHH", ...) { ## hess: Hessian, not used, for compatibility with the other methods ## check if arguments of user-provided functions have reserved names argNames <- c( "fn", "grad", "hess", "start", "print.level", "iterlim" ) checkFuncArgs( fn, argNames, "fn", "maxBHHH" ) if( !is.null( grad ) ) { checkFuncArgs( grad, argNames, "grad", "maxBHHH" ) } if( !is.null( hess ) ) { checkFuncArgs( hess, argNames, "hess", "maxBHHH" ) } ## using the Newton-Raphson algorithm with BHHH method for Hessian a <- maxNR( fn=fn, grad = grad, hess = hess, start=start, iterlim=iterlim, print.level=print.level, finalHessian = finalHessian, bhhhHessian = TRUE, ...) a$type = "BHHH maximisation" invisible(a) } maxLik/R/nObs.R0000644000176000001440000000131611400730216012747 0ustar ripleyusers## Return #of observations for models nObs.maxLik <- function(x, ...) 
{ if( is.null( x$gradientObs ) ) { stop( "cannot return the number of observations:", " please re-run 'maxLik' and", " provide a gradient function using argument 'grad' or", " (if no gradient function is specified) a log-likelihood function", " using argument 'logLik'", " that return the gradients or log-likelihood values, respectively,", " at each observation" ) } else if( is.matrix( x$gradientObs ) ) { return( nrow( x$gradientObs ) ) } else { stop( "internal error: component 'gradientObs' is not a matrix.", " Please contact the developers." ) } } maxLik/R/maxOptim.R0000644000176000001440000002455112230721772013663 0ustar ripleyusersmaxOptim <- function(fn, grad, hess, start, method, fixed, print.level, iterlim, constraints, tol, reltol, finalHessian=TRUE, parscale, alpha = NULL, beta = NULL, gamma = NULL, temp = NULL, tmax = NULL, random.seed = NULL, cand = NULL, ...) { ## Wrapper of optim-based optimization methods ## ## finalHessian: how (and if) to calculate the final Hessian: ## FALSE not calculate ## TRUE use analytic/numeric Hessian ## bhhh/BHHH use information equality approach ## if( method == "Nelder-Mead" ) { maxMethod <- "maxNM" } else { maxMethod <- paste( "max", method, sep = "" ) } argNames <- c( "fn", "grad", "hess", "start", "print.level", "iterlim", "constraints", "tol", "reltol", "parscale", "alpha", "beta", "gamma", "temp", "tmax" ) checkFuncArgs( fn, argNames, "fn", maxMethod ) if( !is.null( grad ) ) { checkFuncArgs( grad, argNames, "grad", maxMethod ) } if( !is.null( hess ) ) { checkFuncArgs( hess, argNames, "hess", maxMethod ) } ## check argument 'fixed' fixed <- prepareFixed( start = start, activePar = NULL, fixed = fixed ) message <- function(c) { switch(as.character(c), "0" = "successful convergence", "1" = "iteration limit exceeded", "10" = "degeneracy in Nelder-Mead simplex", "51" = "warning from the 'L-BFGS-B' method; see the corresponding component 'message' for details", "52" = "error from the 'L-BFGS-B' method; see the corresponding component 'message' for details" ) } ## initialize variables for saving gradients provided as attributes ## and the corresponding parameter values lastFuncGrad <- NULL lastFuncParam <- NULL ## strip possible SUMT parameters and call the function thereafter environment( callWithoutSumt ) <- environment() maximType <- paste( method, "maximisation" ) parscale <- rep(parscale, length.out=length(start)) control <- list(trace=max(print.level, 0), REPORT=1, fnscale=-1, reltol=reltol, maxit=iterlim, parscale=parscale[ !fixed ], alpha=alpha, beta=beta, gamma=gamma, temp=temp, tmax=tmax ) f1 <- callWithoutSumt( start, "logLikFunc", fnOrig = fn, gradOrig = grad, hessOrig = hess, ...) if(is.na( f1)) { result <- list(code=100, message=maximMessage("100"), iterations=0, type=maximType) class(result) <- "maxim" return(result) } if(print.level > 2) { cat("Initial function value:", f1, "\n") } hasGradAttr <- !is.null( attr( f1, "gradient" ) ) if( hasGradAttr && !is.null( grad ) ) { grad <- NULL warning( "the gradient is provided both as attribute 'gradient' and", " as argument 'grad': ignoring argument 'grad'" ) } hasHessAttr <- !is.null( attr( f1, "hessian" ) ) if( hasHessAttr && !is.null( hess ) ) { hess <- NULL warning( "the Hessian is provided both as attribute 'hessian' and", " as argument 'hess': ignoring argument 'hess'" ) } if( method == "BFGS" ) { G1 <- callWithoutSumt( start, "logLikGrad", fnOrig = fn, gradOrig = grad, hessOrig = hess, ...) 
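        ## A hedged illustration of what G1 holds at this point (assumed call,
        ## names invented for illustration): for a call such as
        ##   maxBFGS(loglik, start = c(0, 0), X = x)
        ## with no 'grad' argument, gradOrig is NULL, so logLikGrad() falls back
        ## to the 'gradient' attribute of the log-likelihood value if present,
        ## and otherwise to numericGradient(); either way G1 is the gradient
        ## summed over observations, one element per parameter.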
if(print.level > 2) { cat("Initial gradient value:\n") print(G1) } if(any(is.na(G1))) { stop("NA in the initial gradient") } if(any(is.infinite(G1))) { stop("Infinite initial gradient") } if(length(G1) != length(start)) { stop( "length of gradient (", length(G1), ") not equal to the no. of parameters (", length(start), ")" ) } } ## function to return the gradients (BFGS) or the new candidate point (SANN) if( method == "BFGS" ) { gradOptim <- logLikGrad } else if( method == "SANN" ) { if( is.null( cand ) ) { gradOptim <- NULL } else { gradOptim <- function( theta, fnOrig, gradOrig, hessOrig, start, fixed, ... ) { return( cand( theta, ... ) ) } } } else if( method == "CG" ) { gradOptim <- logLikGrad } else if( method == "Nelder-Mead" ) { gradOptim <- NULL } else { stop( "internal error: unknown method '", method, "'" ) } ## A note about return value: ## We wrap the value returned by 'optim' into an object of class 'maxim'. ## However, as 'sumt' already returns such an object, we return the ## result of 'sumt' directly, without wrapping it again if(is.null(constraints)) { result <- optim( par = start[ !fixed ], fn = logLikFunc, control = control, method = method, gr = gradOptim, fnOrig = fn, gradOrig = grad, hessOrig = hess, start = start, fixed = fixed, ... ) resultConstraints <- NULL } else { ## linear equality and inequality constraints # inequality constraints: A %*% beta + B >= 0 if(identical(names(constraints), c("ineqA", "ineqB"))) { nra <- nrow(constraints$ineqA) nrb <- nrow(as.matrix(constraints$ineqB)) ncb <- ncol(as.matrix(constraints$ineqB)) if(ncb != 1) { stop("Inequality constraint B must be a vector ", "(or Nx1 matrix). Currently ", ncb, " columns") } if(length(dim(constraints$ineqA)) != 2) { stop("Inequality constraint A must be a matrix\n", "Current dimension", dim(constraints$ineqA)) } if(ncol(constraints$ineqA) != length(start)) { stop("Inequality constraint A must have the same ", "number of columns as length of the parameter.\n", "Currently ", ncol(constraints$ineqA), " and ", length(start), ".") } if(ncol(constraints$ineqA) != length(start)) { stop("Inequality constraint A cannot be matrix multiplied", " with the start value.\n", "A is a ", nrow(constraints$ineqA), "x", ncol(constraints$ineqA), " matrix,", " start value has length ", length(start)) } if(nra != nrb) { stop("Inequality constraints A and B suggest different number ", "of constraints: ", nra, " and ", nrb) } result <- constrOptim2( theta = start, f = logLikFunc, grad = gradOptim, ineqA=constraints$ineqA, ineqB=constraints$ineqB, control=control, method = method, fnOrig = fn, gradOrig = grad, hessOrig = hess, fixed = fixed, start=start, ...) # 'start' argument is needed for adding fixed parameters later in the call chain resultConstraints <- list(type="constrOptim", barrier.value=result$barrier.value, outer.iterations=result$outer.iterations ) } else if(identical(names(constraints), c("eqA", "eqB"))) { # equality constraints: A %*% beta + B = 0 argList <- list(fn=fn, grad=grad, hess=hess, start=start, fixed = fixed, maxRoutine = get( maxMethod ), constraints=constraints, print.level=print.level, iterlim = iterlim, tol = tol, reltol = reltol, parscale = parscale, alpha = alpha, beta= beta, gamma = gamma, temp = temp, tmax = tmax, random.seed = random.seed, cand = cand, ...)
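        ## A hedged usage sketch (invented example, not taken from the package
        ## sources): equality constraints are supplied as
        ## constraints = list(eqA = A, eqB = B) with A %*% beta + B = 0.
        ## For instance, forcing two parameters to sum to one could be written as
        ##   A <- matrix(c(1, 1), nrow = 1);  B <- -1
        ##   maxBFGS(loglik, start = c(0.5, 0.5),
        ##           constraints = list(eqA = A, eqB = B))
        ## which ends up in this branch and is handed over to sumt() below.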
result <- do.call( sumt, argList[ !sapply( argList, is.null ) ] ) return(result) # this is already maxim object } else { stop( maxMethod, " only supports the following constraints:\n", "constraints=list(ineqA, ineqB)\n", "\tfor A %*% beta + B >= 0 linear inequality constraints\n", "current constraints:", paste(names(constraints), collapse=" ")) } } # estimates (including fixed parameters) estimate <- start estimate[ !fixed ] <- result$par ## Calculate the final gradient gradient <- callWithoutSumt( estimate, "logLikGrad", fnOrig = fn, gradOrig = grad, hessOrig = hess, sumObs = FALSE, ... ) if(observationGradient(gradient, length(start))) { gradientObs <- gradient gradient <- colSums(as.matrix(gradient )) } else { gradientObs <- NULL } ## calculate (final) Hessian if(tolower(finalHessian) == "bhhh") { if(!is.null(gradientObs)) { hessian <- - crossprod( gradientObs ) attr(hessian, "type") <- "BHHH" } else { hessian <- NULL warning("For computing the final Hessian by 'BHHH' method, the log-likelihood or gradient must be supplied by observations") } } else if(finalHessian != FALSE) { hessian <- as.matrix( logLikHess( estimate, fnOrig = fn, gradOrig = grad, hessOrig = hess, ... ) ) } else { hessian <- NULL } if( !is.null( hessian ) ) { rownames( hessian ) <- colnames( hessian ) <- names( estimate ) } result <- list( maximum=result$value, estimate=estimate, gradient=drop(gradient), # ensure the final (non-observation) gradient is just a vector hessian=hessian, code=result$convergence, message=paste(message(result$convergence), result$message), last.step=NULL, fixed = fixed, iterations=result$counts[1], type=maximType, constraints=resultConstraints ) if( exists( "gradientObs" ) ) { result$gradientObs <- gradientObs } class(result) <- "maxim" return(result) } maxLik/R/condiNumber.R0000644000176000001440000000241211354136206014320 0ustar ripleyusers### condiNumber: print matrix' condition number adding columns one by one. ### In this way user may investigate the which columns cause problems with singularity condiNumber <- function(x, ...) UseMethod("condiNumber") condiNumber.default <- function(x, exact=FALSE, norm=FALSE, print.level=1, ...) { ## x: a matrix, condition number of which are to be printed ## exact: whether the condition number have to be exact or approximated (see 'kappa') ## norm: whether to normalise the matrix' columns. ## print.level: whether to print the condition numbers while calculating. Useful for interactive testing. if(dim(x)[2] > dim(x)[1]) { warning(paste(dim(x)[1], "rows and", dim(x)[2], "columns, use transposed matrix")) x <- t(x) } cn <- numeric(ncol(x)) if(norm) { # Now normalise column vectors x <- apply(x, 2, FUN=function(v) v/sqrt(sum(v*v))) } for(i in seq(length=ncol(x))) { m <- x[,1:i] cn[i] <- kappa(m, exact=exact) if(print.level > 0) cat(colnames(x)[i], "\t", cn[i], "\n") } names(cn) <- colnames(x) invisible(cn) } condiNumber.maxLik <- function(x, ...) condiNumber.default(hessian(x, ...)[activePar(x), activePar(x),drop=FALSE]) maxLik/R/sumt.R0000644000176000001440000001635011624471463013060 0ustar ripleyusers### SUMT function borrowed from 'clue' package ### ### Adapted for linear constraints sumt <- function(fn, grad=NULL, hess=NULL, start, maxRoutine, constraints, SUMTTol = sqrt(.Machine$double.eps), # difference between estimates for successive outer iterations SUMTPenaltyTol = sqrt(.Machine$double.eps), # maximum allowed penalty SUMTQ = 10, SUMTRho0 = NULL, print.level=0, SUMTMaxIter=100, ...) { ## constraints list w/components eqA and eqB. 
Maximization will ## be performed wrt to the constraint ## A %*% theta + B = 0 ## The user must ensure the matrices are in correct ## form ## maxSUMTiter how many SUMT iterations to perform max ## penalty <- function(theta) { p <- A %*% theta + B sum(p*p) } ## Penalty gradient and Hessian are used only if corresponding function ## for the likelihood function is provided gPenalty <- function(theta) { 2*(t(theta) %*% t(A) %*% A - t(B) %*% A) } hessPenalty <- function(theta) { 2*t(A) %*% A } ## strip possible arguments of maxRoutine and call the function thereafter callWithoutMaxArgs <- function(theta, fName, ...) { return( callWithoutArgs( theta, fName = fName, args = names(formals(maxRoutine)), ... ) ) } SUMTMessage <- function(code) { message <- switch(code, "1" = "penalty close to zero", "2" = "successive function values within tolerance limit", "4" = "Outer iteration limit exceeded (increase SUMTMaxIter ?).", paste("Code", code)) return(message) } ## the penalized objective function Phi <- function(theta, ...) { llVal <- callWithoutMaxArgs( theta, "logLikFunc", fnOrig = fn, gradOrig = grad, hessOrig = hess, sumObs = FALSE, ... ) llVal <- llVal - rho * penalty( theta ) / length( llVal ) g <- attributes( llVal )$gradient if( !is.null( g ) ) { if( is.matrix( g ) ) { g <- g - matrix( rep( rho * gPenalty( theta ) / nrow( g ), each = nrow( g ) ), nrow = nrow( g ), ncol = ncol( g ) ) } else { g <- g - rho * gPenalty( theta ) } attributes( llVal )$gradient <- g } h <- attributes( llVal )$hessian if( !is.null( h ) ) { attributes( llVal )$hessian <- h - rho * hessPenalty( theta ) } return( llVal ) } ## gradient of the penalized objective function if(!is.null(grad)) { gradPhi<- function(theta, ...) { g <- grad(theta, ...) if(is.matrix(g)) { g <- g - matrix( rep( rho * gPenalty( theta ) / nrow( g ), each = nrow( g ) ), nrow = nrow( g ), ncol = ncol( g ) ) } else { g <- g - rho * gPenalty( theta ) } return( g ) } } else { gradPhi <- NULL } ## Hessian of the penalized objective function if(!is.null(hess)) { hessPhi <- function(theta, ...) { return( hess(theta, ...) - rho*hessPenalty(theta) ) } } else { hessPhi <- NULL } ## A <- constraints$eqA B <- constraints$eqB ## ## Note also that currently we do not check whether optimization was ## "successful" ... ## ## Find a suitable inital value for rho if not specified if(is.null(SUMTRho0)) { rho <- 0 result <- maxRoutine(fn=Phi, grad=gradPhi, hess=hessPhi, start=start, print.level=max(print.level - 1, 0), ...) theta <- coef(result) # Note: this may be a bad idea, if unconstrained function is unbounded # from above. In that case rather specify SUHTRho0. if(print.level > 0) { cat("SUMT initial: rho = ", rho, ", function = ", callWithoutMaxArgs( theta, "logLikFunc", fnOrig = fn, gradOrig = grad, hessOrig = hess, ... ), ", penalty = ", penalty(theta), "\n") cat("Estimate:") print(theta) } ## Better upper/lower bounds for rho? rho <- max( callWithoutMaxArgs( theta, "logLikFunc", fnOrig = fn, gradOrig = grad, hessOrig = hess, ... ), 1e-3) / max(penalty(start), 1e-3) } ## if rho specified, simply pick that and use previous initial values else { rho <- SUMTRho0 theta <- start } ## iter <- 1L repeat { thetaOld <- theta result <- maxRoutine(fn=Phi, grad=gradPhi, hess=hessPhi, start=thetaOld, print.level=max(print.level - 1, 0), ...) theta <- coef(result) if(print.level > 0) { cat("SUMT iteration ", iter, ": rho = ", rho, ", function = ", callWithoutMaxArgs( theta, "logLikFunc", fnOrig = fn, gradOrig = grad, hessOrig = hess, ... 
), ", penalty = ", penalty(theta), "\n", sep="") cat("Estimate:") print(theta) } if(max(abs(thetaOld - theta)) < SUMTTol) { SUMTCode <- 2 break } if(penalty(theta) < SUMTPenaltyTol) { SUMTCode <- 1 break } if(iter >= SUMTMaxIter) { SUMTCode <- 4 break } iter <- iter + 1L rho <- SUMTQ * rho } ## Now we replace the resulting gradient and Hessian with those, ## calculated on the original function llVal <- callWithoutMaxArgs( theta, "logLikFunc", fnOrig = fn, gradOrig = grad, hessOrig = hess, sumObs = FALSE, ... ) gradient <- attr( llVal, "gradient" ) if( is.null( gradient ) ) { gradient <- callWithoutMaxArgs( theta, "logLikGrad", fnOrig = fn, gradOrig = grad, hessOrig = hess, sumObs = FALSE, ... ) } if( !is.null( dim( gradient ) ) ) { if( nrow( gradient ) > 1 ) { gradientObs <- gradient } gradient <- colSums( gradient ) } else if( length( start ) == 1 && length( gradient ) > 1 ) { gradientObs <- matrix( gradient, ncol = 1 ) gradient <- sum( gradient ) } result$gradient <- gradient names( result$gradient ) <- names( result$estimate ) result$hessian <- callWithoutMaxArgs( theta, "logLikHess", fnOrig = fn, gradOrig = grad, hessOrig = hess, ... ) result$constraints <- list(type="SUMT", barrier.value=penalty(theta), code=SUMTCode, message=SUMTMessage(SUMTCode), outer.iterations=iter ) if( exists( "gradientObs" ) ) { result$gradientObs <- gradientObs colnames( result$gradientObs ) <- names( result$estimate ) } if( result$constraints$barrier.value > 0.001 ) { warning( "problem in imposing equality constraints: the constraints", " are not satisfied (barrier value = ", result$constraints$barrier.value, "). Try setting 'SUMTTol' to 0" ) } return(result) } maxLik/R/estfun.maxLik.R0000644000176000001440000000104511373163360014606 0ustar ripleyusersestfun.maxLik <- function( x, ... ) { if( is.null( x$gradientObs ) ) { stop( "cannot return the gradients of the log-likelihood function", " evaluated at each observation: please re-run 'maxLik' and", " provide a gradient function using argument 'grad' or", " (if no gradient function is specified) a log-likelihood function", " using argument 'logLik'", " that return the gradients or log-likelihood values, respectively,", " at each observation" ) } return( x$gradientObs ) } maxLik/R/observationGradient.R0000644000176000001440000000053711716501626016076 0ustar ripleyusers ### The function tests whether a given gradient is given ### observation-wise. It tests essentially the # of rows ### in the gradient observationGradient <- function(g, nParam) { if(is.null(dim(g))) { if(nParam == 1 & length(g) > 1) return(TRUE) return(FALSE) } if(nrow(g) == 1) return(FALSE) return(TRUE) } maxLik/R/maxSANN.R0000644000176000001440000000344311411140475013322 0ustar ripleyusersmaxSANN <- function(fn, grad=NULL, hess=NULL, start, fixed = NULL, print.level=0, iterlim=10000, constraints = NULL, tol=1e-8, reltol=tol, finalHessian=TRUE, cand = NULL, temp=10, tmax=10, parscale=rep(1, length=length(start)), random.seed = 123, ... ) { ## Wrapper of optim-based 'SANN' optimization ## ## contraints constraints to be passed to 'constrOptim' ## finalHessian: how (and if) to calculate the final Hessian: ## FALSE not calculate ## TRUE use analytic/numeric Hessian ## bhhh/BHHH use information equality approach ## ## ... 
: further arguments to fn() ## ## Note: grad and hess are for compatibility only, SANN uses only fn values # save seed of the random number generator if( exists( ".Random.seed" ) ) { savedSeed <- .Random.seed } # set seed for the random number generator (used by 'optim( method="SANN" )') set.seed( random.seed ) # restore seed of the random number generator on exit # (end of function or error) if( exists( "savedSeed" ) ) { on.exit( assign( ".Random.seed", savedSeed, envir = sys.frame() ) ) } else { on.exit( rm( .Random.seed, envir = sys.frame() ) ) } result <- maxOptim( fn = fn, grad = grad, hess = hess, start = start, method = "SANN", fixed = fixed, print.level = print.level, iterlim = iterlim, constraints = constraints, tol = tol, reltol = reltol, finalHessian=finalHessian, parscale = parscale, temp = temp, tmax = tmax, random.seed = random.seed, cand = cand, ... ) return(result) } maxLik/R/numericHessian.R0000644000176000001440000000543711742216552015046 0ustar ripleyusersnumericHessian <- function(f, grad=NULL, t0, eps=1e-6, fixed, ...) { a <- f(t0, ...) if(is.null(grad)) { numericNHessian( f = f, t0 = t0, eps = eps, fixed=fixed, ...) # gradient not provided -> everything numerically } else { numericGradient( f = grad, t0 = t0, eps = eps, fixed=fixed, ...) # gradient is provided -> Hessian is grad grad } } numericNHessian <- function( f, t0, eps=1e-6, fixed, ...) { ## Numeric Hessian without gradient ## Assume f() returns a scalar ## ## fixed calculate the Hessian only for the non-fixed parameters warnMessage <- function(theta, value) { ## issue a warning if the function value at theta is not a scalar max.print <- 10 if(length(value) != 1) { warnMsg <- "Function value at\n" warnMsg <- c(warnMsg, paste(format(theta[seq(length=min(max.print,length(theta)))]), collapse=" "), "\n") if(max.print < length(theta)) warnMsg <- c(warnMsg, "...\n") warnMsg <- c(warnMsg, " =\n") warnMsg <- c(warnMsg, paste(format(value[seq(length=min(max.print,length(value)))]), collapse=" "), "\n") if(max.print < length(value)) warnMsg <- c(warnMsg, "...\n") warnMsg <- c(warnMsg, "but numeric Hessian only works on numeric scalars\n", "Component set to NA") return(warnMsg) } if(!is.numeric(value)) stop("The function value must be numeric") return(NULL) } f00 <- f( t0, ...) if(!is.null(msg <- warnMessage(t0, f00))) { warning(msg) f00 <- NA } eps2 <- eps*eps N <- length( t0) H <- matrix(NA, N, N) if(missing(fixed)) fixed <- rep(FALSE, length(t0)) for( i in 1:N) { if(fixed[i]) next for( j in 1:N) { if(fixed[j]) next t01 <- t0 t10 <- t0 t11 <- t0 # initial point t01[i] <- t01[i] + eps t10[j] <- t10[j] + eps t11[i] <- t11[i] + eps t11[j] <- t11[j] + eps f01 <- f( t01, ...) if(!is.null(msg <- warnMessage(t01, f01))) { warning(msg) f01 <- NA } f10 <- f( t10, ...) if(!is.null(msg <- warnMessage(t10, f10))) { warning(msg) f10 <- NA } f11 <- f( t11, ...) if(!is.null(msg <- warnMessage(t11, f11))) { warning(msg) f11 <- NA } H[i,j] <- ( f11 - f01 - f10 + f00)/eps2 } } return( H ) } maxLik/R/checkBhhhGrad.R0000644000176000001440000000645411742216552014536 0ustar ripleyuserscheckBhhhGrad <- function( g, theta, analytic, fixed=NULL) { ## This function controls if the user-supplied analytic or ## numeric gradient of the right dimension. ## If not, signals an error. ## ## analytic: logical, do we have a user-supplied analytic ## gradient? if(is.null(fixed)) { activePar <- rep(T, length=length(theta)) } else { activePar <- !fixed } if( analytic ) { ## Gradient supplied by the user. 
## Check whether the gradient has enough rows (about enough ## observations in data) if( !is.matrix( g ) ) { stop("gradient is not a matrix but of class '", class( g ), "';\n", "the BHHH method requires that the gradient function\n", "(argument 'grad') returns a numeric matrix,\n", "where each row must correspond to the gradient(s)\n", "of the log-likelihood function at an individual\n", "(independent) observation and each column must\n", "correspond to a parameter" ) } else if( nrow( g ) < length( theta[activePar] ) ) { stop( "the matrix returned by the gradient function", " (argument 'grad') must have at least as many", " rows as the number of parameters (", length( theta ), "),", " where each row must correspond to the gradients", " of the log-likelihood function of an individual", " (independent) observation:\n", " currently, there are (is) ", length( theta ), " parameter(s)", " but the gradient matrix has only ", nrow( g ), " row(s)" ) } else if( ncol( g ) != length( theta ) ) { stop( "the matrix returned by the gradient function", " (argument 'grad') must have exactly as many columns", " as the number of parameters:\n", " currently, there are (is) ", length( theta ), " parameter(s)", " but the gradient matrix has ", ncol( g ), " columns" ) } } else { ## numeric gradient ## Check whether the gradient has enough rows. This is the case ## if and only if loglik has enough rows, hence the error message ## about loglik. if( !is.matrix( g ) || nrow( g ) == 1 ) { stop( "if the gradients (argument 'grad') are not provided by the user,", " the BHHH method requires that the log-likelihood function", " (argument 'fn') returns a numeric vector,", " where each element must be the log-likelihood value corresponding", " to an individual (independent) observation" ) } if( nrow( g ) < length( theta ) ) { stop( "the vector returned by the log-likelihood function", " (argument 'fn') must have at least as many elements", " as the number of parameters,", " where each element must be the log-likelihood value corresponding", " to an individual (independent) observation:\n", " currently, there are (is) ", length( theta ), " parameter(s)", " but the log likelihood function return only ", nrow( g ), " element(s)" ) } } return( NULL ) } maxLik/R/print.maxLik.R0000644000176000001440000000064011414351266014436 0ustar ripleyusersprint.maxLik <- function( x, ... ) { cat("Maximum Likelihood estimation\n") cat(maximType(x), ", ", nIter(x), " iterations\n", sep="") cat("Return code ", returnCode(x), ": ", returnMessage(x), "\n", sep="") if(!is.null(x$estimate)) { cat("Log-Likelihood:", x$maximum ) cat( " (", sum( activePar( x ) ), " free parameter(s))\n", sep = "" ) cat("Estimate(s):", x$estimate, "\n" ) } } maxLik/R/checkFuncArgs.R0000644000176000001440000000215011256655004014563 0ustar ripleyuserscheckFuncArgs <- function( func, checkArgs, argName, funcName ) { ## is the 'func' a function? if( !is.function( func ) ) { stop( "argument '", argName, "' of function '", funcName, "' is not a function" ) } funcArgs <- names( formals( func ) ) if( length( funcArgs ) > 1 ) { a <- charmatch( funcArgs[ -1 ], checkArgs ) if( sum( !is.na( a ) ) == 1 ) { stop( "argument '", funcArgs[ -1 ][ !is.na( a ) ], "' of the function specified in argument '", argName, "' of function '", funcName, "' (partially) matches the argument names of function '", funcName, "'. 
Please change the name of this argument" ) } else if( sum( !is.na( a ) ) > 1 ) { stop( "arguments '", paste( funcArgs[ -1 ][ !is.na( a ) ], collapse = "', '" ), "' of the function specified in argument '", argName, "' of function '", funcName, "' (partially) match the argument names of function '", funcName, "'. Please change the names of these arguments" ) } } return( NULL ) } maxLik/R/hessian.R0000644000176000001440000000021011066630507013502 0ustar ripleyusers## Return Hessian of an object hessian <- function(x, ...) UseMethod("hessian") hessian.default <- function(x, ...) x$hessian maxLik/R/logLikGrad.R0000644000176000001440000000414511643562143014103 0ustar ripleyusers## gradient function: ## sum over possible individual gradients logLikGrad <- function(theta, fnOrig, gradOrig, hessOrig, start = NULL, fixed = NULL, sumObs = TRUE, gradAttr = NULL, ...) { # Argument "hessOrig" is just for compatibility with logLikHess() # argument "gradAttr" should be # - FALSE if the gradient is not provided as attribute of the log-lik value # - TRUE if the gradient is provided as attribute of the log-lik value # - NULL if this is not known theta <- addFixedPar( theta = theta, start = start, fixed = fixed, ...) if(!is.null(gradOrig)) { g <- gradOrig(theta, ...) } else if( isTRUE( gradAttr ) || is.null( gradAttr ) ) { if( exists( "lastFuncGrad" ) && exists( "lastFuncParam" ) ) { if( identical( theta, lastFuncParam ) ) { g <- lastFuncGrad } else { g <- "different parameters" } } else { g <- "'lastFuncGrad' or 'lastFuncParam' does not exist" } if( is.character( g ) ) { # do not call fnOrig() if 'lastFuncGrad' is NULL g <- attr( fnOrig( theta, ... ), "gradient" ) } } else { g <- NULL } if( is.null( g ) ) { g <- numericGradient(logLikFunc, theta, fnOrig = fnOrig, sumObs = sumObs, ...) } if( sumObs ) { ## We were requested a single (summed) gradient. Return a vector g <- sumGradients( g, length( theta ) ) names( g ) <- names( theta ) if( !is.null( fixed ) ) { g <- g[ !fixed ] } } else { ## we were requested individual gradients (if possible). Ensure g is a matrix if(observationGradient(g, length(theta))) { ## it was indeed by observations g <- as.matrix(g) colnames( g ) <- names( theta ) if( !is.null( fixed ) ) { g <- g[ , !fixed ] } } else { ## it wasn't g <- drop(g) names(g) <- names(theta) if( !is.null( fixed ) ) { g <- g[ !fixed ] } } } return( g ) } maxLik/R/logLik.maxLik.R0000644000176000001440000000030311066630507014520 0ustar ripleyusers### Methods for accessing loglik value maximum likelihood estimates logLik.summary.maxLik <- function( object, ...) object$loglik logLik.maxLik <- function( object, ...) object$maximum maxLik/R/maxNM.R0000644000176000001440000000236011411140475013072 0ustar ripleyusersmaxNM <- function(fn, grad=NULL, hess=NULL, start, fixed = NULL, print.level=0, iterlim=500, constraints=NULL, tol=1e-8, reltol=tol, finalHessian=TRUE, parscale=rep(1, length=length(start)), alpha=1, beta=0.5, gamma=2, ...) { ## Wrapper of optim-based 'Nelder-Mead' optimization ## ## contraints constraints to be passed to 'constrOptim' ## hessian: how (and if) to calculate the final Hessian: ## FALSE not calculate ## TRUE use analytic/numeric Hessian ## bhhh/BHHH use information equality approach ## ... 
: further arguments to fn() ## ## Note: grad and hess are for compatibility only, SANN uses only fn values result <- maxOptim( fn = fn, grad = grad, hess = hess, start = start, method = "Nelder-Mead", fixed = fixed, print.level = print.level, iterlim = iterlim, constraints = constraints, tol = tol, reltol = reltol, finalHessian=finalHessian, parscale = parscale, alpha = alpha, beta = beta, gamma = gamma, ... ) return(result) } maxLik/R/maxBFGS.R0000644000176000001440000000220011411140475013272 0ustar ripleyusersmaxBFGS <- function(fn, grad=NULL, hess=NULL, start, fixed = NULL, print.level=0, iterlim=200, constraints=NULL, tol=1e-8, reltol=tol, finalHessian=TRUE, parscale=rep(1, length=length(start)), ## sumt parameters ...) { ## Wrapper of optim-based 'BFGS' optimization ## ## contraints constraints to be passed to 'constrOptim' ## finalHessian: how (and if) to calculate the final Hessian: ## FALSE not calculate ## TRUE use analytic/numeric Hessian ## bhhh/BHHH use information equality approach ## ## ... further arguments to fn() and grad() result <- maxOptim( fn = fn, grad = grad, hess = hess, start = start, method = "BFGS", fixed = fixed, print.level = print.level, iterlim = iterlim, constraints = constraints, tol = tol, reltol = reltol, finalHessian=finalHessian, parscale = parscale, ... ) return(result) } maxLik/R/maxCG.R0000644000176000001440000000234711711073172013060 0ustar ripleyusersmaxCG <- function(fn, grad=NULL, hess=NULL, start, fixed = NULL, print.level=0, iterlim=500, constraints=NULL, tol=1e-8, reltol=tol, finalHessian=TRUE, parscale=rep(1, length=length(start)), alpha=1, beta=0.5, gamma=2, ...) { ## Wrapper of optim-based 'Nelder-Mead' optimization ## ## contraints constraints to be passed to 'constrOptim' ## hessian: how (and if) to calculate the final Hessian: ## FALSE not calculate ## TRUE use analytic/numeric Hessian ## bhhh/BHHH use information equality approach ## ... : further arguments to fn() ## ## Note: grad and hess are for compatibility only, SANN uses only fn values result <- maxOptim( fn = fn, grad = grad, hess = hess, start = start, method = "CG", fixed = fixed, print.level = print.level, iterlim = iterlim, constraints = constraints, tol = tol, reltol = reltol, finalHessian=finalHessian, parscale = parscale, alpha = alpha, beta = beta, gamma = gamma, ... ) return(result) } maxLik/R/returnMessage.R0000644000176000001440000000036011225430225014672 0ustar ripleyusers returnMessage <- function(x, ...) UseMethod("returnMessage") returnMessage.default <- function(x, ...) x$returnMessage returnMessage.maxim <- function(x, ...) x$message returnMessage.maxLik <- function(x, ...) x$message maxLik/R/maximType.R0000644000176000001440000000022111201122277014016 0ustar ripleyusersmaximType <- function(x) UseMethod("maximType") maximType.default <- function(x) x$maximType maximType.maxim <- function(x) x$type maxLik/R/callWithoutArgs.R0000644000176000001440000000051211355554711015174 0ustar ripleyusers## strip arguments "args" and call the function with name "fName" thereafter callWithoutArgs <- function(theta, fName, args, ...) { f <- match.call() f[ args ] <- NULL f[[1]] <- as.name(fName) names(f)[2] <- "" f[["fName"]] <- NULL f[["args"]] <- NULL f1 <- eval(f, sys.frame(sys.parent())) return( f1 ) } maxLik/R/numericGradient.R0000644000176000001440000000452511742216552015206 0ustar ripleyusersnumericGradient <- function(f, t0, eps=1e-6, fixed, ...) 
{ ## numeric gradient of a vector-valued function ## f function, return Nval x 1 vector of values ## t0 NPar x 1 vector of parameters ## fixed calculate the gradient based on these parameters only ## return: ## NvalxNPar matrix, gradient ## gradient along parameters which are not active are NA warnMessage <- function(theta, value, i) { ## issue a warning if the function value at theta is not a scalar max.print <- 10 if(length(value) != nVal) { warnMsg <- "Function value at\n" warnMsg <- c(warnMsg, paste(format(theta[seq(length=min(max.print,length(theta)))]), collapse=" "), "\n") if(max.print < length(theta)) warnMsg <- c(warnMsg, "...\n") warnMsg <- c(warnMsg, " =\n") warnMsg <- c(warnMsg, paste(format(value[seq(length=min(max.print,length(value)))]), collapse=" "), "\n") if(max.print < length(value)) warnMsg <- c(warnMsg, "...\n") warnMsg <- c(warnMsg, "(length ", length(value), ") does not conform with ", "the length at original value ", nVal, "\n") warnMsg <- c(warnMsg, "Component ", i, " set to NA") return(warnMsg) } if(!all(is.na(value)) & !is.numeric(value)) stop("The function value must be numeric for 'numericGradient'") return(NULL) } NPar <- length(t0) nVal <- length(f0 <- f(t0, ...)) grad <- matrix(NA, nVal, NPar) row.names(grad) <- names(f0) colnames(grad) <- names(t0) if(missing(fixed)) fixed <- rep(FALSE, NPar) for(i in 1:NPar) { if(fixed[i]) next t2 <- t1 <- t0 t1[i] <- t0[i] - eps/2 t2[i] <- t0[i] + eps/2 ft1 <- f(t1, ...) ft2 <- f(t2, ...) ## give meaningful error message if the functions give vectors ## of different length at t1, t2 if(!is.null(msg <- warnMessage(t1, ft1, i))) { warning(msg) ft1 <- NA } if(!is.null(msg <- warnMessage(t2, ft2, i))) { warning(msg) ft2 <- NA } grad[,i] <- (ft2 - ft1)/eps } return(grad) } maxLik/R/callWithoutSumt.R0000644000176000001440000000032411355554711015231 0ustar ripleyusers## strip possible SUMT parameters and call the function thereafter callWithoutSumt <- function(theta, fName, ...) { return( callWithoutArgs( theta, fName = fName, args = names(formals(sumt)), ... ) ) } maxLik/R/maximMessage.R0000644000176000001440000000211411643562143014476 0ustar ripleyusersmaximMessage <- function(code) { message <- switch(code, "1" = "gradient close to zero", "2" = "successive function values within tolerance limit", "3" = paste("Last step could not find a value above the", "current.\nBoundary of parameter space?", " \nConsider switching to a more robust optimisation method temporarily."), "4" = "Iteration limit exceeded.", "5" = "Infinite value", "6" = "Infinite gradient", "7" = "Infinite Hessian", "8" = "Relative change of the function within relative tolerance", "9" = paste("Gradient did not change,", "cannot improve BFGS approximation for the Hessian.\n", "Use different optimizer and/or analytic gradient."), "100" = "Initial value out of range.", paste("Code", code)) return(message) } maxLik/R/maxLik.R0000644000176000001440000000616411711073172013307 0ustar ripleyusersmaxLik <- function(logLik, grad=NULL, hess=NULL, start, method, constraints=NULL, ...) { ## Maximum Likelihood estimation. ## ## Newton-Raphson maximisation ## Parameters: ## logLik log-likelihood function. First argument must be the vector of parameters. ## grad gradient of log-likelihood. If NULL, numeric gradient is used. Must return either ## * vector, length=nParam ## * matrix, dim=c(nObs, 1). Treated as vector ## * matrix, dim=c(nObs, nParam). In this case the rows are simply ## summed (useful for maxBHHH). 
## hess Hessian function (numeric used if NULL) ## start initial vector of parameters (eventually w/names) ## method maximisation method (Newton-Raphson) ## constraints constrained optimization: a list (see below) ## ... additional arguments for the maximisation routine ## ## RESULTS: ## list of class c("maxLik", "maxim"). This is in fact equal to class "maxim", just the ## methods are different. ## maximum function value at maximum ## estimate the parameter value at maximum ## gradient gradient ## hessian Hessian ## code integer code of success, depends on the optimization ## method ## message character message describing the code ## type character, type of optimization ## ## there may be more components, depending on the choice of ## the algorith. ## argNames <- c( "logLik", "grad", "hess", "start", "method", "constraints" ) checkFuncArgs( logLik, argNames, "logLik", "maxLik" ) if( !is.null( grad ) ) { checkFuncArgs( grad, argNames, "grad", "maxLik" ) } if( !is.null( hess ) ) { checkFuncArgs( hess, argNames, "hess", "maxLik" ) } ## Constrained optimization. We can two possibilities: ## * linear equality constraints ## * linear inequality constraints ## if(missing(method)) { if(is.null(constraints)) { method <- "nr" } else if(identical(names(constraints), c("ineqA", "ineqB"))) { if(is.null(grad)) method <- "Nelder-Mead" else method <- "BFGS" } else method <- "nr" } maxRoutine <- switch(tolower(method), "newton-raphson" =, "nr" = maxNR, "bfgs" = maxBFGS, "bfgsr" =, "bfgs-r" = maxBFGSR, "bhhh" = maxBHHH, "conjugate-gradient" =, "cg" = maxCG, "nelder-mead" =, "nm" = maxNM, "sann" = maxSANN, stop( "Maxlik: unknown maximisation method ", method ) ) result <- maxRoutine(fn=logLik, grad=grad, hess=hess, start=start, constraints=constraints, ...) class(result) <- c("maxLik", class(result)) result } maxLik/R/summary.maxLik.R0000644000176000001440000000574111711073172015003 0ustar ripleyusersprint.summary.maxLik <- function( x, ... ) { cat("--------------------------------------------\n") cat("Maximum Likelihood estimation\n") cat(maximType(x), ", ", nIter(x), " iterations\n", sep="") cat("Return code ", returnCode(x), ": ", returnMessage(x), "\n", sep="") if(!is.null(x$estimate)) { cat("Log-Likelihood:", x$loglik, "\n") cat(x$NActivePar, " free parameters\n") cat("Estimates:\n") printCoefmat(x$estimate) } if(!is.null(x$constraints)) { cat("\nWarning: constrained likelihood estimation.", "Inference is probably wrong\n") cat("Constrained optimization based on", x$constraints$type, "\n") if(!is.null(x$constraints$code)) cat("Return code:", x$constraints$code, "\n") # note: this is missing for 'constrOptim' if(!is.null(x$constraints$message)) cat(x$constraints$message, "\n") # note: this is missing for 'constrOptim' cat(x$constraints$outer.iterations, " outer iterations, barrier value", x$constraints$barrier.value, "\n") } cat("--------------------------------------------\n") } summary.maxLik <- function(object, eigentol=1e-12,... ) { ## object object of class "maxLik" ## ## RESULTS: ## list of class "summary.maxLik" with following components: ## maximum : function value at optimum ## estimate : estimated parameter values at optimum ## gradient : gradient at optimum ## code : code of convergence ## message : message, description of the code ## iterations : number of iterations ## type : type of optimisation ## if(!inherits(object, "maxLik")) stop("'summary.maxLik' called on a non-'maxLik' object") ## Here we should actually coerce the object to a 'maxLik' object, dropping all the subclasses... 
## Instead, we force the program to use maxLik-related methods result <- object$maxim nParam <- length(coef.maxLik(object)) activePar <- activePar( object ) if((object$code < 100) & !is.null(coef.maxLik(object))) { # in case of infinity at initial values, the coefs are not provided t <- coef.maxLik(object)/stdEr.maxLik(object) p <- 2*pnorm( -abs( t)) t[!activePar(object)] <- NA p[!activePar(object)] <- NA results <- cbind("Estimate"=coef.maxLik(object), "Std. error"=stdEr.maxLik(object), "t value"=t, "Pr(> t)"=p) } else { results <- NULL } summary <- list(maximType=object$type, iterations=object$iterations, returnCode=object$code, returnMessage=object$message, loglik=object$maximum, estimate=results, fixed=!activePar, NActivePar=sum(activePar), constraints=object$constraints) class(summary) <- "summary.maxLik" summary } maxLik/R/summary.maxim.R0000644000176000001440000000554611414347111014671 0ustar ripleyusersprint.summary.maxim <- function( x, ... ) { summary <- x cat("--------------------------------------------\n") cat(summary$type, "\n") cat("Number of iterations:", summary$iterations, "\n") cat("Return code:", summary$code, "\n") cat(summary$message, "\n") if(!is.null(summary$unsucc.step)) { cat("Last (unsuccessful) step: function value", summary$unsucc.step$value, "\n") print(summary$unsucc.step$parameters) } if(!is.null(summary$estimate)) { cat("Function value:", summary$maximum, "\n") cat("Estimates:\n") print(summary$estimate) if(!is.null(summary$hessian)) { cat("Hessian:\n") print(summary$hessian) } } if(!is.null(summary$constraints)) { cat("\nConstrained optimization based on", summary$constraints$type, "\n") if(!is.null(summary$constraints$code)) cat("Return code:", summary$constraints$code, "\n") # note: this is missing for 'constrOptim' if(!is.null(summary$constraints$message)) cat(summary$constraints$message, "\n") # note: this is missing for 'constrOptim' cat(summary$constraints$outer.iterations, " outer iterations, barrier value", summary$constraints$barrier.value, "\n") } cat("--------------------------------------------\n") } summary.maxim <- function(object, hessian=FALSE, unsucc.step=FALSE, ... ) { ## The object of class "maxim" should include following components: ## maximum : function value at optimum ## estimate : matrix, estimated parameter values and gradient at optimum ## hessian : hessian ## code : code of convergence ## message : message, description of the code ## last.step : information about last step, if unsuccessful ## iterations : number of iterations ## type : type of optimisation ## nParam <- length(object$estimate) if(object$code == 3 & unsucc.step) { a <- cbind(object$last.step$theta0, object$last.step$theta1) dimnames(a) <- list(parameter=object$names, c("current par", "new par")) unsucc.step <- list(value=object$last.step$f0, parameters=a) } else { unsucc.step <- NULL } estimate <- cbind("estimate"=object$estimate, "gradient"=object$gradient) if(hessian) { H <- object$hessian } else { H <- NULL } summary <- list(maximum=object$maximum, type=object$type, iterations=object$iterations, code=object$code, message=object$message, unsucc.step=unsucc.step, estimate=estimate, hessian=H, constraints=object$constraints) class(summary) <- c("summary.maxim", class(summary)) summary } maxLik/R/prepareFixed.R0000644000176000001440000000572611360204225014476 0ustar ripleyusersprepareFixed <- function( start, activePar, fixed ) { nParam <- length( start ) ## establish the active parameters. 
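    ## A hedged illustration (invented values): with start = c(a = 1, b = 2, c = 3),
    ## fixing the parameter "b" can be requested in three equivalent ways,
    ##   fixed = c(FALSE, TRUE, FALSE)   # logical vector
    ##   fixed = 2                       # position
    ##   fixed = "b"                     # name
    ## each of which yields activePar = c(TRUE, FALSE, TRUE) below; the function
    ## then returns the negation of activePar (TRUE marking the fixed parameters).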
if(!is.null(fixed)) { if(!is.null(activePar)) { if(!all(activePar)) { warning("Both 'activePar' and 'fixed' specified. 'activePar' ignored") } } if( is.logical( fixed ) ) { if( length ( fixed ) != length( start ) || !is.null( dim( fixed ) ) ) { stop( "if fixed parameters are specified using logical values,", " argument 'fixed' must be a logical vector", " with one element for each parameter", " (number of elements in argument 'start')" ) } activePar <- !fixed } else if( is.numeric( fixed ) ) { if( length ( fixed ) >= length( start ) || !is.null( dim( fixed ) ) ) { stop( "if fixed parameters are specified using their positions,", " argument 'fixed' must be a numerical vector", " with fewer elements than the number of parameters", " (number of elements in argument 'start')" ) } else if( min( fixed ) < 1 || max(fixed ) > length( start ) ) { stop( "if fixed parameters are specified using their positions,", " argument 'fixed' must have values between 1 and", " the total number of parameters", " (number of elements in argument 'start')" ) } activePar <- ! c( 1:length( start ) ) %in% fixed } else if( is.character( fixed ) ) { if( length ( fixed ) >= length( start ) || !is.null( dim( fixed ) ) ) { stop( "if fixed parameters are specified using their names,", " argument 'fixed' must be a vector of character strings", " with fewer elements than the number of parameters", " (number of elements in argument 'start')" ) } else if( is.null( names( start ) ) ) { stop( "if fixed parameters are specified using their names,", " parameter names have to be specified in argument 'start'" ) } else if( any( ! fixed %in% names( start ) ) ) { stop( "if fixed parameters are specified using their names,", " all parameter names specified in argument 'fixed'", " must be specified in argument 'start'" ) } activePar <- ! names( start ) %in% fixed } else { stop( "argument 'fixed' must be either a logical vector,", " a numeric vector, or a vector of character strings" ) } } else { if( is.null( activePar ) ) { activePar <- rep( TRUE, length( start ) ) } else if(is.numeric(activePar)) { a <- rep(FALSE, nParam) a[activePar] <- TRUE activePar <- a } } names( activePar ) <- names( start ) if( all( !activePar ) ){ stop( "At least one parameter must not be fixed", " using argument 'fixed'" ) } return( !activePar ) }maxLik/R/logLikHess.R0000644000176000001440000000357011415060733014124 0ustar ripleyusers## Calculate the Hessian of the function, either by analytic or numeric method logLikHess <- function( theta, fnOrig, gradOrig, hessOrig, start = NULL, fixed = NULL, gradAttr = NULL, hessAttr = NULL, ... ) { # argument "gradAttr" should be # - FALSE if the gradient is not provided as attribute of the log-lik value # - TRUE if the gradient is provided as attribute of the log-lik value # - NULL if this is not known # argument "hessAttr" should be # - FALSE if the Hessian is not provided as attribute of the log-lik value # - TRUE if the Hessian is provided as attribute of the log-lik value # - NULL if this is not known theta <- addFixedPar( theta = theta, start = start, fixed = fixed, ...) if(!is.null(hessOrig)) { hessian <- as.matrix(hessOrig( theta, ... )) } else { if( is.null( hessAttr ) || hessAttr || is.null( gradAttr ) ) { llVal <- fnOrig( theta, ...
) gradient <- attr( llVal, "gradient" ) hessian <- attr( llVal, "hessian" ) gradAttr <- !is.null( gradient ) hessAttr <- !is.null( hessian ) } if( !hessAttr ) { if( !is.null( gradOrig ) ) { grad2 <- logLikGrad } else if( gradAttr ) { grad2 <- function( theta, fnOrig = NULL, gradOrig = NULL, ... ) { gradient <- attr( fnOrig( theta, ... ), "gradient" ) gradient <- sumGradients( gradient, length( theta ) ) return( gradient ) } } else { grad2 <- NULL } hessian <- numericHessian( f = logLikFunc, grad = grad2, t0 = theta, fnOrig = fnOrig, gradOrig = gradOrig, ... ) } } rownames( hessian ) <- colnames( hessian ) <- names( theta ) if( !is.null( fixed ) ) { hessian <- hessian[ !fixed, !fixed, drop = FALSE ] } return( hessian ) } maxLik/R/logLikAttr.R0000644000176000001440000001411511745246743014146 0ustar ripleyuserslogLikAttr <- function(theta, fnOrig, gradOrig, hessOrig, fixed, sumObs = FALSE, returnHessian = TRUE, ...) { ## fixed: logical, which parameters to keep fixed ## # this function returns the log-likelihood value with gradient and Hessian as # attributes. If the log-likelihood function provided by the user does not add # these attributes, this functions uses the functions provided by the user # as arguments "grad" and "hess" or (if they are not provided) uses the # finite-difference method to obtain the gradient and Hessian # large initial indentation to be able to diff to previous version # that was defined in maxNR() / maxNR.R. ## number of parameters nParam <- length( theta ) ## value of log-likelihood function f <- fnOrig(theta, ...) ## if there are NA-s in the function value, do not ## compute gradient and Hessian if(any(is.na(f))) { attr(f, "gradient") <- NA attr(f, "hessian") <- NA return(f) } ## gradient of log-likelihood function gr <- attr( f, "gradient" ) if( is.null( gr ) ) { if( !is.null( gradOrig ) ) { gr <- gradOrig(theta, ...) } else { gr <- numericGradient(f = fnOrig, t0 = theta, fixed=fixed, ...) } } ## if there are NA-s in active gradient, do not compute Hessian if(is.matrix(gr)) { activeGr <- gr[,!fixed] } else { activeGr <- gr[!fixed] } if(any(is.na(activeGr))) { attr(f, "gradient") <- gr attr(f, "hessian") <- NA return(f) } # if gradients are observation-specific, they must be stored in a matrix if(observationGradient(gr, length(theta))) { gr <- as.matrix(gr) } ## Set gradients of fixed parameters to NA so that they are always NA ## (no matter if they are analytical or finite-difference gradients) if( is.null( dim( gr ) ) ) { gr[ fixed ] <- NA } else { gr[ , fixed ] <- NA } ## Hessian of log-likelihood function if( isTRUE( returnHessian ) ) { h <- attr( f, "hessian" ) if( is.null( h ) ) { if(!is.null(hessOrig)) { h <- as.matrix(hessOrig(theta, ...)) } else { llFunc <- function( theta, ... ) { return( sum( fnOrig( theta, ... ) ) ) } if( !is.null( attr( f, "gradient" ) ) ) { gradFunc <- function( theta, ... ) { return( sumGradients( attr( fnOrig( theta, ... ), "gradient" ), nParam ) ) } } else if( !is.null( gradOrig ) ) { gradFunc <- function( theta, ... ) { return( sumGradients( gradOrig( theta, ... ), nParam ) ) } } else { gradFunc <- NULL } h <- numericHessian(f = llFunc, grad = gradFunc, t0 = theta, fixed=fixed, ...) } } ## Check the correct size of Hessian. if((dim(h)[1] != nParam) | (dim(h)[2] != nParam)) { stop("Wrong hessian dimension. 
Needed ", nParam, "x", nParam, " but supplied ", dim(h)[1], "x", dim(h)[2]) } else { ## Set elements of the Hessian corresponding to the ## fixed parameters ## to NA so that they are always zero ## (no matter if they are ## calculated analytical or by the finite-difference ## method) h[ fixed, ] <- NA h[ , fixed ] <- NA } } else if( tolower( returnHessian ) == "bhhh" ) { ## We have to return BHHH Hessian. Check if it contains NA in free paramateres, otherwise ## return outer product as Hessian. h <- NULL # to keep track of what we have done if(is.null(dim(gr)) & any(is.na(gr[!fixed]))) { # NA gradient: do not check but send the wrong values to the optimizer. # The optimizer should take corresponding action, such as looking for another value h <- NA } else if(is.matrix(gr)) { if(any(is.na(gr[,!fixed]))) { # NA gradient: do not check but send the wrong values to the optimizer. # The optimizer should take corresponding action, such as looking for another value h <- NA } } if(is.null(h)) { # gr seems not to contain NA-s at free parameters checkBhhhGrad( g = gr, theta = theta, analytic = ( !is.null( attr( f, "gradient" ) ) || !is.null( gradOrig ) ), fixed=fixed) h <- - crossprod( gr ) } attr( h, "type" ) = "BHHH" } else { h <- NULL } ## sum log-likelihood values over observations (if requested) if( sumObs ) { f <- sumKeepAttr( f ) } ## sum gradients over observations (if requested) if( sumObs ) { ## We need just summed gradient gr <- sumGradients( gr, nParam ) } if( !is.null( gradOrig ) && !is.null( attr( f, "gradient" ) ) ) { attr( f, "gradBoth" ) <- TRUE } if( !is.null( hessOrig ) && !is.null( attr( f, "hessian" ) ) ) { attr( f, "hessBoth" ) <- TRUE } attr( f, "gradient" ) <- gr attr( f, "hessian" ) <- h return( f ) } maxLik/R/activePar.R0000644000176000001440000000061511414353631013774 0ustar ripleyusers## activePar: returns parameters which are free under maximisation (not fixed as constants) activePar <- function(x, ...) UseMethod("activePar") activePar.default <- function(x, ...) { if( !is.null( x$fixed ) ) { result <- !x$fixed } else { result <- x$activePar } if( is.null( result ) ) { result <- rep( TRUE, length( coef( x ) ) ) } return( result ) } maxLik/R/returnCode.R0000644000176000001440000000030611066630507014170 0ustar ripleyusers## Returns return (error) code returnCode <- function(x, ...) UseMethod("returnCode") returnCode.default <- function(x, ...) x$returnCode returnCode.maxLik <- function(x, ...) x$code maxLik/R/bread.maxLik.R0000644000176000001440000000011311373555433014360 0ustar ripleyusersbread.maxLik <- function( x, ... ) { return( vcov( x ) * nObs( x ) ) } maxLik/R/constrOptim2.R0000644000176000001440000001227612230721772014471 0ustar ripleyusers# This file is a modified copy of src/library/stats/R/constrOptim.R # Part of the R package, http://www.R-project.org ### This foutine is not intended for end-user use. ### API is subject to change. constrOptim2<-function(theta, f,grad=NULL, ineqA,ineqB, mu=0.0001,control=list(), method=if(is.null(grad)) "Nelder-Mead" else "BFGS", outer.iterations=100,outer.eps=0.00001, ...){ ## Optimize with inequality constraint using SUMT/logarithmic ## barrier ## ## start initial value of parameters, included the fixed ones ## ## This function has to operate with free parameter components ## only as 'optim' cannot handle ## fixed parameters. However, for computing constraints in ## 'R' and 'dR' we have to use the complete parameter vector. ## R <- function(thetaFree, thetaFree.old, ...) { ## Wrapper for the function. 
As this will be fed to the ## 'optim', we have to call it with free parameters only ## (thetaFree) and internally expand it to the full (theta) ## ## Were we called with 'fixed' argument in ... ? dotdotdot <- list(...) # can this be made better? fixed <- dotdotdot[["fixed"]] theta <- addFixedPar( theta = thetaFree, start = theta0, fixed = fixed) theta.old <- addFixedPar( theta = thetaFree.old, start = theta0, fixed = fixed) ineqA.theta<-ineqA%*%theta gi<- ineqA.theta + ineqB if(any(gi < 0)) ## at least one of the constraints not fulfilled return(NaN) gi.old <- ineqA%*%theta.old + ineqB bar <- sum(gi.old*log(gi) - ineqA.theta) # logarithmic barrier value: sum over # components if(!is.finite(bar)) bar<- -Inf result <- f(thetaFree, ...)-mu*bar # do not send 'fixed' and 'start' to the # function here -- we have already # expanded theta to the full parameter result } dR<-function(thetaFree, thetaFree.old, ...){ ## Wrapper for the function. As this will be fed to the 'optim', ## we have to call it with free parameters only (thetaFree) and ## internally expand it to the full (theta) ## ## Were we called with 'fixed' argument in ... ? dotdotdot <- list(...) # can this be made better? fixed <- dotdotdot[["fixed"]] theta <- addFixedPar( theta = thetaFree, start = theta0, fixed = fixed) theta.old <- addFixedPar( theta = thetaFree.old, start = theta0, fixed = fixed) ineqA.theta<-ineqA%*%theta gi<-drop(ineqA.theta + ineqB) gi.old<-drop(ineqA%*%theta.old + ineqB) dbar<-colSums( ineqA*gi.old/gi-ineqA) if(!is.null(fixed)) gr <- grad(thetaFree,...)- (mu*dbar)[!fixed] # grad only gives gradient for the free parameters in order to maintain # compatibility with 'optim'. Hence we compute barrier gradient # for the free parameters only as well. else gr <- grad(thetaFree,...)- (mu*dbar) return(gr) } if (!is.null(control$fnscale) && control$fnscale<0) mu <- -mu ##maximizing if(any(ineqA%*%theta + ineqB < 0)) stop("initial value not in the feasible region") theta0 <- theta # initial value, for keeping the fixed params ## Were we called with 'fixed' argument in ... ? fixed <- list(...)[["fixed"]] if(!is.null(fixed)) thetaFree <- theta[!fixed] else thetaFree <- theta ## obj<-f(thetaFree, ...) r<-R(thetaFree,thetaFree,...) for(i in 1L:outer.iterations){ obj.old<-obj r.old<-r thetaFree.old<-thetaFree fun<-function(thetaFree,...){ R(thetaFree,thetaFree.old,...)} if( method == "SANN" ) { if( is.null( grad ) ) { gradient <- NULL } else { gradient <- grad } } else { gradient <- function(thetaFree, ...) { dR(thetaFree, thetaFree.old, ...) } } ## As 'optim' does not directly support fixed parameters, we optimise over the free parameters only a<-optim(par=thetaFree.old,fn=fun,gr=gradient,control=control,method=method,...)
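        ## A hedged illustration (single constraint assumed, invented for
        ## clarity): with ineqA = 1 and ineqB = 0, i.e. theta >= 0, the
        ## objective R() handed to optim() above evaluates to
        ##   f(theta) - mu * (gi.old * log(theta) - theta)
        ## so the logarithmic barrier keeps the iterates strictly inside the
        ## feasible region; the outer loop then refreshes thetaFree.old and
        ## stops once successive barrier values r are close enough.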
r<-a$value if (is.finite(r) && is.finite(r.old) && abs(r-r.old)/(outer.eps+abs(r-r.old))obj.old) break } if (i==outer.iterations){ a$convergence<-7 a$message<-"Barrier algorithm ran out of iterations and did not converge" } if (mu>0 && obj>obj.old){ a$convergence<-11 a$message<-paste("Objective function increased at outer iteration",i) } if (mu<0 && obj 0) cat(paste("Initial value of the function :", as.numeric(x), "\n")) if(is.na(x)) { result <- list(code=100, message=maximMessage("100"), iterations=0, type=maxim.type) class(result) <- "maxim" return(result) } if(is.infinite(x) & (x > 0)) { # we stop at +Inf but not at -Inf result <- list(code=5, message=maximMessage("5"), iterations=0, type=maxim.type) class(result) <- "maxim" return(result) } if( isTRUE( attr( x, "gradBoth" ) ) ) { warning( "the gradient is provided both as attribute 'gradient' and", " as argument 'grad': ignoring argument 'grad'" ) } if( isTRUE( attr( x, "hessBoth" ) ) ) { warning( "the Hessian is provided both as attribute 'hessian' and", " as argument 'hess': ignoring argument 'hess'" ) } ## ## gradient by individual observations, used for BHHH approximation of initial Hessian. ## If not supplied by observations, we use the summed gradient. gri <- attr( x, "gradient" ) gr <- sumGradients( gri, nParam = length( param ) ) if(print.level > 2) { cat("Initial gradient value:\n") print(gr) } if(any(is.na(gr[!fixed]))) { stop("NA in the initial gradient") } if(any(is.infinite(gr[!fixed]))) { stop("Infinite initial gradient") } if(length(gr) != nParam) { stop( "length of gradient (", length(gr), ") not equal to the no. of parameters (", nParam, ")" ) } ## initial approximation for inverse Hessian. We only work with the non-fixed part if(observationGradient(gri, length(param))) { invHess <- -solve(crossprod(gri[,!fixed])) # initial approximation of inverse Hessian (as in BHHH), if possible if(print.level > 3) { cat("Initial inverse Hessian by gradient crossproduct\n") if(print.level > 4) { print(invHess) } } } else { invHess <- -1e-5*diag(1, nrow=length(gr[!fixed])) # ... if not possible (Is this OK?). Note we make this negative definite. if(print.level > 3) { cat("Initial inverse Hessian is diagonal\n") if(print.level > 4) { print(invHess) } } } if( print.level > 1) { cat("-------- Initial parameters: -------\n") cat( "fcn value:", as.vector(x), "\n") a <- cbind(start, gr, as.integer(!fixed)) dimnames(a) <- list(nimed, c("parameter", "initial gradient", "free")) print(a) cat("------------------------------------\n") } samm <- NULL # this will be returned in case of step getting too small I <- diag(nParam - sum(fixed)) direction <- rep(0, nParam) ## ----------- Main loop --------------- repeat { iter <- iter + 1 if( iter > iterlim) { code <- 4; break } if(any(is.na(invHess))) { cat("Error in the approximated (free) inverse Hessian:\n") print(invHess) stop("NA in Hessian") } if(print.level > 0) { cat("Iteration ", iter, "\n") if(print.level > 3) { cat("Eigenvalues of approximated inverse Hessian:\n") print(eigen(invHess, only.values=TRUE)$values) if(print.level > 4) { cat("inverse Hessian:\n") print(invHess) } } } ## Next, ensure that the approximated inverse Hessian is negative definite for computing ## the new climbing direction. However, retain the original, potentially not negative definite ## for computing the following approximation. 
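## (Subtracting lambda*I below shifts every eigenvalue of approxHess down by
## exactly lambda -- eigen(A - lambda*I)$values == eigen(A)$values - lambda --
## so a sufficiently large lambda always yields a negative definite
## approximation.)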
## This procedure seems to work, but unfortunately I have little idea what I am doing :-( approxHess <- invHess # approxHess is used for computing climbing direction, invHess for next approximation while((me <- max.eigen( approxHess)) >= -lambdatol | (qRank <- qr(approxHess, tol=qrtol)$rank) < sum(!fixed)) { # maximum eigenvalue -> negative definite # qr()$rank -> singularity lambda <- abs(me) + lambdatol + min(abs(diag(approxHess)))/1e7 # The third term corrects numeric singularity. If diag(H) only contains # large values, (H - (a small number)*I) == H because of finite precision approxHess <- approxHess - lambda*I if(print.level > 4) { cat("Not negative definite. Subtracting", lambda, "* I\n") cat("Eigenvalues of new approximation:\n") print(eigen(approxHess, only.values=TRUE)$values) if(print.level > 5) { cat("new Hessian approximation:\n") print(approxHess) } } # how to make it better? } ## next, take a step of suitable length to the suggested direction step <- 1 direction[!fixed] <- as.vector(approxHess %*% gr[!fixed]) oldx <- x oldgr <- gr oldparam <- param param[!fixed] <- oldparam[!fixed] - step * direction[!fixed] x <- sumKeepAttr( fn( param, fixed = fixed, sumObs = FALSE, returnHessian = FALSE, ... ) ) # sum of log-likelihood value but not sum of gradients ## did we end up with a larger value? while((is.na(x) | x < oldx) & step > steptol) { step <- step/2 if(print.level > 2) { cat("Function decreased. Function values: old ", oldx, ", new ", x, ", difference ", x - oldx, "\n") if(print.level > 3) { resdet <- cbind(param = param, gradient = gr, direction=direction, active=!fixed) cat("Attempted parameters:\n") print(resdet) } cat(" -> step ", step, "\n", sep="") } param[!fixed] <- oldparam[!fixed] - step * direction[!fixed] x <- sumKeepAttr( fn( param, fixed = fixed, sumObs = FALSE, returnHessian = FALSE, ... ) ) # sum of log-likelihood value but not sum of gradients } if(step < steptol) { # we did not find a better place to go... samm <- list(theta0=oldparam, f0=oldx, climb=direction) } gri <- attr( x, "gradient" ) # observation-wise gradient. We only need it in order to compute the BHHH Hessian, if asked so. 
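## (If finalHessian == "bhhh", this observation-wise gradient matrix is later
## turned into the information-equality approximation of the Hessian,
## -crossprod(gri), i.e. minus the sum of outer products of the individual
## gradient vectors.)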
gr <- sumGradients( gri, nParam = length( param ) ) incr <- step * direction y <- gr - oldgr if(all(y == 0)) { # gradient did not change -> cannot proceed code <- 9; break } ## Compute new approximation for the inverse hessian update <- outer( incr[!fixed], incr[!fixed]) * (sum(y[!fixed] * incr[!fixed]) + as.vector( t(y[!fixed]) %*% invHess %*% y[!fixed])) / sum(incr[!fixed] * y[!fixed])^2 + (invHess %*% outer(y[!fixed], incr[!fixed]) + outer(incr[!fixed], y[!fixed]) %*% invHess)/ sum(incr[!fixed] * y[!fixed]) invHess <- invHess - update ## chi2 <- - crossprod(direction[!fixed], oldgr[!fixed]) if (print.level > 0){ cat("step = ",step, ", lnL = ", x,", chi2 = ", chi2, ", function increment = ", x - oldx, "\n",sep="") if (print.level > 1){ resdet <- cbind(param = param, gradient = gr, direction=direction, active=!fixed) print(resdet) cat("--------------------------------------------\n") } } if( step < steptol) { code <- 3; break } if( sqrt( crossprod( gr[!fixed] ) ) < gradtol ) { code <-1; break } if(x - oldx < tol) { code <- 2; break } if(x - oldx < reltol*(x + reltol)) { code <- 8; break } if(is.infinite(x) & x > 0) { code <- 5; break } } if( print.level > 0) { cat( "--------------\n") cat( maximMessage( code), "\n") cat( iter, " iterations\n") cat( "estimate:", param, "\n") cat( "Function value:", x, "\n") } if( is.matrix( gr ) ) { if( dim( gr )[ 1 ] == 1 ) { gr <- gr[ 1, ] } } names(gr) <- names(param) # calculate (final) Hessian if(tolower(finalHessian) == "bhhh") { if(observationGradient(gri, length(param))) { hessian <- - crossprod( gri ) attr(hessian, "type") <- "BHHH" } else { hessian <- NULL warning("For computing the final Hessian by 'BHHH' method, the log-likelihood or gradient must be supplied by observations") } } else if(finalHessian) { hessian <- attr( fn( param, fixed = fixed, returnHessian = TRUE, ... ) , "hessian" ) } else { hessian <- NULL } if( !is.null( hessian ) ) { rownames( hessian ) <- colnames( hessian ) <- nimed } ## remove attributes from final value of objective (likelihood) function attributes( x )$gradient <- NULL attributes( x )$hessian <- NULL attributes( x )$gradBoth <- NULL attributes( x )$hessBoth <- NULL ## result <-list( maximum = unname( drop( x ) ), estimate=param, gradient=gr, hessian=hessian, code=code, message=maximMessage( code), last.step=samm, # only when could not find a # lower point fixed=fixed, iterations=iter, type=maxim.type) if(observationGradient(gri, length(param))) { colnames( gri ) <- names( param ) result$gradientObs <- gri } class(result) <- c("maxim", class(result)) invisible(result) } maxLik/R/coef.maxLik.R0000644000176000001440000000036511266030724014220 0ustar ripleyuserscoef.maxim <- function( object, ... ) { return( object$estimate ) } coef.maxLik <- function( object, ... ) { return( object$estimate ) } coef.summary.maxLik <- function( object, ... ) { result <- object$estimate return( result ) } maxLik/R/zzz.R0000644000176000001440000000116212215565054012715 0ustar ripleyusers.onAttach <- function( lib, pkg ) { packageStartupMessage( paste0( "\nPlease cite the 'maxLik' package as:\n", "Henningsen, Arne and Toomet, Ott (2011). ", "maxLik: A package for maximum likelihood estimation in R. ", "Computational Statistics 26(3), 443-458. 
", "DOI 10.1007/s00180-010-0217-1.\n\n", "If you have questions, suggestions, or comments ", "regarding the 'maxLik' package, ", "please use a forum or 'tracker' at maxLik's R-Forge site:\n", "https://r-forge.r-project.org/projects/maxlik/"), domain = NULL, appendLF = TRUE ) } maxLik/R/logLikFunc.R0000644000176000001440000000203612230721772014114 0ustar ripleyusersif( getRversion() >= "2.15.1" ) { globalVariables( c( "lastFuncGrad", "lastFuncParam" ) ) } ## objective function: ## sum over possible individual likelihoods logLikFunc <- function(theta, fnOrig, gradOrig, hessOrig, start = NULL, fixed = NULL, sumObs = TRUE, ...) { # Arguments "gradOrig" and "hessOrig" are just for compatibility with # logLikGrad() and logLikHess() theta <- addFixedPar( theta = theta, start = start, fixed = fixed, ...) result <- fnOrig( theta, ... ) ## save gradients and the corresponding parameter values assign( "lastFuncGrad", attr( result, "gradient" ), inherits = TRUE ) assign( "lastFuncParam", theta, inherits = TRUE ) if( sumObs ) { result <- sumKeepAttr( result ) g <- attributes( result )$gradient if( !is.null( g ) ) { g <- sumGradients( g, length( theta ) ) names( g ) <- names( theta ) if( !is.null( fixed ) ) { g <- g[ !fixed ] } attributes( result )$gradient <- g } } return( result ) } maxLik/MD50000644000176000001440000001146612231414345012046 0ustar ripleyuserse406e4369b8645a5ba9327101b29935a *DESCRIPTION ec4ffc3a682b930eeb620dcad328111b *NAMESPACE 1d1f1fc1679f011a6a631f315817181c *NEWS 23151b49bc8cdafc5378098818605dc8 *R/AIC.R 616d4b76bd50cdff58e1b31249581c2a *R/activePar.R 6b999dafa9bdf5880be41146752383ae *R/addFixedPar.R 1cb94fb786cf735b89302987f2f45ece *R/bread.maxLik.R 628fd12f511412a5431211ebc242b33b *R/callWithoutArgs.R b281b27bd439a07982fc1363b2c95e44 *R/callWithoutSumt.R f6b11464c98bd3662e2b4aae72410ee9 *R/checkBhhhGrad.R 907220a64644066e025e07d1dba550cd *R/checkFuncArgs.R 7446aa5174844bfb8b0c530d4b0292da *R/coef.maxLik.R 504f651d540bb6d84a03b60529730cdf *R/compareDerivatives.R 543a472fb00c5ed64ad77c1446ce2cbd *R/condiNumber.R 16ce669188a2349f588a121fc48f85b9 *R/constrOptim2.R 804efa9a9611766159532fa1ba2535b7 *R/estfun.maxLik.R 1d0a677cc7248ab9651c2bec1d3316d6 *R/fnSubset.R 64d17ba17bea697ac45de7f5ec616c47 *R/hessian.R 2bea3b2594c3a717cdce3af836c560fe *R/logLik.maxLik.R 06173526d586811abca09d66c542b0da *R/logLikAttr.R 17dda617a86a0248d9aa8e80af6d3181 *R/logLikFunc.R 53af305a147d63a6a6125f9df1b30c7b *R/logLikGrad.R 6f5bb5dffae2175ef4b1e408283a024d *R/logLikHess.R 74df2581e5b457158021c58ce611e44d *R/maxBFGS.R 564ea6b19da810f48578470e7601f4c8 *R/maxBFGSR.R 40aa90559ace2a827f2970183d3a4bcc *R/maxBFGSRCompute.R b2cdaeb5d884e0109af4f3ac7005c9d8 *R/maxBHHH.R beb05e07a8652c15e31bd02beb837921 *R/maxCG.R fbd13bdfa6f2dc09a1a22bd6ee8569ba *R/maxLik.R 162379466bac7fb66b81bbd64c61ad4e *R/maxNM.R e45cf5baea4373a36aa2cae38bc8a9da *R/maxNR.R 9605b77c9ee9234f8d6bd8e691a4353a *R/maxNRCompute.R 4b761f07e897584f9b34a891c0031471 *R/maxOptim.R 9fcd4ae424971714ab6fc8d9684690c6 *R/maxSANN.R 83523ce13d0157c4fabfc58ffc40786d *R/maximMessage.R 999df96b4d40ca70de26151b9f04d63e *R/maximType.R f4e22fe11b2dcb40e338b2f19dbfbde8 *R/nIter.R aa6c83d64ee8b14b3b5934fca4e1cf90 *R/nObs.R 1dc47c8109c34ff37a0c657d1cc82419 *R/nParam.R 76c96796242b1f29c693fff5946344d4 *R/numericGradient.R fd6764e2955ed5802e9d5c02dcc8cd17 *R/numericHessian.R 32ee3ff9f876da5b606fea22ee11c3ce *R/observationGradient.R 0db0a207aa820ecd499edc72f953ef1e *R/prepareFixed.R f681ec2a71708e712ea0c3841ecc9711 *R/print.maxLik.R 2d8c9fd7ddf91f986e7392c11d808397 
*R/returnCode.R 69d1f09210d746bb394c6dd9bfa680bc *R/returnMessage.R 8604299b5c0b487b57b07c6038ac58fb *R/stdEr.maxLik.R 131c1768155ac15fea7db3101dea58f5 *R/sumGradients.R 1a3644a70f110fd850668c1f81a4201c *R/summary.maxLik.R fcef0ef389f6eb9b0edbf08f815a1e3b *R/summary.maxim.R 70becd04bd3a37f13fa68ed1039169c2 *R/sumt.R 7f8d32f62e006396f5dab695599f9037 *R/vcov.maxLik.R 75f63f7d6ab9ffae6a82540c17d5f57b *R/zzz.R 6bb1b8e24134c4322e0b6d50f3d87b6e *inst/CITATION b49902e9a1e490cf7dfaa1b4879361e1 *man/activePar.Rd 95993762c6e3df1b7a257ff5489df210 *man/bread.maxLik.Rd dec9a6067e51ba753062bd0b8ac32f93 *man/compareDerivatives.Rd bc71148da6d9ddddf69475fbcf8bb0c7 *man/condiNumber.Rd 3e6815003aabf00919e782c15f821fe8 *man/estfun.maxLik.Rd 4e4d0bcfa8fa16c379f7c11caab96702 *man/fnSubset.Rd b67d98332730e8a3159b5f065c33d4e0 *man/hessian.Rd 6bc77e921e5570ceec370f82c54501ee *man/logLik.maxLik.Rd 5a29e184a6a6051093bf06c381d91851 *man/maxBFGS.Rd 152c343b5bcb10b24a3386765d1c1d2d *man/maxLik-internal.Rd 9275bd3c64d1bfca1fbae40b6b5d6b7e *man/maxLik-methods.Rd f60642668a27725d4102d583ac12357d *man/maxLik.Rd 850d6c9984e15357d5866d2357b712d5 *man/maxNR.Rd 5e6e09860abf01baaed44a49e8d4441f *man/maximType.Rd c2eae784cd73fcad441f0424ae5e9ce6 *man/nIter.Rd 27282bb02e85412ce45c4b74e6a5343b *man/nObs.Rd a1ce32f254cba0470618fedeea26aafa *man/nParam.Rd 23b5588a90837dc04d3c2ba956c7d35a *man/numericGradient.Rd 839c9e3073b76a6092f4259749e16ac2 *man/returnCode.Rd d58bf6e0f604936e1f827c1035f115a6 *man/returnMessage.Rd 3d4ce72cb4cc1993a6b3cbac0c6b8a42 *man/summary.maxLik.Rd b00829cd8d00c280c1572dd47e310ddc *man/summary.maxim.Rd 8a68921d0ef611e6190d57a9455fc97b *man/sumt.Rd b2438d959aadc978e0c68005e754cbdd *man/vcov.maxLik.Rd a5a5d67adda5fb880641a4c6254e3dee *tests/BFGSR.R 0b23f7bd1d5b496dac7a4ae5c824fcc7 *tests/BFGSR.Rout.save 6b59b7c9ab9d213c312dde2e91e8eeac *tests/constraints.R 35d59c7dd0c96af877839933a99e5f8b *tests/constraints.Rout.save cb26c2d5ac8972a66de4c3a258ee25df *tests/examples.R cbf36b3493ad24c3d5c36e8b8bdefb54 *tests/examples.Rout.save 70db31a194f4a57154ea8d6ca822dc52 *tests/finalHessian.R d71d6178543b008001914a4fdf8b8116 *tests/finalHessian.Rout.save e5f3bc93aaaff8b51c9026e3a7520f88 *tests/fitExpDist.R d752ccc7f266d0e65599479db15381d5 *tests/fitExpDist.Rout.save cf1a1fef59bdc054167cd31dd354fc0a *tests/fitGammaDist.R a01a8a4391ed0b470a6b111c2311b484 *tests/fitGammaDist.Rout.save 681476681fe1679667ef14cd1ac61343 *tests/fitNormalDist.R 5310e2914dba7615a3392f4a976941b4 *tests/fitNormalDist.Rout.save 4f03a9221b64705de73e57fa7ba6e5e0 *tests/methods.R 6588311f357c160e600581d238fe6b0e *tests/methods.Rout.save c1bbe611737d8fb90e93a7bac3b1be7a *tests/numericGradient.R da073867a297c038b711fda70344d2b7 *tests/numericGradient.Rout.save maxLik/DESCRIPTION0000644000176000001440000000111112231414345013226 0ustar ripleyusersPackage: maxLik Version: 1.2-0 Date: 2013/10/22 Title: Maximum Likelihood Estimation Author: Ott Toomet , Arne Henningsen , with contributions from Spencer Graves and Yves Croissant Maintainer: Arne Henningsen Depends: R (>= 2.4.0), miscTools (>= 0.6-8) Imports: sandwich Description: Tools for Maximum Likelihood Estimation License: GPL (>= 2) ByteCompile: yes URL: http://www.maxLik.org Packaged: 2013-10-22 04:38:18 UTC; arne NeedsCompilation: no Repository: CRAN Date/Publication: 2013-10-22 08:19:17 maxLik/man/0000755000176000001440000000000012231400472012275 5ustar ripleyusersmaxLik/man/activePar.Rd0000644000176000001440000000266311066630507014522 0ustar ripleyusers\name{activePar} \alias{activePar} 
\alias{activePar.default} \title{free parameters under maximisation} \description{ Return a logical vector, indicating which parameters were free under maximisation, as opposed to the fixed parameters, treated as constants. } \usage{ activePar(x, \dots) \method{activePar}{default}(x, \dots) } \arguments{ \item{x}{object, created by a maximisation routine, or derived from a maximisation object. Currently only \code{\link{maxNR}} and it's derivations support \code{activePar}} \item{\dots}{further arguments for methods} } \details{ Several optimisation routines allow the user to fix some parameter values (or do it automatically in some cases). For gradient or Hessian based inference one has to know which parameters carry optimisation-related information. } \value{ A logical vector, indicating whether the parameters were free to change during optimisation algorithm. } \author{Ott Toomet, \email{otoomet@econ.au.dk}} \seealso{\code{\link{maxNR}}, \code{\link{nObs}}} \examples{ # a simple two-dimensional exponential hat f <- function(a) exp(-a[1]^2 - a[2]^2) # # maximize wrt. both parameters free <- maxNR(f, start=1:2) summary(free) # results should be close to (0,0) activePar(free) # allow only the second parameter to vary cons <- maxNR(f, start=1:2, activePar=c(FALSE,TRUE)) summary(cons) # result should be around (1,0) activePar(cons) } \keyword{methods} \keyword{optimize} maxLik/man/summary.maxLik.Rd0000644000176000001440000000434211415370324015515 0ustar ripleyusers\name{summary.maxLik} \alias{summary.maxLik} \alias{coef.summary.maxLik} \title{summary the Maximum-Likelihood estimation} \description{ Summary the Maximum-Likelihood estimation including standard errors and t-values. } \usage{ \method{summary}{maxLik}(object, eigentol=1e-12, ... ) \method{coef}{summary.maxLik}(object, \ldots) } \arguments{ \item{object}{ object of class 'maxLik', or 'summary.maxLik', usually a result from Maximum-Likelihood estimation. } \item{eigentol}{ nonzero print limit on the range of the absolute values of the hessian. Specifically, define: absEig <- eigen(hessian(object), symmetric=TRUE)[['values']] Then compute and print t values, p values, etc. only if min(absEig) > (eigentol * max(absEig)). } \item{\ldots}{currently not used.} } \value{ \code{summary.maxLik} returns an object of class 'summary.maxLik' with following components: \item{type}{type of maximisation.} \item{iterations}{number of iterations.} \item{code}{code of success.} \item{message}{a short message describing the code.} \item{loglik}{the loglik value in the maximum.} \item{estimate}{numeric matrix, the first column contains the parameter estimates, the second the standard errors, third t-values and fourth corresponding probabilities.} \item{fixed}{logical vector, which parameters are treated as constants.} \item{NActivePar}{number of free parameters.} \item{constraints}{information about the constrained optimization. Passed directly further from \code{maxim}-object. \code{NULL} if unconstrained maximization. } \code{coef.summary.maxLik} returns the matrix of estimated values, standard errors, and \eqn{$t$}{t}- and \eqn{$p$}{p}-values. 
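For instance, with the fitted model \code{a} from the examples below, a short
sketch of extracting the coefficient table and the p-values is
\preformatted{
  s <- summary(a)
  coef(s)        # matrix: estimate, std. error, t value, p value
  coef(s)[, 4]   # p-values only
}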
} \author{Ott Toomet \email{otoomet@ut.ee}, Arne Henningsen} \seealso{\code{\link{maxLik}}} \examples{ ## ML estimation of exponential duration model: t <- rexp(100, 2) loglik <- function(theta) log(theta) - theta*t gradlik <- function(theta) 1/theta - t hesslik <- function(theta) -100/theta^2 ## Estimate with numeric gradient and hessian a <- maxLik(loglik, start=1, print.level=2) summary(a) ## Estimate with analytic gradient and hessian a <- maxLik(loglik, gradlik, hesslik, start=1) summary(a) } \keyword{models} maxLik/man/nIter.Rd0000644000176000001440000000175611127454563013673 0ustar ripleyusers\name{nIter} \alias{nIter} \alias{nIter.default} \title{Return number of iterations for iterative models} \description{ Returns the number of iterations for iterative models. The default method assumes presence of a component \code{iterations} in \code{x}. } \usage{ nIter(x, \dots) \method{nIter}{default}(x, \dots) } \arguments{ \item{x}{a statistical model, or a result of maximisation, such as created by \code{\link{maxLik}} or \code{\link{maxNR}}} \item{\dots}{further arguments for methods} } \details{ This is a generic function. The default method returns the component \code{x$iterations}. } \value{ numeric, number of iterations } \author{Ott Toomet, \email{otoomet@econ.au.dk}} \seealso{\code{\link{maxLik}}, \code{\link{maxNR}} } \examples{ ## ML estimation of exponential duration model: t <- rexp(100, 2) loglik <- function(theta) sum(log(theta) - theta*t) ## Estimate with numeric gradient and numeric Hessian a <- maxNR(loglik, start=1) nIter(a) } \keyword{methods} maxLik/man/estfun.maxLik.Rd0000644000176000001440000000306111373216263015325 0ustar ripleyusers\name{estfun.maxLik} \alias{estfun.maxLik} \title{Extract Gradients Evaluated at each Observation} \description{ Extract the gradients of the log-likelihood function evaluated at each observation (\sQuote{Empirical Estimating Function}, see \code{\link[sandwich]{estfun}}). } \usage{ \method{estfun}{maxLik}( x, ... ) } \arguments{ \item{x}{an object of class \code{maxLik}.} \item{\dots}{further arguments (currently ignored).} } \value{ Matrix of gradients of the log-likelihood function at the estimated parameter value evaluated at each observation } \section{Warnings}{ The \pkg{sandwich} package must be loaded before this method can be used. This method works only if \code{\link{maxLik}} was called with argument \code{grad} equal to a gradient function or (if no gradient function is specified) argument \code{logLik} equal to a log-likelihood function that return the gradients or log-likelihood values, respectively, for each observation. } \author{ Arne Henningsen } \seealso{\code{\link[sandwich]{estfun}}, \code{\link{maxLik}}.} \examples{ ## ML estimation of exponential duration model: t <- rexp(100, 2) loglik <- function(theta) log(theta) - theta*t ## Estimate with numeric gradient and hessian a <- maxLik(loglik, start=1 ) # Extract the gradients evaluated at each observation library( sandwich ) estfun( a ) ## Estimate with analytic gradient gradlik <- function(theta) 1/theta - t b <- maxLik(loglik, gradlik, start=1) estfun( b ) all.equal( c( estfun( b ) ), gradlik( coef( b ) ) ) } \keyword{methods} maxLik/man/maxNR.Rd0000644000176000001440000003646212215550064013631 0ustar ripleyusers\name{maxNR} \alias{maxNR} \alias{maxBFGSR} \alias{maxBHHH} \title{Newton- and Quasi-Newton Maximization} \description{ Unconstrained and equality-constrained maximization based on the quadratic approximation (Newton) method. 
The Newton-Raphson, BFGS (Broyden 1970, Fletcher 1970, Goldfarb 1970, Shanno 1970), and BHHH (Berndt, Hall, Hall, Hausman 1974) methods are available. } \usage{ maxNR(fn, grad = NULL, hess = NULL, start, print.level = 0, tol = 1e-08, reltol=sqrt(.Machine$double.eps), gradtol = 1e-06, steptol = 1e-10, lambdatol = 1e-06, qrtol = 1e-10, iterlim = 150, constraints = NULL, finalHessian = TRUE, bhhhHessian=FALSE, fixed = NULL, activePar = NULL, ... ) maxBFGSR(fn, grad = NULL, hess = NULL, start, print.level = 0, tol = 1e-8, reltol=sqrt(.Machine$double.eps), gradtol = 1e-6, steptol = 1e-10, lambdatol=1e-6, qrtol=1e-10, iterlim = 150, constraints = NULL, finalHessian = TRUE, fixed = NULL, activePar = NULL, ... ) maxBHHH(fn, grad = NULL, hess = NULL, start, print.level = 0, iterlim = 100, finalHessian = "BHHH", ... ) } \arguments{ \item{fn}{function to be maximized. It must have the parameter vector as the first argument and it must return either a single number or a numeric vector, which is summed. If the BHHH method is used and argument \code{gradient} is not given, \code{fn} must return a numeric vector of observation-specific likelihood values. If the parameters are out of range, \code{fn} should return \code{NA}. See details for constant parameters. \code{fn} may also return attributes "gradient" and/or "hessian". If these attributes are set, the algorithm uses the corresponding values as gradient and Hessian. } \item{grad}{gradient of the objective function. It must have the parameter vector as the first argument and it must return either a gradient vector of the objective function, or a matrix, where \emph{columns} correspond to individual parameters. The column sums are treated as gradient components. If \code{NULL}, finite-difference gradients are computed. If the BHHH method is used, \code{grad} must return a matrix, where rows corresponds to the gradient vectors of individual observations and the columns to the individual parameters. If \code{fn} returns an object with attribute \code{gradient}, this argument is ignored. } \item{hess}{Hessian matrix of the function. It must have the parameter vector as the first argument and it must return the Hessian matrix of the objective function. If missing, finite-difference Hessians, based on \code{gradient}, are computed. Hessians are used for maximizations with the Newton-Raphson method but not for maximizations with the BFGS or BHHH method.} \item{start}{initial value for the parameter vector.} \item{print.level}{this argument determines the level of printing which is done during the minimization process. The default value of 0 means that no printing occurs, a value of 1 means that initial and final details are printed and a value of 2 means that full tracing information for every iteration is printed. Higher values will result in even more details. } \item{tol}{stopping condition. Stop if the absolute difference between successive iterations is less than \code{tol}, return \code{code=2}.} \item{reltol}{Relative convergence tolerance. The algorithm stops if it is unable to increase the value by a factor of 'reltol * (abs(val) + reltol)' at a step. Defaults to 'sqrt(.Machine\$double.eps)', typically about '1e-8'.} \item{gradtol}{stopping condition. Stop if the norm of the gradient less than \code{gradtol}, return \code{code=1}.} \item{steptol}{stopping/error condition. If the quadratic approximation leads to lower function value instead of higher, or \code{NA}, the step length is halved and a new attempt is made. 
This procedure is repeated until step < \code{steptol}, thereafter \code{code=3} is returned.} \item{lambdatol}{control whether the Hessian is treated as negative definite. If the largest of the eigenvalues of the Hessian is larger than \code{-lambdatol}, a suitable diagonal matrix is subtracted from the Hessian (quadratic hill-climbing) in order to enforce nagetive definiteness.} \item{qrtol}{QR-decomposition tolerance} \item{iterlim}{stopping condition. Stop if more than \code{iterlim} iterations, return \code{code=4}.} \item{constraints}{either \code{NULL} for unconstrained optimization or a list with two components \code{eqA} and \code{eqB} for equality-constrained optimization \eqn{A \theta + B = 0}{A \%*\% theta + B = 0}. The constrained problem is forwarded to \code{\link{sumt}}. } \item{finalHessian}{how (and if) to calculate the final Hessian. Either \code{FALSE} (do not calculate), \code{TRUE} (use analytic/finite-difference Hessian) or \code{"bhhh"}/\code{"BHHH"} for the information equality approach. The latter approach is only suitable for maximizing log-likelihood functions. It requires the gradient/log-likelihood to be supplied by individual observations. Note that computing the (real, not BHHH) final Hessian does not carry any extra penalty for the NR method, but for the other methods.} \item{bhhhHessian}{logical. Indicating whether the approximation for the Hessian suggested by Bernd, Hall, Hall, and Hausman (1974) should be used.} \item{fixed}{parameters that should be fixed at their starting values: either a logical vector of the same length as argument \code{start}, a numeric (index) vector indicating the positions of the fixed parameters, or a vector of character strings indicating the names of the fixed parameters (parameter names are taken from argument \code{start}).} \item{activePar}{this argument is retained for backward compatibility only; please use argument \code{fixed} instead.} \item{\dots}{further arguments to \code{fn}, \code{grad} and \code{hess}. Further arguments to \code{maxBHHH} are also passed to \code{maxNR}.} } \details{ The idea of the Newton method is to approximate the function in a given location with a multidimensional parabola, and use the estimated maximum as the initial value for the next iteration. Such an approximation requires knowledge of both gradient and Hessian, the latter of which can be quite costly to compute. Several methods for approximating Hessian exist, including BFGS and BHHH. The BHHH method (\code{maxNR} with argument \code{bhhhHessian = TRUE )} or \code{maxBHHH}) is suitable only for maximizing log-likelihood functions. It uses information equality in order to approximate the Hessian of the log-likelihood function. Hence, the log-likelihood values and its gradients must e calculated by individual observations. The Hessian is approximated as the negative of the sum of the outer products of the gradients of individual observations, or, in the matrix form, \code{-t(gradient) \%*\% gradient = - crossprod( gradient )}. The functions \code{maxNR}, \code{maxBFGSR}, and \code{maxBHHH} can work with constant parameters and related changes of parameter values. Constant parameters are useful if a parameter value is converging toward the boundary of support, or for testing. One way is to put \code{fixed} to non-NULL, specifying which parameters should be treated as constants. However, when using \code{maxNR} or \code{maxBHHH}, parameters can also be fixed in runtime by signaling with \code{fn}. 
This may be useful if an estimation converges toward the edge of the parameter space possibly causing problems. The value of \code{fn} may have following attributes (only used by maxNR): \itemize{ \item{constPar}{ index vector. Which parameters are redefined to constant} \item{newVal}{ a list with following components: \itemize{ \item{index}{which parameters will have a new value} \item{val}{the new value of parameters} } } } Hence, \code{constVal} specifies which parameters are treated as constants. \code{newVal} allows one to overwrite the existing parameter values, possibly the non-constant values as well. If the attribute \code{newVal} is present, the new function value need not to exceed the previous one (maximization is not performed in that step). } \value{ list of class "maxim" with following components: \item{maximum}{\code{fn} value at maximum (the last calculated value if not converged).} \item{estimate}{estimated parameter value.} \item{gradient}{vector, last gradient value which was calculated. Should be close to 0 if normal convergence.} \item{gradientObs}{matrix of gradients at parameter value \code{estimate} evaluated at each observation (only if \code{grad} returns a matrix or \code{grad} is not specified and \code{fn} returns a vector).} \item{hessian}{Hessian at the maximum (the last calculated value if not converged).} \item{code}{return code: \itemize{ \item{1}{ gradient close to zero (normal convergence).} \item{2}{ successive function values within tolerance limit (normal convergence).} \item{3}{ last step could not find higher value (probably not converged). This is related to line search step getting too small, usually because hitting the boundary of the parameter space. It may also be related to attempts to move to a wrong direction because of numerical errors. In some cases it can be helped by changing \code{steptol}.} \item{4}{ iteration limit exceeded.} \item{5}{ Infinite value.} \item{6}{ Infinite gradient.} \item{7}{ Infinite Hessian.} \item{8}{ Successive function values withing relative tolerance limit (normal convergence).} \item{9}{ (BFGS) Hessian approximation cannot be improved because of gradient did not change. May be related to numerical approximation problems or wrong analytic gradient.} \item{100}{ Initial value out of range.} } } \item{message}{ a short message, describing \code{code}.} \item{last.step}{ list describing the last unsuccessful step if \code{code=3} with following components: \itemize{ \item{theta0}{ previous parameter value} \item{f0}{ \code{fn} value at \code{theta0}} \item{climb}{ the movement vector to the maximum of the quadratic approximation} } } \item{fixed}{logical vector, which parameters are constants.} \item{iterations}{number of iterations.} \item{type}{character string, type of maximization.} \item{constraints}{A list, describing the constrained optimization (\code{NULL} if unconstrained). Includes the following components: \itemize{ \item{type}{ type of constrained optimization} \item{outer.iterations}{ number of iterations in the constraints step} \item{barrier.value}{ value of the barrier function} } } } \section{Warning}{ No attempt is made to ensure that user-provided analytic gradient/Hessian correct. However, the users are recommended to use \code{\link{compareDerivatives}} function, designed for this purpose. If analytic gradient/Hessian are wrong, the algorithm may not converge, or converge to a wrong point. 
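For instance, a quick check of the analytic gradient of the exponential
duration model used in the examples below could look like
\preformatted{
  t <- rexp(100, 2)
  loglik <- function(theta) sum(log(theta) - theta*t)
  gradlik <- function(theta) sum(1/theta - t)
  compareDerivatives(loglik, gradlik, t0=1)
}
where the reported relative differences should be small (roughly below 1e-7)
if the analytic gradient is correct.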
As the BHHH method (\code{maxNR} with argument \code{bhhhHessian = TRUE} or \code{maxBHHH}) uses the likelihood-specific information equality, it is only suitable for maximizing log-likelihood functions! Quasi-Newton methods, including those mentioned above, do not work well in non-concave regions. This is especially the case with the implementation in \code{maxBFGSR}. The user is advised to experiment with various tolerance options to achieve convergence. } \references{ Berndt, E., Hall, B., Hall, R. and Hausman, J. (1974): Estimation and Inference in Nonlinear Structural Models, \emph{Annals of Social Measurement} 3, p. 653-665. Broyden, C.G. (1970): The Convergence of a Class of Double-rank Minimization Algorithms, \emph{Journal of the Institute of Mathematics and Its Applications} 6, p. 76-90. Fletcher, R. (1970): A New Approach to Variable Metric Algorithms, \emph{Computer Journal} 13, p. 317-322. Goldfeld, S.M. and Quandt, R.E. (1972): \emph{Nonlinear Methods in Econometrics}. Amsterdam: North-Holland. Goldfarb, D. (1970): A Family of Variable Metric Updates Derived by Variational Means, \emph{Mathematics of Computation} 24, p. 23-26. Greene, W.H., 2008, \emph{Econometric Analysis}, 6th edition, Prentice Hall. Shanno, D.F. (1970): Conditioning of Quasi-Newton Methods for Function Minimization, \emph{Mathematics of Computation} 24, p. 647-656. } \author{Ott Toomet \email{otoomet@ut.ee}, Arne Henningsen, function \code{maxBFGSR} was originally developed by Yves Croissant (and placed in 'mlogit' package)} \seealso{\code{\link{maxLik}} for a general framework for maximum likelihood estimation (MLE); \code{\link{maxBHHH}} for maximizations using the Berndt, Hall, Hall, Hausman (1974) algorithm (which is a wrapper function to \code{maxNR}); \code{\link{maxBFGS}} for maximization using the BFGS, Nelder-Mead (NM), and Simulated Annealing (SANN) method (based \code{\link{optim}}), also supporting inequality constraints; \code{\link{nlm}} for Newton-Raphson optimization; and \code{\link{optim}} for different gradient-based optimization methods.} \examples{ ## ML estimation of exponential duration model: t <- rexp(100, 2) loglik <- function(theta) sum(log(theta) - theta*t) ## Note the log-likelihood and gradient are summed over observations gradlik <- function(theta) sum(1/theta - t) hesslik <- function(theta) -100/theta^2 ## Estimate with finite-difference gradient and Hessian a <- maxNR(loglik, start=1, print.level=2) summary(a) ## You would probably prefer 1/mean(t) instead ;-) ## Estimate with analytic gradient and Hessian a <- maxNR(loglik, gradlik, hesslik, start=1) summary(a) ## BFGS estimation with finite-difference gradient a <- maxBFGSR( loglik, start=1 ) summary(a) ## BFGS estimation with analytic gradient a <- maxBFGSR( loglik, gradlik, start=1 ) summary(a) ## For the BHHH method we need likelihood values and gradients ## of individual observations loglikInd <- function(theta) log(theta) - theta*t gradlikInd <- function(theta) 1/theta - t ## Estimate with finite-difference gradient a <- maxBHHH(loglikInd, start=1, print.level=2) summary(a) ## Estimate with analytic gradient a <- maxBHHH(loglikInd, gradlikInd, start=1) summary(a) ## ## Next, we give an example with vector argument: Estimate the mean and ## variance of a random normal sample by maximum likelihood ## Note: you might want to use maxLik instead ## loglik <- function(param) { mu <- param[1] sigma <- param[2] ll <- -0.5*N*log(2*pi) - N*log(sigma) - sum(0.5*(x - mu)^2/sigma^2) ll } x <- rnorm(1000, 1, 2) # use mean=1, 
stdd=2 N <- length(x) res <- maxNR(loglik, start=c(0,1)) # use 'wrong' start values summary(res) ### ### Now an example of constrained optimization ### ## We maximize exp(-x^2 - y^2) where x+y = 1 f <- function(theta) { x <- theta[1] y <- theta[2] exp(-(x^2 + y^2)) ## Note: you may want to use exp(- theta \%*\% theta) instead ;-) } ## use constraints: x + y = 1 A <- matrix(c(1, 1), 1, 2) B <- -1 res <- maxNR(f, start=c(0,0), constraints=list(eqA=A, eqB=B), print.level=1) print(summary(res)) } \keyword{optimize} maxLik/man/returnMessage.Rd0000644000176000001440000000246411066630507015427 0ustar ripleyusers\name{returnMessage} \alias{returnMessage} \alias{returnMessage.default} \alias{returnMessage.maxim} \alias{returnMessage.maxLik} \title{Information about the optimisation process} \description{ This function returns a short message, summarising the outcome of the statistical process, typically optimisation. The message should describe either the type of the convergence, or the problem. \code{returnMessage} is a generic function, with methods for various optimisation algorithms. } \usage{ returnMessage(x, ...) \method{returnMessage}{maxim}(x, ...) \method{returnMessage}{maxLik}(x, ...) } \arguments{ \item{x}{object, should orginate from an optimisation problem} \item{...}{further arguments to other methods}. } \details{ The default methods returns component \code{returnMessage}. } \value{ Character string, the message describing the success or failure of the statistical procedure. } \author{Ott Toomet, \email{otoomet@ut.ee}} \seealso{\code{\link{returnCode}}, \code{\link{maxNR}}} \examples{ ## maximise the exponential bell f1 <- function(x) exp(-x^2) a <- maxNR(f1, start=2) returnMessage(a) # should be success (1 or 2) ## Now try to maximise log() function f2 <- function(x) log(x) a <- maxNR(f2, start=2) returnMessage(a) # should give a failure (4) } \keyword{methods} \keyword{utilities} maxLik/man/maxLik-internal.Rd0000644000176000001440000000112411646617731015642 0ustar ripleyusers\name{maxLik-internal} \alias{checkFuncArgs} \alias{coef.maxim} \alias{constrOptim2} \alias{maximMessage} \alias{maxNRCompute} \alias{observationGradient} \alias{print.summary.maxim} \alias{print.summary.maxLik} \alias{returnCode.maxim} \alias{returnCode.maxLik} % Document the following: %%%% \title{ Internal maxLik Functions } \description{ Internal maxLik Functions } \details{ These are either various methods, or functions, not intended to be called directly by the user (or in some cases are just waiting for proper documentation to be written :). } \keyword{ internal } maxLik/man/summary.maxim.Rd0000644000176000001440000000405311265324672015412 0ustar ripleyusers\name{summary.maxim} \alias{summary.maxim} \title{Summary method for maximisation/minimisation} \description{ Summarises the maximisation results } \usage{ \method{summary}{maxim}( object, hessian=FALSE, unsucc.step=FALSE, ... ) } \arguments{ \item{object}{optimisation result, object of class \code{maxim}. See \code{\link{maxNR}}.} \item{hessian}{logical, whether to display Hessian matrix.} \item{unsucc.step}{logical, whether to describe last unsuccesful step if \code{code} == 3} \item{\ldots}{currently not used.} } \value{ Object of class \code{summary.maxim}, intended to print with corresponding print method. 
There are following components: \item{type}{type of maximisation.} \item{iterations}{number of iterations.} \item{code}{exit code (see \code{\link{maxNR}}.)} \item{message}{a brief message, explaining code.} \item{unsucc.step}{description of last unsuccessful step, only if requested and \code{code} == 3} \item{maximum}{function value at maximum} \item{estimate}{matrix with following columns: \itemize{ \item{results}{coefficient estimates at maximum} \item{gradient}{estimated gradient at maximum} } } \item{constraints}{information about the constrained optimization. Passed directly further from \code{maxim}-object. \code{NULL} if unconstrained maximization. } \item{hessian}{estimated hessian at maximum, only if requested} } \author{Ott Toomet \email{siim@obs.ee}} \seealso{\code{\link{maxNR}}} \examples{ ## minimize a 2D quadratic function: f <- function(b) { x <- b[1]; y <- b[2]; val <- (x - 2)^2 + (y - 3)^2 attr(val, "gradient") <- c(2*x - 4, 2*y - 6) attr(val, "hessian") <- matrix(c(2, 0, 0, 2), 2, 2) val } ## Note that NR finds the minimum of a quadratic function with a single ## iteration. Use c(0,0) as initial value. result1 <- maxNR( f, start = c(0,0) ) summary( result1 ) ## Now use c(1000000, -777777) as initial value and ask for hessian result2 <- maxNR( f, start = c( 1000000, -777777)) summary( result2 ) } \keyword{methods} \keyword{print} maxLik/man/maxBFGS.Rd0000644000176000001440000001774512230721772014041 0ustar ripleyusers\name{maxBFGS} \alias{maxBFGS} \alias{maxCG} \alias{maxSANN} \alias{maxNM} \title{BFGS, conjugate gradient, SANN and Nelder-Mead Maximization} \description{ These functions are wrappers for \code{\link{optim}} where the arguments are compatible with \code{\link{maxNR}}. Note that there is a \code{\link{maxNR}}-based BFGS implementation \code{\link{maxBFGSR}}. } \usage{ maxBFGS(fn, grad = NULL, hess=NULL, start, fixed = NULL, print.level = 0, iterlim = 200, constraints = NULL, tol = 1e-08, reltol=tol, finalHessian=TRUE, parscale=rep(1, length=length(start)), ... ) maxCG(fn, grad = NULL, hess = NULL, start, fixed = NULL, print.level = 0, iterlim = 500, constraints = NULL, tol = 1e-08, reltol=tol, finalHessian=TRUE, parscale = rep(1, length = length(start)), alpha = 1, beta = 0.5, gamma = 2, ...) maxSANN(fn, grad = NULL, hess = NULL, start, fixed = NULL, print.level = 0, iterlim = 10000, constraints = NULL, tol = 1e-08, reltol=tol, finalHessian=TRUE, cand = NULL, temp = 10, tmax = 10, parscale = rep(1, length = length(start)), random.seed = 123, ... ) maxNM(fn, grad = NULL, hess = NULL, start, fixed = NULL, print.level = 0, iterlim = 500, constraints = NULL, tol = 1e-08, reltol=tol, finalHessian=TRUE, parscale = rep(1, length = length(start)), alpha = 1, beta = 0.5, gamma = 2, ...) } \arguments{ \item{fn}{function to be maximised. Must have the parameter vector as the first argument. In order to use numeric gradient and BHHH method, \code{fn} must return vector of observation-specific likelihood values. Those are summed by maxNR if necessary. If the parameters are out of range, \code{fn} should return \code{NA}. See details for constant parameters.} \item{grad}{gradient of the function. Must have the parameter vector as the first argument. If \code{NULL}, numeric gradient is used (only maxBFGS uses gradient). Gradient may return a matrix, where columns correspond to the parameters and rows to the observations (useful for maxBHHH). The columns are summed internally.} \item{hess}{Hessian of the function. 
Not used by any of these methods, for compatibility with \code{\link{maxNR}}.} \item{start}{initial values for the parameters.} \item{fixed}{parameters that should be fixed at their starting values: either a logical vector of the same length as argument \code{start}, a numeric (index) vector indicating the positions of the fixed parameters, or a vector of character strings indicating the names of the fixed parameters (parameter names are taken from argument \code{start}).} \item{print.level}{a larger number prints more working information.} \item{iterlim}{maximum number of iterations.} \item{constraints}{either \code{NULL} for unconstrained optimization or a list with two components. The components may be either \code{eqA} and \code{eqB} for equality-constrained optimization \eqn{A \theta + B = 0}{A \%*\% theta + B = 0}; or \code{ineqA} and \code{ineqB} for inequality constraints \eqn{A \theta + B > 0}{A \%*\% theta + B > 0}. In the inequality-constraints-case, more than one row in \code{ineqA} and \code{ineqB} corresponds to more than one linear constraint, in that case all these must be positive. The equality-constrained problem is forwarded to \code{\link{sumt}}, the inequality-constrained case to \code{\link{constrOptim2}}. } \item{tol, reltol}{the relative convergence tolerance (see \code{\link{optim}}). \code{tol} is for compatibility with \code{\link{maxNR}}.} \item{finalHessian}{how (and if) to calculate the final Hessian. Either \code{FALSE} (not calculate), \code{TRUE} (use analytic/numeric Hessian) or \code{"bhhh"}/\code{"BHHH"} for information equality approach. The latter approach is only suitable for maximizing log-likelihood function. It requires the gradient/log-likelihood to be supplied by individual observations, see \code{\link{maxBHHH}} for details. } \item{cand}{a function used in the \code{"SANN"} algorithm to generate a new candidate point; if it is \code{NULL}, a default Gaussian Markov kernel is used (see argument \code{gr} of \code{\link{optim}}).} \item{temp}{controls the '"SANN"' method. It is the starting temperature for the cooling schedule. Defaults to '10'.} \item{tmax}{is the number of function evaluations at each temperature for the '"SANN"' method. Defaults to '10'. (see \code{\link{optim}})} \item{random.seed}{an integer used to seed R's random number generator. This is to ensure replicability when the \sQuote{Simulated Annealing} method is used. Defaults to 123.} \item{parscale}{A vector of scaling values for the parameters. Optimization is performed on 'par/parscale' and these should be comparable in the sense that a unit change in any element produces about a unit change in the scaled value. (see \code{\link{optim}})} \item{alpha, beta, gamma}{Scaling parameters for the '"Nelder-Mead"' method. 'alpha' is the reflection factor (default 1.0), 'beta' the contraction factor (0.5) and 'gamma' the expansion factor (2.0). (see \code{\link{optim}})} \item{\dots}{further arguments for \code{fn} and \code{grad}.} } \details{ The \sQuote{state} (or \sQuote{seed}) of R's random number generator is saved at the beginning of the \code{maxSANN} function and restored at the end of this function so that this function does \emph{not} affect the generation of random numbers although the random seed is set to argument \code{random.seed} and the \sQuote{SANN} algorithm uses random numbers. 
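A minimal sketch of this replicability:
\preformatted{
  f <- function(theta) -sum((theta - c(1, 2))^2)
  r1 <- maxSANN(f, start = c(0, 0), random.seed = 5)
  r2 <- maxSANN(f, start = c(0, 0), random.seed = 5)
  all.equal(coef(r1), coef(r2))   # should be TRUE: same seed, same result
}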
} \value{ Object of class "maxim": \item{maximum}{value of \code{fn} at maximum.} \item{estimate}{best set of parameters found.} \item{gradient}{vector, gradient at parameter value \code{estimate}.} \item{gradientObs}{matrix of gradients at parameter value \code{estimate} evaluated at each observation (only if \code{grad} returns a matrix or \code{grad} is not specified and \code{fn} returns a vector).} \item{hessian}{value of Hessian at optimum.} \item{code}{integer. Success code, 0 is success (see \code{\link{optim}}).} \item{message}{character string giving any additional information returned by the optimizer, or NULL.} \item{fixed}{logical vector indicating which parameters are treated as constants.} \item{iterations}{two-element integer vector giving the number of calls to \code{fn} and \code{gr}, respectively. This excludes those calls needed to compute the Hessian, if requested, and any calls to \code{fn} to compute a finite-difference approximation to the gradient.} \item{type}{character string "BFGS maximisation".} \item{constraints}{A list, describing the constrained optimization (\code{NULL} if unconstrained). Includes the following components: \itemize{ \item{type}{type of constrained optimization} \item{outer.iterations}{number of iterations in the constraints step} \item{barrier.value}{value of the barrier function} } } } \author{Ott Toomet \email{otoomet@ut.ee}, Arne Henningsen} \seealso{\code{\link{optim}}, \code{\link{nlm}}, \code{\link{maxNR}}, \code{\link{maxBHHH}}, \code{\link{maxBFGSR}}.} \examples{ # Maximum Likelihood estimation of the parameter of Poissonian distribution n <- rpois(100, 3) loglik <- function(l) n*log(l) - l - lfactorial(n) # we use numeric gradient summary(maxBFGS(loglik, start=1)) # you would probably prefer mean(n) instead of that ;-) # Note also that maxLik is better suited for Maximum Likelihood ### ### Now an example of constrained optimization ### f <- function(theta) { x <- theta[1] y <- theta[2] exp(-(x^2 + y^2)) ## Note: you may want to use exp(- theta \%*\% theta) instead ;-) } ## use constraints: x + y >= 1 A <- matrix(c(1, 1), 1, 2) B <- -1 res <- maxNM(f, start=c(1,1), constraints=list(ineqA=A, ineqB=B), print.level=1) print(summary(res)) } \keyword{optimize} maxLik/man/compareDerivatives.Rd0000644000176000001440000000715111624471463016441 0ustar ripleyusers\name{compareDerivatives} \alias{compareDerivatives} \title{function to compare analytic and numeric derivatives} \description{ This function compares analytic and numerical derivative and prints a few diagnostics. It is intended for testing pre-programmed derivative routines for maximisation algorithms. } \usage{ compareDerivatives(f, grad, hess=NULL, t0, eps=1e-6, print=TRUE, ...) } \arguments{ \item{f}{ function to be differentiated. The parameter (vector) of interest must be the first argument. The function may return a vector. } \item{grad}{ analytic gradient. This may be either a function, returning the analytic gradient, or a numeric vector, the pre-computed gradient. The function must use the same set of parameters as \code{f}. If \code{f} is a vector-valued function, grad must return/be a matrix where the number of rows equals the number of components of \code{f}, and the number of columns must equal to the number of components in \code{t0}. } \item{hess}{ function returning the analytic hessian. If present, hessian matrices are compared too. Only appropriate for scalar-valued functions. 
} \item{t0}{ parameter vector indicating the point at which the derivatives are compared. The derivative is taken with respect to this vector. } \item{eps}{ numeric. Step size for numeric differentiation. Central derivative is used. } \item{print}{ logical: TRUE to print a summary, FALSE to return the comparison only (invisibly). } \item{\dots}{ further arguments to \code{f}, \code{grad} and \code{hess}. } } \details{ For every component of \code{f}, the parameter value, analytic and numeric derivative and their relative difference rel.diff = (analytic - numeric)/(0.5*(analytic+numeric)) are printed; if analytic = 0 = numeric, we define rel.diff = 0. If analytic derivatives are correct and the function is sufficiently smooth, expect the relative differences to be less than 1e-7. } \value{ A list with the following components: \item{t0}{the input argument \code{t0}} \item{f.t0}{f(t0)} \item{compareGrad}{ a list with components \code{analytic} = grad(t0), \code{nmeric} = numericGradient(f, t0), and their \code{rel.diff}. } \item{maxRelDiffGrad}{max(abs(rel.diff))} If \code{hess} is also provided, the following optional components are also present: \item{compareHessian}{ a list with components \code{analytic} = hess(t0), \code{numeric} = numericGradient(grad, t0), and their \code{rel.diff}. } \item{maxRelDiffHess}{max(abs(rel.diff)) for the Hessian} } \author{Ott Toomet \email{otoomet@ut.ee} and Spencer Graves} \seealso{ \code{\link{numericGradient}} \code{\link{deriv}} } \examples{ ## A simple example with sin(x)' = cos(x) f <- function(x)c(sin=sin(x)) Dsin <- compareDerivatives(f, cos, t0=c(angle=1)) D2sin <- compareDerivatives(f, cos, function(x)-sin(x), t0=1) ## ## Example of log-likelihood of normal density. Two-parameter ## function. ## x <- rnorm(100, 1, 2) # generate rnorm x l <- function(b) sum(log(dnorm((x-b[1])/b[2])/b[2])) # b[1] = mu, b[2] = sigma gradl <- function(b) { c(mu=sum(x - b[1])/b[2]^2, sigma=sum((x - b[1])^2/b[2]^3 - 1/b[2])) } gradl. <- compareDerivatives(l, gradl, t0=c(mu=1,sigma=2)) ## ## An example with f returning a vector, t0 = a scalar ## trig <- function(x)c(sin=sin(x), cos=cos(x)) Dtrig <- function(x)c(sin=cos(x), cos=-sin(x)) Dtrig. <- compareDerivatives(trig, Dtrig, t0=1) D2trig <- function(x)-trig(x) D2trig. <- compareDerivatives(trig, Dtrig, D2trig, t0=1) } \keyword{math} \keyword{utilities} maxLik/man/maxLik.Rd0000644000176000001440000000774712215557570014046 0ustar ripleyusers\name{maxLik} \alias{maxLik} \alias{coef.maxLik} \alias{print.maxLik} \title{Maximum likelihood estimation} \description{ This is just a wrapper for maximisation routines which return object of class "maxLik". Corresponding methods can correctly handle the likelihood-specific properties of the estimate including the fact that inverse of negative hessian is the variance-covariance matrix. } \usage{ maxLik(logLik, grad = NULL, hess = NULL, start, method, constraints=NULL, ...) } \arguments{ \item{logLik}{log-likelihood function. Must have the parameter vector as the first argument. Must return either a single log-likelihood value or a numeric vector where each component is log-likelihood corresponding to individual observations.} \item{grad}{gradient of log-likelihood. Must have the parameter vector as the first argument. Must return either single gradient vector with length equal to the number of parameters, or a matrix where each row corresponds to gradient vector of individual observations. If \code{NULL}, numeric gradient will be used.} \item{hess}{hessian of log-likelihood. 
Must have the parameter vector as the first argument. Must return a square matrix. If \code{NULL}, numeric gradient will be used.} \item{start}{numeric vector, initial value of parameters.} \item{method}{maximisation method, currently either "NR" (for Newton-Raphson), "BFGS" (for Broyden-Fletcher-Goldfarb-Shanno), "BFGSR" (for the BFGS algorithm implemented in \R), "BHHH" (for Berndt-Hall-Hall-Hausman), "SANN" (for Simulated ANNealing), "CG" (for Conjugate Gradients), or "NM" (for Nelder-Mead). Lower-case letters (such as "nr" for Newton-Raphson) are allowed. If missing, a suitable method is selected automatically.} \item{constraints}{either \code{NULL} for unconstrained maximization or a list, specifying the constraints. See \code{\link{maxBFGS}}. } \item{\dots}{further arguments are passed to the selected maximisation routine, i.e. \code{\link{maxNR}}, \code{\link{maxBFGS}}, \code{\link{maxBFGSR}}, \code{\link{maxBHHH}}, \code{\link{maxSANN}}, \code{\link{maxCG}}, or \code{\link{maxNM}} (depending on argument \code{method}).} } \details{ \code{maxLik} "support" constrained optimization in the sense that constraints are passed further to the underlying optimization routines, and suitable default method is selected. However, no attempt is made to correct the resulting variance-covariance matrix. Hence, the inference may be wrong. A corresponding warning is issued by the summary method. } \value{ object of class 'maxLik' which inherits from class 'maxim'. Components are identical to those of class 'maxim', see \code{\link{maxNR}}. } \section{Warning}{The constrained maximum likelihood estimation should be considered as experimental. In particular, the variance-covariance matrix is not corrected for constrained parameter space. } \author{Ott Toomet \email{otoomet@ut.ee}, Arne Henningsen} \seealso{\code{\link{maxNR}}, \code{\link{nlm}} and \code{\link{optim}} for different non-linear optimisation routines.} \examples{ ## ML estimation of exponential duration model: t <- rexp(100, 2) loglik <- function(theta) log(theta) - theta*t gradlik <- function(theta) 1/theta - t hesslik <- function(theta) -100/theta^2 ## Estimate with numeric gradient and hessian a <- maxLik(loglik, start=1, print.level=2) print( a ) coef( a ) ## Estimate with analytic gradient and hessian a <- maxLik(loglik, gradlik, hesslik, start=1) print( a ) coef( a ) ## ## ## Next, we give an example with vector argument: Estimate the mean and ## variance of a random normal sample by maximum likelihood ## loglik <- function(param) { mu <- param[1] sigma <- param[2] ll <- -0.5*N*log(2*pi) - N*log(sigma) - sum(0.5*(x - mu)^2/sigma^2) ll } x <- rnorm(1000, 1, 2) # use mean=1, stdd=2 N <- length(x) res <- maxLik(loglik, start=c(0,1)) # use 'wrong' start values print( res ) coef( res ) } \keyword{optimize} maxLik/man/maximType.Rd0000644000176000001440000000152611066630507014556 0ustar ripleyusers\name{maximType} \alias{maximType} \alias{maximType.default} \alias{maximType.maxim} \alias{maximType.MLEstimate} \title{Type of Minimization/Maximization} \description{ Returns the type of optimisation. It should be returned by the optimisation routine. } \usage{ maximType(x) } \arguments{ \item{x}{object of class 'maxim' or another object which involves numerical optimisation. } } \value{ A text message, describing the involved optimisation algorithm } \author{Ott Toomet, \email{otoomet@ut.ee}} \seealso{\code{\link{maxNR}}} \examples{ ## maximise two-dimensional exponential hat. 
Maximum is at c(2,1): f <- function(a) exp(-(a[1] - 2)^2 - (a[2] - 1)^2) m <- maxNR(f, start=c(0,0)) summary(m) maximType(m) ## Now use BFGS maximisation. m <- maxBFGS(f, start=c(0,0)) summary(m) maximType(m) } \keyword{optimize} \keyword{methods} maxLik/man/returnCode.Rd0000644000176000001440000000224611066630507014713 0ustar ripleyusers\name{returnCode} \alias{returnCode} \alias{returnCode.default} \title{Return code for optimisation and other objects} \description{ This function gives the return code of various optimisation objects. The return code gives brief information about the success or problems that occurred during the optimisation (see the documentation of the corresponding function). } \usage{ returnCode(x, ...) \method{returnCode}{default}(x, ...) } \arguments{ \item{x}{object, usually an estimator, obtained by optimisation} \item{...}{further arguments for other methods} } \details{ The default method returns the component \code{returnCode}. } \value{ Integer, the success code of the optimisation procedure. However, different optimisation routines may define it in different ways. } \author{Ott Toomet, \email{otoomet@ut.ee}} \seealso{\code{\link{returnMessage}}, \code{\link{maxNR}}} \examples{ ## maximise the exponential bell f1 <- function(x) exp(-x^2) a <- maxNR(f1, start=2) returnCode(a) # should be success (1 or 2) ## Now try to maximise the log() function f2 <- function(x) log(x) a <- maxNR(f2, start=2) returnCode(a) # should give a failure (4) } \keyword{methods} \keyword{utilities} maxLik/man/condiNumber.Rd0000644000176000001440000000530311066630507015043 0ustar ripleyusers\name{condiNumber} \alias{condiNumber} \alias{condiNumber.default} \alias{condiNumber.maxLik} \title{Print matrix condition numbers column-by-column} \description{ This function prints the condition number of a matrix while adding columns one-by-one. This is useful for testing multicollinearity and other numerical problems. This is a generic function with a default method and a method for \code{maxLik} objects. } \usage{ condiNumber(x, ...) \method{condiNumber}{default}(x, exact = FALSE, norm = FALSE, print.level=1, ...) \method{condiNumber}{maxLik}(x, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{numeric matrix, the condition numbers of which are to be printed} \item{exact}{logical, should condition numbers be exact or approximations (see \code{\link{kappa}})} \item{norm}{logical, whether the columns should be normalised to have unit norm} \item{print.level}{numeric, a positive value prints the numbers during the calculations. Useful for interactive work.} \item{\dots}{other arguments to different methods} } \details{ Statistical models often fail because of strong correlation between the explanatory variables in the linear index (multicollinearity) or because the evaluated maximum of a non-linear model is virtually flat. In both cases, the (near) singularity of the related matrices may give us a hint of how to improve the results. \code{condiNumber} allows one to inspect the matrices column-by-column and understand which variable leads to a huge jump in the condition number. If a single column does not immediately reveal the problem, one may try to estimate this column by OLS using the previous columns as explanatory variables. The columns which explain virtually all the variation should have extremely high t-values. } \value{ Invisible vector of condition numbers by column. } \references{W. Greene, Advanced Econometrics, p ...
} \author{Ott Toomet \email{otoomet@ut.ee}} \seealso{\code{\link{kappa}}} \examples{ set.seed(0) ## generate a simple multicollinear dataset x1 <- runif(100) x2 <- runif(100) x3 <- x1 + x2 + 0.000001*runif(100) # this is virtually equal to x1 + x2 x4 <- runif(100) y <- x1 + x2 + x3 + x4 + rnorm(100) m <- lm(y ~ -1 + x1 + x2 + x3 + x4) print(summary(m)) # note the low t-values while R^2 is 0.88. # This hints at multicollinearity condiNumber(model.matrix(m)) # this _prints_ condition numbers. # note the values 'explode' with x3 ## we may test the results further: print(summary(lm(x3 ~ -1 + x1 + x2))) # Note the high t-values and R^2 } \keyword{math} \keyword{utilities} \keyword{debugging} % is it debugging? maxLik/man/hessian.Rd0000644000176000001440000000334411245570200014223 0ustar ripleyusers\name{hessian} \alias{hessian} \alias{hessian.default} \title{Hessian matrix} \description{ This function extracts the Hessian of the M-estimator of a statistical model. It should be supplied by the underlying optimisation algorithm, possibly using approximations. } \usage{ hessian(x, \dots) \method{hessian}{default}(x, \dots) } \arguments{ \item{x}{an M-estimator based statistical model} \item{\dots}{other arguments for methods} } \value{ A numeric matrix, the Hessian of the model at the estimated parameter values. If the maximum is flat, the Hessian is singular. In that case you may want to invert only the non-singular part of the matrix. You may also want to fix certain parameters (see \code{\link{activePar}}). } \author{Ott Toomet, \email{otoomet@econ.au.dk}} \seealso{\code{\link{maxLik}}, \code{\link{activePar}}} \examples{ # log-likelihood for normal density # a[1] - mean # a[2] - standard deviation ll <- function(a) sum(-log(a[2]) - (x - a[1])^2/(2*a[2]^2)) x <- rnorm(1000) # sample from standard normal ml <- maxLik(ll, start=c(1,1)) # ignore possible warnings "NaNs produced in: log(x)" summary(ml) # result should be close to c(0,1) hessian(ml) # What the Hessian looks like sqrt(-solve(hessian(ml))) # Note: standard deviations are on the diagonal # # Now run the same example while fixing a[2] = 1 mlf <- maxLik(ll, start=c(1,1), activePar=c(TRUE, FALSE)) summary(mlf) # first parameter close to 0, the second exactly 1.0 hessian(mlf) # Now look at the Hessian. Note that NAs are in place of passive # parameters. # now invert only the free parameter part of the Hessian sqrt(-solve(hessian(mlf)[activePar(mlf), activePar(mlf)])) # gives the standard deviation for the mean } \keyword{methods} \keyword{optimize} maxLik/man/logLik.maxLik.Rd0000644000176000001440000000214311132015121015223 0ustar ripleyusers\name{logLik.maxLik} \alias{logLik.maxLik} \alias{logLik.summary.maxLik} \title{Return the log likelihood value} \description{ Return the log likelihood value of objects of class \code{maxLik} and \code{summary.maxLik}.
} \usage{ \method{logLik}{maxLik}( object, \dots ) \method{logLik}{summary.maxLik}( object, \dots ) } \arguments{ \item{object}{object of class \code{maxLik} or \code{summary.maxLik}, usually a model estimated with Maximum Likelihood} \item{...}{additional arguments to methods} } \value{ A single numeric, the log likelihood of the estimated model } \author{ Arne Henningsen, Ott Toomet \email{otoomet@ut.ee} } \seealso{\code{\link{maxLik}}} \examples{ ## ML estimation of exponential duration model: t <- rexp(100, 2) loglik <- function(theta) log(theta) - theta*t gradlik <- function(theta) 1/theta - t hesslik <- function(theta) -100/theta^2 ## Estimate with analytic gradient and hessian a <- maxLik(loglik, gradlik, hesslik, start=1) ## print log likelihood value logLik( a ) ## print log likelihood value of summary object b <- summary( a ) logLik( b ) } \keyword{methods} maxLik/man/vcov.maxLik.Rd0000644000176000001440000000267411266036230015002 0ustar ripleyusers\name{vcov.maxLik} \alias{vcov.maxLik} \title{Variance Covariance Matrix of maxLik objects} \description{ Extract variance-covariance matrices of objects of class \code{\link{maxLik}}. } \usage{ \method{vcov}{maxLik}( object, eigentol=1e-12, ... ) } \arguments{ \item{object}{an object of class \code{maxLik}.} \item{eigentol}{ nonzero print limit on the range of the absolute values of the Hessian. Specifically, define: absEig <- eigen(hessian(object), symmetric=TRUE)[['values']] Then compute and print t values, p values, etc. only if min(absEig) > (eigentol * max(absEig)). } \item{\dots}{further arguments (currently ignored).} } \value{ the estimated variance covariance matrix of the coefficients. If the estimated Hessian is singular, its values are \code{Inf}. The values corresponding to fixed parameters are zero. } \author{ Arne Henningsen, Ott Toomet \email{otoomet@ut.ee} } \seealso{\code{\link[stats]{vcov}}, \code{\link{maxLik}}.} \examples{ ## ML estimation of exponential duration model: t <- rexp(100, 2) loglik <- function(theta) log(theta) - theta*t gradlik <- function(theta) 1/theta - t hesslik <- function(theta) -100/theta^2 ## Estimate with numeric gradient and hessian a <- maxLik(loglik, start=1, print.level=2) vcov(a) ## Estimate with analytic gradient and hessian a <- maxLik(loglik, gradlik, hesslik, start=1) vcov(a) } \keyword{methods} maxLik/man/maxLik-methods.Rd0000644000176000001440000000040611265324672015467 0ustar ripleyusers\name{AIC.maxLik} \alias{AIC.maxLik} \alias{std.maxlik} \title{Methods for the various standard functions} \description{ These are methods for maxLik related objects. See the documentation of the corresponding generic functions. } \keyword{methods} maxLik/man/fnSubset.Rd0000644000176000001440000000506011066630507014367 0ustar ripleyusers\name{fnSubset} \alias{fnSubset} \title{ Call fnFull with variable and fixed parameters } \description{ Combine the variable parameters in \code{x} with \code{xFixed} and pass them to \code{fnFull}. Useful for optimizing over a subset of parameters without writing a separate function. Values are combined by name if available. Otherwise, \code{xFull} is constructed by position (the default). } \usage{ fnSubset(x, fnFull, xFixed, xFull=c(x, xFixed), ...) } \arguments{ \item{x}{ Variable parameters to be passed to \code{fnFull}. } \item{fnFull}{ Function whose first argument has length = length(xFull). } \item{xFixed}{ Parameters to be combined with \code{x} to construct the first argument for a call to \code{fnFull}.
} \item{xFull}{ Prototype initial argument for \code{fnFull}. } \item{\dots}{ Optional arguments passed to \code{fnFull}. } } \details{ 1. Confirm that length(x) + length(xFixed) = length(xFull). 2. If \code{xFull} has names, match at least \code{xFixed} by name. Else xFull = c(x, xFixed), the default. 3. fnFull(xFull, ...) } \value{ value returned by \code{fnFull} } %\references{ } \author{ Spencer Graves } \seealso{ \code{\link{optim}} \code{\link[dlm]{dlmMLE}} \code{\link{maxLik}} \code{\link{maxNR}} } \examples{ ## ## Test with 'optim' ## fn <- function(x, x0)(x[2]-2*x[1]-x0)^2 fullEst <- optim(1:2, fn, x0=3) # Fix the last component est4 <- optim(1, fnSubset, x0=3, fnFull=fn, xFixed=4) # Fix the first component fnSubset(1, fn, c(a=4), c(a=1, b=2), x0=3) # After substitution: xFull = c(a=4, b=1), # so fn = (1-2*4-3)^2 = (-10)^2 = 100 est4. <- optim(1, fnSubset, x0=3, fnFull=fn, xFixed=c(a=4), xFull=c(a=1, b=2)) # At optim: xFull=c(a=4, b=10.9), # so fn = (10.9-2*4-3)^2 = (-0.1)^2 = 0.01 ## ## Test with maxNR ## # fn2max = -fn fn2max <- function(x, x0, ...)(-(x[2]-2*x[1]-x0)^2) # Need "..." here when called directly from maxNR, # because maxNR will also pass 'constantPar' # Fix the last component NR4 <- maxNR(fnSubset, start=1, x0=3, fnFull=fn2max, xFixed=4) # Same thing using maxNR(..., activePar) NR4. <- maxNR(fn2max, start=c(1, 4), x0=3, constantPar=2) ## ## Test with maxLik ## # Same as maxNR max4 <- maxLik(fnSubset, start=1, x0=3, fnFull=fn2max, xFixed=4) # Same thing using constantPar in maxNR, called by maxLik max4 <- maxLik(fn2max, start=c(1, 4), x0=3, constantPar=2) } \keyword{optimize}% at least one, from doc/KEYWORDS maxLik/man/sumt.Rd0000644000176000001440000001134711624471463013575 0ustar ripleyusers\name{sumt} \Rdversion{1.1} \alias{sumt} \title{ Equality-constrained optimization } \description{ Sequentially Unconstrained Maximization Technique (SUMT) based optimization for linear equality constraints. This implementation is mostly intended to be called from other maximization routines, such as \code{\link{maxNR}}. } \usage{ sumt(fn, grad=NULL, hess=NULL, start, maxRoutine, constraints, SUMTTol = sqrt(.Machine$double.eps), SUMTPenaltyTol = sqrt(.Machine$double.eps), SUMTQ = 10, SUMTRho0 = NULL, print.level = 0, SUMTMaxIter = 100, ...) } \arguments{ \item{fn}{ function of a (single) vector parameter. The function may have more arguments, but those are not treated as parameters } \item{grad}{ gradient function of \code{fn}. NULL if missing } \item{hess}{ Hessian function of \code{fn}. NULL if missing } \item{start}{ initial value of the parameter. } \item{maxRoutine}{ maximization algorithm } \item{constraints}{list, information for constrained maximization. Currently two components are supported: \code{eqA} and \code{eqB} for linear equality constraints: \eqn{A \beta + B = 0}{A \%*\% beta + B = 0}. The user must ensure that the matrices \code{A} and \code{B} are conformable.} \item{SUMTTol}{ stopping condition. If the coefficients of successive outer iterations are close enough, i.e. the maximum of the absolute value of the component differences is smaller than \code{SUMTTol}, the algorithm stops. Note this does not necessarily mean satisfying the constraints. If the penalty function is too 'weak', SUMT may repeatedly find the same optimum. In that case a warning is issued. The user may try to set \code{SUMTTol} to a lower value, e.g. to zero. } \item{SUMTPenaltyTol}{ stopping condition.
If the barrier value (also called penalty) \eqn{(A \beta + B)'(A \beta + B)}{t(A \%*\% beta + B) \%*\% (A \%*\% beta + B)} is less than \code{SUMTPenaltyTol}, the algorithm stops } \item{SUMTQ}{ a double greater than one controlling the growth of \code{rho} as described in Details. Defaults to 10. } \item{SUMTRho0}{ Initial value for \code{rho}. If not specified, a (possibly) suitable value is selected. See Details. One should consider supplying \code{SUMTRho0} in cases where the unconstrained problem does not have a maximum, or where the maximum is too far from the constrained value. Otherwise the algorithm may pick unsuitable values and fail to achieve convergence. } \item{print.level}{ Integer, debugging information. Larger numbers print more details. } \item{SUMTMaxIter}{ Maximum number of SUMT iterations } \item{\dots}{ Other arguments to \code{maxRoutine} and \code{fn}. } } \details{ The Sequential Unconstrained Minimization Technique is a heuristic for constrained optimization. To minimize a function \eqn{f}{f} subject to constraints, one employs a non-negative function \eqn{P}{P} penalizing violations of the constraints, such that \eqn{P(x)}{P(x)} is zero iff \eqn{x}{x} satisfies the constraints. One iteratively minimizes \eqn{f(x) + \varrho_k P(x)}{f(x) + rho_k P(x)}, where the \eqn{\varrho}{rho} values are increased according to the rule \eqn{\varrho_{k+1} = q \varrho_k}{rho_{k+1} = q rho_k} for some constant \eqn{q > 1}{q > 1}, until convergence is obtained in the sense that the barrier value \eqn{P(x)'P(x)}{P(x)'P(x)} is close to zero. Note that there is no guarantee that global (approximately) constrained optima are found. Standard practice would recommend using the best solution found in "sufficiently many" replications of the algorithm. The unconstrained minimizations are carried out by any of the maximization algorithms in \pkg{maxLik}, such as \code{\link{maxNR}}. Analytic gradient and Hessian are used if provided, numeric ones otherwise. } \value{ Object of class 'maxim'. In addition, a component \item{constraints}{A list, describing the constrained optimization. Includes the following components: \itemize{ \item{type}{type of constrained optimization} \item{barrier.value}{value of the penalty function at maximum} \item{code}{code for the stopping condition} \item{message}{a short message, describing the stopping condition} \item{outer.iterations}{number of iterations in the SUMT step} } } } \section{Note}{ It may be a lot more efficient to wrap the actual function to be optimized in an outer function that calculates the actual parameters from a smaller set of free parameters and the constraints. } \author{ Ott Toomet \email{otoomet@ut.ee}, Arne Henningsen } \seealso{ \code{\link[clue]{sumt}} } \keyword{optimize} maxLik/man/bread.maxLik.Rd0000644000176000001440000000271111374246336015104 0ustar ripleyusers\name{bread.maxLik} \alias{bread.maxLik} \title{Bread for Sandwich Estimator} \description{ Extracting an estimator for the \sQuote{bread} of the sandwich estimator, see \code{\link[sandwich]{bread}}. } \usage{ \method{bread}{maxLik}( x, ... ) } \arguments{ \item{x}{an object of class \code{maxLik}.} \item{\dots}{further arguments (currently ignored).} } \value{ Matrix, the inverse of the expectation of the negative second derivative (Hessian matrix) of the log-likelihood function with respect to the parameters, usually equal to the variance covariance matrix of the parameters times the number of observations. } \section{Warnings}{ The \pkg{sandwich} package must be loaded before this method can be used.
This method works only if \code{\link{maxLik}} was called with argument \code{grad} equal to a gradient function or (if no gradient function is specified) argument \code{logLik} equal to a log-likelihood function that returns the gradients or log-likelihood values, respectively, for each observation. } \author{ Arne Henningsen } \seealso{\code{\link[sandwich]{bread}}, \code{\link{maxLik}}.} \examples{ ## ML estimation of exponential duration model: t <- rexp(100, 2) loglik <- function(theta) log(theta) - theta*t ## Estimate with numeric gradient and hessian a <- maxLik(loglik, start=1 ) # Extract the "bread" library( sandwich ) bread( a ) all.equal( bread( a ), vcov( a ) * nObs( a ) ) } \keyword{methods} maxLik/man/nObs.Rd0000644000176000001440000000262211400731502013475 0ustar ripleyusers\name{nObs.maxLik} \alias{nObs.maxLik} \title{Number of Observations} \description{ Returns the number of observations for statistical models estimated by Maximum Likelihood using \code{\link{maxLik}}. } \usage{ \method{nObs}{maxLik}(x, \dots) } \arguments{ \item{x}{a statistical model estimated by Maximum Likelihood using \code{\link{maxLik}}.} \item{\dots}{further arguments (currently ignored).} } \details{ The \code{nObs} method for objects of class \code{"maxLik"} can return the number of observations only if \code{\link{maxLik}} was called with argument \code{grad} equal to a gradient function or (if no gradient function is specified) argument \code{logLik} equal to a log-likelihood function that returns the gradients or log-likelihood values, respectively, for each observation. } \value{ numeric, number of observations } \author{Arne Henningsen, Ott Toomet} \seealso{\code{\link[miscTools]{nObs}}, \code{\link{maxLik}}, \code{\link{nParam}}.} \examples{ ## fit a normal distribution by ML # generate a variable from normally distributed random numbers x <- rnorm( 100, 1, 2 ) # log likelihood function (for individual observations) llf <- function( param ) { return( dnorm( x, mean = param[ 1 ], sd = param[ 2 ], log = TRUE ) ) } ## ML method ml <- maxLik( llf, start = c( mu = 0, sigma = 1 ) ) # return number of observations nObs( ml ) } \keyword{methods} maxLik/man/numericGradient.Rd0000644000176000001440000000537011742216553015724 0ustar ripleyusers\name{numericGradient} \alias{numericGradient} \alias{numericHessian} \alias{numericNHessian} \title{Functions to Calculate Numeric Derivatives} \description{ Calculate (central) numeric gradient and Hessian. \code{numericGradient} accepts vector-valued functions. } \usage{ numericGradient(f, t0, eps=1e-06, fixed, \dots) numericHessian(f, grad=NULL, t0, eps=1e-06, fixed, \dots) numericNHessian(f, t0, eps=1e-6, fixed, \dots) } \arguments{ \item{f}{function to be differentiated. The first argument must be the parameter vector with respect to which it is differentiated. For the numeric gradient, \code{f} may return a (numeric) vector; for the Hessian it should return a numeric scalar} \item{grad}{function, gradient of \code{f}} \item{t0}{vector, the value of the parameters} \item{eps}{numeric, the step for numeric differentiation} \item{fixed}{logical vector whose length equals the length of the parameter vector. The derivative is calculated only along the parameters for which it is \code{FALSE}; \code{NA} is returned for the others. If missing, all parameters are treated as active.} \item{\dots}{further arguments for \code{f}} } \details{ \code{numericGradient} numerically differentiates a (vector valued) function with respect to its (vector valued) argument.
If the function's value is an \code{NVal * 1} vector and the argument is an \code{NPar * 1} vector, the resulting gradient is an \code{NVal * NPar} matrix. \code{numericHessian} checks whether a gradient function is present and calculates the gradient of the gradient (if present), or the full numeric Hessian (\code{numericNHessian}) if \code{grad} is \code{NULL}. } \value{ Matrix. For \code{numericGradient}, the number of rows is equal to the length of the function value vector, and the number of columns is equal to the length of the parameter vector. For \code{numericHessian}, both the number of rows and the number of columns are equal to the length of the parameter vector. } \section{Warning}{ Be careful when using numerical differentiation in optimisation routines. Although quite precise in simple cases, they may work very poorly in more complicated settings. } \author{Ott Toomet \email{otoomet@gmail.com}} \seealso{\code{\link{compareDerivatives}}, \code{\link{deriv}}} \examples{ # A simple example with Gaussian bell surface f0 <- function(t0) exp(-t0[1]^2 - t0[2]^2) numericGradient(f0, c(1,2)) numericHessian(f0, t0=c(1,2)) # An example with the analytic gradient gradf0 <- function(t0) -2*t0*f0(t0) numericHessian(f0, gradf0, t0=c(1,2)) # The results should be similar to those in the previous case # The central numeric derivatives usually have quite high precision compareDerivatives(f0, gradf0, t0=1:2) # The difference is around 1e-10 } \keyword{math} \keyword{utilities} maxLik/man/nParam.Rd0000644000176000001440000000243111400735622014011 0ustar ripleyusers\name{nParam.maxim} \alias{nParam.maxim} \title{Number of model parameters} \description{ This function returns the number of model parameters. } \usage{ \method{nParam}{maxim}(x, free=FALSE, \dots) } \arguments{ \item{x}{a model returned by a maximisation method from the \pkg{maxLik} package.} \item{free}{logical, whether to report only the free parameters or the total number of parameters (default)} \item{\dots}{other arguments for methods} } \details{ Free parameters are the parameters with no equality restrictions. Some parameters may be restricted (e.g. the sum of two probabilities may be restricted to equal unity). In this case the total number of parameters may depend on the normalisation. } \value{ Number of parameters in the model } \author{Ott Toomet, \email{otoomet@econ.au.dk}} \seealso{\code{\link{nObs}} for the number of observations} \examples{ ## fit a normal distribution by ML # generate a variable from normally distributed random numbers x <- rnorm( 100, 1, 2 ) # log likelihood function (for individual observations) llf <- function( param ) { return( dnorm( x, mean = param[ 1 ], sd = param[ 2 ], log = TRUE ) ) } ## ML method ml <- maxLik( llf, start = c( mu = 0, sigma = 1 ) ) # return number of parameters nParam( ml ) } \keyword{methods}
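The sumt.Rd page above describes the SUMT algorithm only in words: unconstrained maximisations of a penalised objective are repeated while the penalty weight rho grows geometrically (rho_{k+1} = q * rho_k) until the barrier value is close to zero. The following stand-alone R sketch illustrates that idea with base R's optim(); it is not the package's sumt() implementation, and the quadratic objective, the single constraint beta1 + beta2 = 1, and the constants rho0 = 1 and q = 10 are made-up choices for illustration only.

## illustrative SUMT-style loop (assumed example: quadratic objective,
## one linear equality constraint, rho0 = 1, q = 10)
f <- function(beta) -sum((beta - c(1, 2))^2)   # unconstrained maximum at c(1, 2)
A <- matrix(c(1, 1), nrow = 1); B <- -1        # constraint: beta1 + beta2 - 1 = 0
P <- function(beta) sum((A %*% beta + B)^2)    # penalty, zero iff the constraint holds
beta <- c(0, 0); rho <- 1
repeat {
   ## maximise f - rho*P by minimising its negative with optim()
   beta <- optim(beta, function(b) -(f(b) - rho * P(b)))$par
   if (P(beta) < 1e-8 || rho > 1e12) break     # stop once the barrier value is tiny
   rho <- 10 * rho                             # rho_{k+1} = q * rho_k with q = 10
}
beta                                           # close to the constrained maximum c(0, 1)

As rho grows, the penalised optimum is pushed onto the constraint set; in sumt() itself the corresponding loop is controlled by SUMTRho0, SUMTQ, and SUMTPenaltyTol.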