glmmTMB/NAMESPACE
# Generated by roxygen2: do not edit by hand
S3method(VarCorr,glmmTMB)
S3method(anova,glmmTMB)
S3method(as.data.frame,ranef.glmmTMB)
S3method(coef,glmmTMB)
S3method(confint,glmmTMB)
S3method(confint,profile.glmmTMB)
S3method(df.residual,glmmTMB)
S3method(extractAIC,glmmTMB)
S3method(family,glmmTMB)
S3method(fitted,glmmTMB)
S3method(fixef,glmmTMB)
S3method(formula,glmmTMB)
S3method(getME,glmmTMB)
S3method(isLMM,glmmTMB)
S3method(logLik,glmmTMB)
S3method(model.frame,glmmTMB)
S3method(model.matrix,glmmTMB)
S3method(nobs,glmmTMB)
S3method(predict,glmmTMB)
S3method(print,VarCorr.glmmTMB)
S3method(print,coef.glmmTMB)
S3method(print,fixef.glmmTMB)
S3method(print,glmmTMB)
S3method(print,ranef.glmmTMB)
S3method(print,summary.glmmTMB)
S3method(print,vcov.glmmTMB)
S3method(profile,glmmTMB)
S3method(ranef,glmmTMB)
S3method(refit,glmmTMB)
S3method(residuals,glmmTMB)
S3method(sigma,glmmTMB)
S3method(simulate,glmmTMB)
S3method(summary,glmmTMB)
S3method(terms,glmmTMB)
S3method(vcov,glmmTMB)
S3method(weights,glmmTMB)
export("RHSForm<-")
export(RHSForm)
export(VarCorr)
export(addForm)
export(addForm0)
export(beta_family)
export(betabinomial)
export(compois)
export(diagnose)
export(drop.special)
export(dropHead)
export(dtruncated_nbinom1)
export(dtruncated_nbinom2)
export(dtruncated_poisson)
export(expandAllGrpVar)
export(expandDoubleVert)
export(extractForm)
export(findbars_x)
export(fitTMB)
export(fixef)
export(genpois)
export(getCapabilities)
export(getGrpVar)
export(getME)
export(getReStruc)
export(get_cor)
export(glmmTMB)
export(glmmTMBControl)
export(gt_load)
export(inForm)
export(nbinom1)
export(nbinom2)
export(noSpecials)
export(numFactor)
export(omp_check)
export(parseNumLevels)
export(ranef)
export(reOnly)
export(refit)
export(replaceForm)
export(sigma)
export(splitForm)
export(sumTerms)
export(truncated_compois)
export(truncated_genpois)
export(truncated_nbinom1)
export(truncated_nbinom2)
export(truncated_poisson)
export(tweedie)
export(up2date)
export(ziGamma)
if(getRversion() >= "3.6.0") {
  S3method(car::Anova, glmmTMB)
} else {
  export(Anova.glmmTMB)
}
if(getRversion() >= "3.6.0") {
  S3method(multcomp::modelparm, glmmTMB)
} else {
  export(modelparm.glmmTMB)
}
if(getRversion() >= "3.6.0") {
  S3method(effects::Effect, glmmTMB)
} else {
  export(Effect.glmmTMB)
}
if(getRversion() >= "3.3.0") importFrom(stats, sigma) else importFrom(lme4, sigma)
importFrom(Matrix,Cholesky)
importFrom(Matrix,solve)
importFrom(Matrix,t)
importFrom(TMB,MakeADFun)
importFrom(TMB,sdreport)
importFrom(TMB,tmbprofile)
importFrom(lme4,.prt.VC)
importFrom(lme4,.prt.aictab)
importFrom(lme4,.prt.call)
importFrom(lme4,.prt.family)
importFrom(lme4,.prt.grps)
importFrom(lme4,.prt.resids)
importFrom(lme4,findbars)
importFrom(lme4,getME)
importFrom(lme4,isLMM)
importFrom(lme4,mkReTrms)
importFrom(lme4,nobars)
importFrom(lme4,refit)
importFrom(lme4,subbars)
importFrom(methods,as)
importFrom(methods,is)
importFrom(methods,new)
importFrom(nlme,VarCorr)
importFrom(nlme,fixef)
importFrom(nlme,ranef)
importFrom(numDeriv,hessian)
importFrom(numDeriv,jacobian)
importFrom(splines,backSpline)
importFrom(splines,interpSpline)
importFrom(stats,"contrasts<-")
importFrom(stats,.getXlevels)
importFrom(stats,AIC)
importFrom(stats,BIC)
importFrom(stats,anova)
importFrom(stats,as.formula)
importFrom(stats,binomial)
importFrom(stats,complete.cases)
importFrom(stats,confint)
importFrom(stats,contrasts)
importFrom(stats,delete.response)
importFrom(stats,df.residual)
importFrom(stats,dist)
importFrom(stats,dnbinom)
importFrom(stats,dpois)
importFrom(stats,family)
importFrom(stats,fitted)
importFrom(stats,formula)
importFrom(stats,gaussian)
importFrom(stats,getCall)
importFrom(stats,logLik)
importFrom(stats,make.link)
importFrom(stats,model.frame)
importFrom(stats,model.matrix)
importFrom(stats,model.response)
importFrom(stats,model.weights)
importFrom(stats,na.fail)
importFrom(stats,na.pass)
importFrom(stats,napredict)
importFrom(stats,nlminb)
importFrom(stats,nobs)
importFrom(stats,optimHess)
importFrom(stats,pbinom)
importFrom(stats,pchisq)
importFrom(stats,plogis)
importFrom(stats,pnbinom)
importFrom(stats,pnorm)
importFrom(stats,poisson)
importFrom(stats,ppois)
importFrom(stats,predict)
importFrom(stats,printCoefmat)
importFrom(stats,profile)
importFrom(stats,qchisq)
importFrom(stats,qnbinom)
importFrom(stats,qnorm)
importFrom(stats,residuals)
importFrom(stats,rnbinom)
importFrom(stats,rnorm)
importFrom(stats,runif)
importFrom(stats,sd)
importFrom(stats,setNames)
importFrom(stats,simulate)
importFrom(stats,terms)
importFrom(stats,update)
importFrom(stats,var)
importFrom(stats,vcov)
importFrom(stats,weights)
importFrom(stats,xtabs)
importFrom(utils,head)
importFrom(utils,packageVersion)
useDynLib(glmmTMB)

glmmTMB/data/epil2.rda
[binary R data file: contents omitted]

glmmTMB/data/Salamanders.rda
[binary R data file: contents omitted]

glmmTMB/data/Owls.rda
[binary R data file: contents omitted]

glmmTMB/man/print.VarCorr.glmmTMB.Rd
\name{print.VarCorr.glmmTMB}
\alias{print.VarCorr.glmmTMB}
\title{Printing The Variance and Correlation Parameters of a \code{glmmTMB}}
\arguments{
\item{digits}{number of significant digits to use.}
\item{comp}{a string specifying the component to format and print.}
\item{formatter}{a \code{\link{function}}.}
\item{...}{optional further arguments, passed to the next \code{\link{print}} method.}
}
\description{
Printing The Variance and Correlation Parameters of a \code{glmmTMB}
}

glmmTMB/man/getGrpVar.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glmmTMB.R
\name{getGrpVar}
\alias{getGrpVar}
\title{Get Grouping Variable}
\usage{
getGrpVar(x)
}
\arguments{
\item{x}{"flist" object; a data frame of factors including an \code{assign} attribute matching columns to random effect terms}
}
\value{
character vector of grouping variables
}
\description{
Extract grouping variables for random effect terms from a factor list
}
\examples{
data(cbpp, package="lme4")
cbpp$obs <- factor(seq(nrow(cbpp)))
rt <- lme4::glFormula(cbind(incidence, size-incidence) ~ (1|herd) + (1|obs),
                      data=cbpp, family=binomial)$reTrms
getGrpVar(rt$flist)
}
\keyword{internal}

glmmTMB/man/RHSForm.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reformulas.R
\name{RHSForm}
\alias{RHSForm}
\title{extract right-hand side of a formula}
\usage{
RHSForm(form, as.form = FALSE)
}
\arguments{
\item{form}{a formula object}
\item{as.form}{(logical) return a formula (\code{TRUE}) or a call/symbolic object (\code{FALSE})?}
}
\description{
extract right-hand side of a formula
}
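% A minimal usage sketch (illustrative; the formula below is hypothetical,
% and the commented results are what these calls are expected to return):
\examples{
RHSForm(Reaction ~ Days + (1 | Subject))                 ## a call: Days + (1 | Subject)
RHSForm(Reaction ~ Days + (1 | Subject), as.form = TRUE) ## a one-sided formula
}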
glmmTMB/man/sigma.glmmTMB.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VarCorr.R
\name{sigma.glmmTMB}
\alias{sigma.glmmTMB}
\alias{sigma}
\title{Extract residual standard deviation or dispersion parameter}
\usage{
\method{sigma}{glmmTMB}(object, ...)
}
\arguments{
\item{object}{a \dQuote{glmmTMB} fitted object}
\item{\dots}{(ignored; for method compatibility)}
}
\description{
For Gaussian models, \code{sigma} returns the value of the residual standard deviation; for other families, it returns the dispersion parameter, \emph{however it is defined for that particular family}. See details for each family below.
}
\details{
The value returned varies by family:
\describe{
\item{gaussian}{returns the \emph{maximum likelihood} estimate of the standard deviation (i.e., smaller than the results of \code{sigma(lm(...))} by a factor of \eqn{\sqrt{(n-p)/n}}{sqrt((n-p)/n)}, where \eqn{p} is the number of fixed-effect coefficients)}
\item{nbinom1}{returns a dispersion parameter (usually denoted \eqn{\alpha}{alpha} as in Hardin and Hilbe (2007)): such that the variance equals \eqn{\mu(1+\alpha)}{mu(1+alpha)}.}
\item{nbinom2}{returns a dispersion parameter (usually denoted \eqn{\theta}{theta} or \eqn{k}); in contrast to most other families, larger \eqn{\theta}{theta} corresponds to a \emph{lower} variance, which is \eqn{\mu(1+\mu/\theta)}{mu(1+mu/theta)}.}
\item{Gamma}{Internally, glmmTMB fits Gamma responses by fitting a mean and a shape parameter; sigma is estimated as (1/sqrt(shape)), which will typically be close to (but not identical to) that estimated by \code{stats:::sigma.default}, which uses sqrt(deviance/df.residual)}
\item{beta}{returns the value of \eqn{\phi}{phi}, where the conditional variance is \eqn{\mu(1-\mu)/(1+\phi)}{mu*(1-mu)/(1+phi)} (i.e., increasing \eqn{\phi}{phi} decreases the variance). This parameterization follows Ferrari and Cribari-Neto (2004) (and the \code{betareg} package).}
\item{betabinomial}{This family uses the same parameterization (governing the Beta distribution that underlies the binomial probabilities) as \code{beta}.}
\item{genpois}{returns the index of dispersion \eqn{\phi^2}{phi^2}, where the variance is \eqn{\mu\phi^2}{mu*phi^2} (Consul & Famoye 1992)}
\item{compois}{returns the value of \eqn{1/\nu}{1/nu}. When \eqn{\nu=1}{nu=1}, compois is equivalent to the Poisson distribution. There is no closed-form equation for the variance, but the distribution is approximately underdispersed when \eqn{1/\nu < 1}{1/nu < 1} and approximately overdispersed when \eqn{1/\nu > 1}{1/nu > 1}. In this implementation, \eqn{\mu}{mu} is exactly the mean (Huang 2017), which differs from the COMPoissonReg package (Sellers & Lotze 2015).}
\item{tweedie}{returns the value of \eqn{\phi}{phi}, where the variance is \eqn{\phi\mu^p}{phi*mu^p}. The value of \eqn{p} can be extracted using the internal function \code{glmmTMB:::.tweedie_power}.}
}
The most commonly used GLM families (\code{binomial}, \code{poisson}) have fixed dispersion parameters which are internally ignored.
}
\references{
\itemize{
\item Consul PC & Famoye F (1992). "Generalized Poisson regression model." \emph{Communications in Statistics: Theory and Methods} 21:89--109.
\item Ferrari SLP, Cribari-Neto F (2004). "Beta Regression for Modelling Rates and Proportions." \emph{J. Appl. Stat.} 31(7), 799--815.
\item Hardin JW & Hilbe JM (2007). "Generalized linear models and extensions." Stata Press.
\item Huang A (2017). "Mean-parametrized Conway-Maxwell-Poisson regression models for dispersed counts." \emph{Statistical Modelling} 17(6), 1--22.
\item Sellers K & Lotze T (2015). "COMPoissonReg: Conway-Maxwell Poisson (COM-Poisson) Regression". R package version 0.3.5. https://CRAN.R-project.org/package=COMPoissonReg
}
}
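% A minimal sketch (illustrative, not from the original sources): for a
% Gaussian model, sigma() returns the ML estimate of the residual SD, which
% can be recovered from the df-corrected lm() estimate.
\examples{
data(sleepstudy, package = "lme4")
fm  <- glmmTMB(Reaction ~ Days, sleepstudy)  ## ML fit (default REML = FALSE)
fm0 <- lm(Reaction ~ Days, sleepstudy)
n <- nobs(fm)
sigma(fm)                                ## ML estimate, sqrt(RSS/n)
sigma(fm0) * sqrt(df.residual(fm0) / n)  ## rescaled lm() estimate; should agree closely
}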
glmmTMB/man/dot-collectDuplicates.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glmmTMB.R
\name{.collectDuplicates}
\alias{.collectDuplicates}
\title{collapse duplicated observations}
\usage{
.collectDuplicates(data.tmb)
}
\description{
collapse duplicated observations
}
\keyword{internal}

glmmTMB/man/VarCorr.glmmTMB.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VarCorr.R
\name{VarCorr.glmmTMB}
\alias{VarCorr.glmmTMB}
\alias{VarCorr}
\title{Extract variance and correlation components}
\usage{
\method{VarCorr}{glmmTMB}(x, sigma = 1, ...)
}
\arguments{
\item{x}{a fitted \code{glmmTMB} model}
\item{sigma}{residual standard deviation (usually set automatically from internal information)}
\item{\dots}{extra arguments (for consistency with generic method)}
}
\description{
Extract variance and correlation components
}
\details{
For an unstructured variance-covariance matrix, the internal parameters are structured as follows: the first n parameters are the log-standard-deviations, while the remaining n(n-1)/2 parameters are the elements of the Cholesky factor of the correlation matrix, filled in column-wise order (see the \href{http://kaskr.github.io/adcomp/classUNSTRUCTURED__CORR__t.html}{TMB documentation} for further details).
}
\examples{
## Comparing variance-covariance matrix with manual computation
data("sleepstudy", package="lme4")
fm4 <- glmmTMB(Reaction ~ Days + (Days|Subject), sleepstudy)
VarCorr(fm4)[[c("cond","Subject")]]
## hand calculation
pars <- getME(fm4,"theta")
## construct cholesky factor (unit diagonal, correlation parameters below)
L <- diag(2)
L[lower.tri(L)] <- pars[-(1:2)]
## scale L L' to unit diagonal to obtain the correlation matrix (see ?get_cor)
C <- cov2cor(tcrossprod(L))
sdvec <- exp(pars[1:2])
(V <- outer(sdvec,sdvec) * C)
}
\keyword{internal}

glmmTMB/man/weights.glmmTMB.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\name{weights.glmmTMB}
\alias{weights.glmmTMB}
\title{Extract weights from a glmmTMB object}
\usage{
\method{weights}{glmmTMB}(object, type = "prior", ...)
}
\arguments{
\item{object}{a fitted \code{glmmTMB} object}
\item{type}{weights type}
\item{...}{additional arguments (not used; for methods compatibility)}
}
\description{
Extract weights from a glmmTMB object
}
\details{
At present only explicitly specified \emph{prior weights} (i.e., weights specified in the \code{weights} argument) can be extracted from a fitted model.
\itemize{
\item Unlike other GLM-type models such as \code{\link{glm}} or \code{\link[lme4]{glmer}}, \code{weights()} does not currently return the total number of trials when binomial responses are specified as a two-column matrix.
\item Since \code{glmmTMB} does not fit models via iteratively weighted least squares, \dQuote{working weights} (see \code{\link[stats:glm]{weights.glm}}) are unavailable.
}
}
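% A minimal sketch (illustrative): prior weights supplied as the number of
% binomial trials are returned unchanged by weights(..., type = "prior").
\examples{
data(cbpp, package = "lme4")
m <- glmmTMB(incidence / size ~ period + (1 | herd),
             weights = size, family = binomial, data = cbpp)
head(weights(m, type = "prior"))  ## the 'size' values supplied as weights
}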
glmmTMB/man/getME.glmmTMB.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\name{getME.glmmTMB}
\alias{getME.glmmTMB}
\alias{getME}
\title{Extract or Get Generalized Components from a Fitted Mixed Effects Model}
\usage{
\method{getME}{glmmTMB}(object, name = c("X", "Xzi", "Z", "Zzi", "Xd", "theta", "beta"), ...)
}
\arguments{
\item{object}{a fitted \code{glmmTMB} object}
\item{name}{name of the component to be retrieved}
\item{\dots}{ignored, for method compatibility}
}
\description{
Extract or Get Generalized Components from a Fitted Mixed Effects Model
}
\seealso{
\code{\link[lme4]{getME}}, the generic from \pkg{lme4} (re-exported by \pkg{glmmTMB})
}
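% A minimal sketch (illustrative; the fit below is a hypothetical example):
\examples{
data(sleepstudy, package = "lme4")
fm <- glmmTMB(Reaction ~ Days + (Days | Subject), sleepstudy)
getME(fm, "beta")   ## fixed-effect coefficients of the conditional model
getME(fm, "theta")  ## random-effects parameters (log-SD/scaled-Cholesky scale)
}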
glmmTMB/man/formula.glmmTMB.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\name{formula.glmmTMB}
\alias{formula.glmmTMB}
\title{Extract the formula of a glmmTMB object}
\usage{
\method{formula}{glmmTMB}(x, fixed.only = FALSE, component = c("cond", "zi", "disp"), ...)
}
\arguments{
\item{x}{a \code{glmmTMB} object}
\item{fixed.only}{(logical) drop random effects, returning only the fixed-effect component of the formula?}
\item{component}{formula for which component of the model to return (conditional, zero-inflation, or dispersion)}
\item{...}{unused, for generic consistency}
}
\description{
Extract the formula of a glmmTMB object
}
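% A minimal sketch (illustrative; 'm' is a hypothetical fit to the bundled
% Salamanders data, and the commented results are the expected returns):
\examples{
m <- glmmTMB(count ~ mined + (1 | site), zi = ~mined,
             family = poisson, data = Salamanders)
formula(m)                     ## full conditional formula
formula(m, fixed.only = TRUE)  ## count ~ mined
formula(m, component = "zi")   ## ~mined
}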
glmmTMB/man/epil2.Rd
\name{epil2}
\title{Seizure Counts for Epileptics - Extended}
\alias{epil2}
\docType{data}
\description{
Extended version of the \code{epil} dataset of the \pkg{MASS} package. The three transformed variables \code{Visit}, \code{Base}, and \code{Age} used by Booth et al. (2003) have been added to \code{epil}.
}
\usage{epil2}
\format{
A data frame with 236 observations on the following 12 variables:
\describe{
\item{\code{y}}{an integer vector.}
\item{\code{trt}}{a factor with levels \code{"placebo"} and \code{"progabide"}.}
\item{\code{base}}{an integer vector.}
\item{\code{age}}{an integer vector.}
\item{\code{V4}}{an integer vector.}
\item{\code{subject}}{an integer vector.}
\item{\code{period}}{an integer vector.}
\item{\code{lbase}}{a numeric vector.}
\item{\code{lage}}{a numeric vector.}
\item{Visit}{\code{(rep(1:4,59) - 2.5) / 5}.}
\item{Base}{\code{log(base/4)}.}
\item{Age}{\code{log(age)}.}
}
}
\references{
Booth, J.G., G. Casella, H. Friedl, and J.P. Hobert. (2003) Negative binomial loglinear mixed models. \emph{Statistical Modelling} \bold{3}, 179--191.
}
\examples{
\donttest{
epil2$subject <- factor(epil2$subject)
op <- options(digits=3)
(fm <- glmmTMB(y ~ Base*trt + Age + Visit + (Visit|subject),
              data=epil2, family=nbinom2))
meths <- methods(class = class(fm))
if((Rv <- getRversion()) > "3.1.3") {
  (funs <- attr(meths, "info")[, "generic"])
  for(F in funs[is.na(match(funs, "getME"))]) {
    cat(sprintf("\%s:\n-----\n", F))
    r <- tryCatch( get(F)(fm), error=identity)
    if (inherits(r, "error")) cat("** Error:", r$message,"\n")
    else tryCatch( print(r) )
    cat(sprintf("---end{\%s}--------------\n\n", F))
  }
}
options(op)
}
}
\keyword{datasets}

glmmTMB/man/getReStruc.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glmmTMB.R
\name{getReStruc}
\alias{getReStruc}
\title{Calculate random effect structure}
\usage{
getReStruc(reTrms, ss = NULL, aa = NULL, reXterms = NULL, fr = NULL)
}
\arguments{
\item{reTrms}{random-effects terms list}
\item{ss}{a character string indicating a valid covariance structure. Must be one of \code{names(glmmTMB:::.valid_covstruct)}; default is to use an unstructured variance-covariance matrix (\code{"us"}) for all blocks.}
\item{aa}{additional arguments (i.e. rank)}
\item{reXterms}{terms objects corresponding to each RE term}
\item{fr}{model frame}
}
\value{
a list
\item{blockNumTheta}{number of variance covariance parameters per term}
\item{blockSize}{size (dimension) of one block}
\item{blockReps}{number of times the blocks are repeated (levels)}
\item{covCode}{structure code}
}
\description{
Calculates number of random effects, number of parameters, block size and number of blocks. Mostly for internal use.
}
\examples{
data(sleepstudy, package="lme4")
rt <- lme4::lFormula(Reaction~Days+(1|Subject)+(0+Days|Subject),
                    sleepstudy)$reTrms
rt2 <- lme4::lFormula(Reaction~Days+(Days|Subject),
                    sleepstudy)$reTrms
getReStruc(rt)
}

glmmTMB/man/bootmer_methods.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\name{isLMM.glmmTMB}
\alias{isLMM.glmmTMB}
\alias{refit.glmmTMB}
\title{support methods for parametric bootstrapping}
\usage{
\method{isLMM}{glmmTMB}(object)

\method{refit}{glmmTMB}(object, newresp, ...)
}
\arguments{
\item{object}{a fitted glmmTMB object}
\item{newresp}{a new response vector}
\item{...}{additional arguments (for generic consistency; ignored)}
}
\description{
see \code{\link[lme4]{refit}} and \code{\link[lme4:isREML]{isLMM}} for details
}
\details{
These methods are still somewhat experimental (check your results carefully!), but they should allow parametric bootstrapping. They work by copying and replacing the original response column in the data frame passed to \code{glmmTMB}, so they will only work properly if (1) the data frame is still available in the environment and (2) the response variable is specified as a single symbol (e.g. \code{proportion}) or as a two-column matrix constructed on the fly with \code{cbind()}. Untested with binomial models where the response is specified as a factor.
}
\examples{
if (requireNamespace("lme4")) {
\dontrun{
fm1 <- glmmTMB(count~mined+(1|spp),
               ziformula=~mined,
               data=Salamanders,
               family=nbinom1)
## single parametric bootstrap step: refit with data simulated from original model
fm1R <- refit(fm1, simulate(fm1)[[1]])
## the bootMer function from lme4 provides a wrapper for doing multiple refits
## with a specified summary function
b1 <- lme4::bootMer(fm1, FUN=function(x) fixef(x)$zi, nsim=20, .progress="txt")
if (requireNamespace("boot")) {
  boot::boot.ci(b1, type="perc")
}
}
}
}

glmmTMB/man/Salamanders.Rd
\name{Salamanders}
\title{Repeated counts of salamanders in streams}
\alias{Salamanders}
\docType{data}
\description{
A data set containing counts of salamanders with site covariates and sampling covariates. Each of 23 sites was sampled 4 times. When using this data set, please cite Price et al. (2016) as well as the Dryad data package (Price et al. 2015).
}
\usage{data(Salamanders)}
\format{
A data frame with 644 observations on the following 9 variables:
\describe{
\item{site}{name of a location where repeated samples were taken}
\item{mined}{factor indicating whether the site was affected by mountain top removal coal mining}
\item{cover}{amount of cover objects in the stream (scaled)}
\item{sample}{repeated sample}
\item{DOP}{Days since precipitation (scaled)}
\item{Wtemp}{water temperature (scaled)}
\item{DOY}{day of year (scaled)}
\item{spp}{abbreviated species name, possibly also life stage}
\item{count}{number of salamanders observed}
}
}
\references{
Price SJ, Muncy BL, Bonner SJ, Drayer AN, Barton CD (2016) Effects of mountaintop removal mining and valley filling on the occupancy and abundance of stream salamanders. \emph{Journal of Applied Ecology} \bold{53} 459--468. \doi{10.1111/1365-2664.12585}

Price SJ, Muncy BL, Bonner SJ, Drayer AN, Barton CD (2015) Data from: Effects of mountaintop removal mining and valley filling on the occupancy and abundance of stream salamanders. \emph{Dryad Digital Repository}. \doi{10.5061/dryad.5m8f6}
}
\examples{
require("glmmTMB")
data(Salamanders)
\donttest{
zipm3 = glmmTMB(count~spp * mined + (1|site), zi=~spp * mined, Salamanders, family="poisson")
}
}
\keyword{datasets}

glmmTMB/man/nbinom2.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/family.R
\name{nbinom2}
\alias{nbinom2}
\alias{family_glmmTMB}
\alias{nbinom1}
\alias{compois}
\alias{truncated_compois}
\alias{genpois}
\alias{truncated_genpois}
\alias{truncated_poisson}
\alias{truncated_nbinom2}
\alias{truncated_nbinom1}
\alias{beta_family}
\alias{betabinomial}
\alias{tweedie}
\alias{ziGamma}
\title{Family functions for glmmTMB}
\usage{
nbinom2(link = "log")

nbinom1(link = "log")

compois(link = "log")

truncated_compois(link = "log")

genpois(link = "log")

truncated_genpois(link = "log")

truncated_poisson(link = "log")

truncated_nbinom2(link = "log")

truncated_nbinom1(link = "log")

beta_family(link = "logit")

betabinomial(link = "logit")

tweedie(link = "log")

ziGamma(link = "inverse")
}
\arguments{
\item{link}{(character) link function for the conditional mean ("log", "logit", "probit", "inverse", "cloglog", "identity", or "sqrt")}
}
\value{
returns a list with (at least) components
\item{family}{length-1 character vector giving the family name}
\item{link}{length-1 character vector specifying the link function}
\item{variance}{a function of either 1 (mean) or 2 (mean and dispersion parameter) arguments giving a value proportional to the predicted variance (scaled by \code{sigma(.)})}
}
\description{
Family functions for glmmTMB
}
\details{
If specified, the dispersion model uses a log link. Denoting the variance as \eqn{V}, the dispersion parameter as \eqn{\phi=\exp(\eta)}{phi=exp(eta)} (where \eqn{\eta}{eta} is the linear predictor from the dispersion model), and the predicted mean as \eqn{\mu}{mu}:
\describe{
\item{gaussian}{(from base R): constant \eqn{V=\phi}{V=phi}}
\item{Gamma}{(from base R) phi is the shape parameter. \eqn{V=\mu\phi}{V=mu*phi}}
\item{ziGamma}{a modified version of \code{Gamma} that skips checks for zero values, allowing it to be used to fit hurdle-Gamma models}
\item{nbinom2}{Negative binomial distribution: quadratic parameterization (Hardin & Hilbe 2007). \eqn{V=\mu(1+\mu/\phi) = \mu+\mu^2/\phi}{V=mu*(1+mu/phi) = mu+mu^2/phi}.}
\item{nbinom1}{Negative binomial distribution: linear parameterization (Hardin & Hilbe 2007). \eqn{V=\mu(1+\phi)}{V=mu*(1+phi)}}
\item{truncated_nbinom2}{Zero-truncated version of nbinom2: variance expression from Shonkwiler 2016. Simulation code (for this and the other truncated count distributions) is taken from C. Geyer's functions in the \code{aster} package; the algorithms are described in \href{https://cran.r-project.org/package=aster/vignettes/trunc.pdf}{this vignette}.}
\item{compois}{Conway-Maxwell Poisson distribution: parameterized with the exact mean (Huang 2017), which differs from the parameterization used in the \pkg{COMPoissonReg} package (Sellers & Shmueli 2010, Sellers & Lotze 2015). \eqn{V=\mu\phi}{V=mu*phi}.}
\item{genpois}{Generalized Poisson distribution (Consul & Famoye 1992). \eqn{V=\mu\exp(\eta)}{V=mu*exp(eta)}. (Note that Consul & Famoye (1992) define \eqn{\phi}{phi} differently.) Our implementation is taken from the \code{HMMpa} package, based on Joe and Zhu (2005) and implemented by Vitali Witowski.}
\item{beta}{Beta distribution: parameterization of Ferrari and Cribari-Neto (2004) and the \pkg{betareg} package (Cribari-Neto and Zeileis 2010); \eqn{V=\mu(1-\mu)/(\phi+1)}{V=mu*(1-mu)/(phi+1)}}
\item{betabinomial}{Beta-binomial distribution: parameterized according to Morris (1997). \eqn{V=\mu(1-\mu)(n(\phi+n)/(\phi+1))}{V=mu*(1-mu)*(n*(phi+n)/(phi+1))}}
\item{tweedie}{Tweedie distribution: \eqn{V=\phi\mu^p}{V=phi*mu^p}. The power parameter is restricted to the interval \eqn{1<p<2}{1<p<2}.}
}
}

glmmTMB/man/glmmTMBControl.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glmmTMB.R
\name{glmmTMBControl}
\alias{glmmTMBControl}
\title{Control parameters for glmmTMB optimization}
\arguments{
\item{parallel}{(integer) number of threads to use for parallelized computations. At present reduced-rank models (i.e., a covariance structure using \code{rr(...)}) cannot be fitted in parallel; the number of threads will be automatically set to 1, with a warning if this overrides the user-specified value.}
\item{eigval_check}{Check eigenvalues of variance-covariance matrix? (This test may be very slow for models with large numbers of fixed-effect parameters.)}
\item{zerodisp_val}{value of the dispersion parameter when \code{dispformula=~0} is specified}
\item{start_method}{(list) Options to initialize the starting values when fitting models with reduced-rank (\code{rr}) covariance structures; \code{jitter.sd} adds variation to the starting values of latent variables when \code{method = "res"}.}
}
\description{
Control parameters for glmmTMB optimization
}
\details{
By default, \code{\link{glmmTMB}} uses the nonlinear optimizer \code{\link{nlminb}} for parameter estimation. Users may sometimes need to adjust optimizer settings in order to get models to converge. For instance, the warning \sQuote{iteration limit reached without convergence} may be fixed by increasing the number of iterations using (e.g.) \code{glmmTMBControl(optCtrl=list(iter.max=1e3,eval.max=1e3))}.

Setting \code{profile=TRUE} allows \code{glmmTMB} to use some special properties of the optimization problem in order to speed up estimation in cases with many fixed effects.

Control parameters may depend on the model specification. The value of the controls is evaluated inside an R object that is derived from the output of the \code{\link{mkTMBStruc}} function. For example, to specify that \code{profile} should be enabled if the model has at least 5 fixed-effect parameters, specify

\code{profile=quote(length(parameters$beta)>=5)}

The \code{optimizer} argument can be any optimization (minimizing) function, provided that:
\itemize{
\item the first three arguments, in order, are the starting values, objective function, and gradient function;
\item the function also takes a \code{control} argument;
\item the function returns a list with elements (at least) \code{par}, \code{objective}, \code{convergence} (0 if convergence is successful) and \code{message} (\code{glmmTMB} automatically handles output from \code{optim()}, by renaming the \code{value} component to \code{objective})
}
}
\examples{
## fit with default (nlminb) and alternative (optim/BFGS) optimizer
m1 <- glmmTMB(count~ mined, family=poisson, data=Salamanders)
m1B <- update(m1, control=glmmTMBControl(optimizer=optim,
               optArgs=list(method="BFGS")))
## estimates are *nearly* identical:
all.equal(fixef(m1), fixef(m1B))
}

glmmTMB/man/getCapabilities.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/family.R
\name{getCapabilities}
\alias{getCapabilities}
\title{List model options that glmmTMB knows about}
\usage{
getCapabilities(what = "all", check = FALSE)
}
\arguments{
\item{what}{(character) which type of model structure to report on ("all","family","link","covstruct")}
\item{check}{(logical) do brute-force checking to test whether families are really implemented (only available for \code{what="family"})}
}
\value{
if \code{check==FALSE}, returns a vector of the names (or a list of name vectors) of allowable entries; if \code{check==TRUE}, returns a logical vector of working families
}
\description{
List model options that glmmTMB knows about
}
\note{
these are all the options that are \emph{defined} internally; they have not necessarily all been \emph{implemented} (FIXME!)
}
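% A minimal sketch (illustrative): listing the available options by type.
\examples{
getCapabilities("link")       ## available link functions
getCapabilities("covstruct")  ## available covariance structures
\donttest{
getCapabilities(what = "family", check = TRUE)  ## brute-force check (may be slow)
}
}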
glmmTMB/man/glmmTMB.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glmmTMB.R
\name{glmmTMB}
\alias{glmmTMB}
\title{Fit Models with TMB}
\usage{
glmmTMB(
  formula,
  data = NULL,
  family = gaussian(),
  ziformula = ~0,
  dispformula = ~1,
  weights = NULL,
  offset = NULL,
  contrasts = NULL,
  na.action,
  se = TRUE,
  verbose = FALSE,
  doFit = TRUE,
  control = glmmTMBControl(),
  REML = FALSE,
  start = NULL,
  map = NULL,
  sparseX = NULL
)
}
\arguments{
\item{formula}{combined fixed and random effects formula, following lme4 syntax.}
\item{data}{data frame (tibbles are OK) containing model variables. Not required, but strongly recommended; if \code{data} is not specified, downstream methods such as prediction with new data (\code{predict(fitted_model, newdata = ...)}) will fail. If it is necessary to call \code{glmmTMB} with model variables taken from the environment rather than from a data frame, specifying \code{data=NULL} will suppress the warning message.}
\item{family}{a family function, a character string naming a family function, or the result of a call to a family function (variance/link function) information. See \code{\link{family}} for a generic discussion of families or \code{\link{family_glmmTMB}} for details of \code{glmmTMB}-specific families.}
\item{ziformula}{a \emph{one-sided} (i.e., no response variable) formula for zero-inflation combining fixed and random effects: the default \code{~0} specifies no zero-inflation. Specifying \code{~.} sets the zero-inflation formula identical to the right-hand side of \code{formula} (i.e., the conditional effects formula); terms can also be added or subtracted. \strong{When using \code{~.} as the zero-inflation formula in models where the conditional effects formula contains an offset term, the offset term will automatically be dropped}. The zero-inflation model uses a logit link.}
\item{dispformula}{a \emph{one-sided} formula for dispersion containing only fixed effects: the default \code{~1} specifies the standard dispersion given any family. The argument is ignored for families that do not have a dispersion parameter. For an explanation of the dispersion parameter for each family, see \code{\link{sigma}}. The dispersion model uses a log link. In Gaussian mixed models, \code{dispformula=~0} fixes the residual variance to be 0 (actually a small non-zero value), forcing variance into the random effects. The precise value can be controlled via \code{control=glmmTMBControl(zero_dispval=...)}; the default value is \code{sqrt(.Machine$double.eps)}.}
\item{weights}{weights, as in \code{glm}. Not automatically scaled to have sum 1.}
\item{offset}{offset for conditional model (only).}
\item{contrasts}{an optional list, e.g., \code{list(fac1="contr.sum")}. See the \code{contrasts.arg} of \code{\link{model.matrix.default}}.}
\item{na.action}{a function that specifies how to handle observations containing \code{NA}s. The default action (\code{na.omit}, inherited from the 'factory fresh' value of \code{getOption("na.action")}) strips any observations with any missing values in any variables. Using \code{na.action = na.exclude} will similarly drop observations with missing values while fitting the model, but will fill in \code{NA} values for the predicted and residual values for cases that were excluded during the fitting process because of missingness.}
\item{se}{whether to return standard errors.}
\item{verbose}{whether progress indication should be printed to the console.}
\item{doFit}{whether to fit the full model, or (if FALSE) return the preprocessed data and parameter objects, without fitting the model.}
\item{control}{control parameters, see \code{\link{glmmTMBControl}}.}
\item{REML}{whether to use REML estimation rather than maximum likelihood.}
\item{start}{starting values, expressed as a list with possible components \code{beta}, \code{betazi}, \code{betad} (fixed-effect parameters for conditional, zero-inflation, dispersion models); \code{b}, \code{bzi} (conditional modes for conditional and zero-inflation models); \code{theta}, \code{thetazi} (random-effect parameters, on the standard deviation/Cholesky scale, for conditional and z-i models); \code{thetaf} (extra family parameters, e.g., shape for Tweedie models).}
\item{map}{a list specifying which parameter values should be fixed to a constant value rather than estimated. \code{map} should be a named list containing factors corresponding to a subset of the internal parameter names (see \code{start} parameter). Distinct factor values are fitted as separate parameter values, \code{NA} values are held fixed: e.g., \code{map=list(beta=factor(c(1,2,3,NA)))} would fit the first three fixed-effect parameters of the conditional model and fix the fourth parameter to its starting value. In general, users will probably want to use \code{start} to specify non-default starting values for fixed parameters. See \code{\link[TMB]{MakeADFun}} for more details.}
\item{sparseX}{a named logical vector containing (possibly) elements named "cond", "zi", "disp" to indicate whether fixed-effect model matrices for particular model components should be generated as sparse matrices, e.g. \code{c(cond=TRUE)}. Default is all \code{FALSE}}
}
\description{
Fit a generalized linear mixed model (GLMM) using Template Model Builder (TMB).
}
\details{
\itemize{
\item Binomial models with more than one trial (i.e., not binary/Bernoulli) can either be specified in the form \code{prob ~ ..., weights = N}, or in the more typical two-column matrix \code{cbind(successes,failures)~...} form.
\item Behavior of \code{REML=TRUE} for Gaussian responses matches \code{lme4::lmer}. It may also be useful in some cases with non-Gaussian responses (Millar 2011). Simulations should be done first to verify.
\item Because the \code{\link{df.residual}} method for \code{glmmTMB} currently counts the dispersion parameter, users should multiply this value by \code{sqrt(nobs(fit) / (1+df.residual(fit)))} when comparing with \code{lm}.
\item Although models can be fitted without specifying a \code{data} argument, its use is strongly recommended; drawing model components from the global environment, or using \code{df$var} notation within model formulae, can lead to confusing (and sometimes hard-to-detect) errors.
\item By default, vector-valued random effects are fitted with unstructured (general symmetric positive definite) variance-covariance matrices. Structured variance-covariance matrices can be specified in the form \code{struc(terms|group)}, where \code{struc} is one of
\itemize{
\item \code{diag} (diagonal, heterogeneous variance)
\item \code{ar1} (autoregressive order-1, homogeneous variance)
\item \code{cs} (compound symmetric, heterogeneous variance)
\item \code{ou} (* Ornstein-Uhlenbeck, homogeneous variance)
\item \code{exp} (* exponential autocorrelation)
\item \code{gau} (* Gaussian autocorrelation)
\item \code{mat} (* Matérn process correlation)
\item \code{toep} (* Toeplitz)
}
Structures marked with * are experimental/untested. See \code{vignette("covstruct", package = "glmmTMB")} for more information.
\item For backward compatibility, the \code{family} argument can also be specified as a list comprising the name of the distribution and the link function (e.g. \code{list(family="binomial", link="logit")}). However, \strong{this alternative is now deprecated}; it produces a warning and will be removed at some point in the future. Furthermore, certain capabilities such as Pearson residuals or predictions on the data scale will only be possible if components such as \code{variance} and \code{linkfun} are present, see \code{\link{family}}.
}
}
\note{
For more information about the \pkg{glmmTMB} package, see Brooks et al. (2017) and the \code{vignette(package="glmmTMB")} collection. For the underlying \pkg{TMB} package that performs the model estimation, see Kristensen et al. (2016).
}
\examples{
\donttest{
(m1 <- glmmTMB(count ~ mined + (1|site),
  zi=~mined,
  family=poisson, data=Salamanders))
summary(m1)
## Zero-inflated negative binomial model
(m2 <- glmmTMB(count ~ spp + mined + (1|site),
  zi=~spp + mined,
  family=nbinom2, data=Salamanders))

## Hurdle Poisson model
(m3 <- glmmTMB(count ~ spp + mined + (1|site),
  zi=~spp + mined,
  family=truncated_poisson, data=Salamanders))

## Binomial model
data(cbpp, package="lme4")
(bovine <- glmmTMB(cbind(incidence, size-incidence) ~ period + (1|herd),
  family=binomial, data=cbpp))

## Dispersion model
sim1 <- function(nfac=40, nt=100, facsd=0.1, tsd=0.15, mu=0, residsd=1) {
  dat <- expand.grid(fac=factor(letters[1:nfac]), t=1:nt)
  n <- nrow(dat)
  dat$REfac <- rnorm(nfac, sd=facsd)[dat$fac]
  dat$REt <- rnorm(nt, sd=tsd)[dat$t]
  dat$x <- rnorm(n, mean=mu, sd=residsd) + dat$REfac + dat$REt
  dat
}
set.seed(101)
d1 <- sim1(mu=100, residsd=10)
d2 <- sim1(mu=200, residsd=5)
d1$sd <- "ten"
d2$sd <- "five"
dat <- rbind(d1, d2)
m0 <- glmmTMB(x ~ sd + (1|t), dispformula=~sd, data=dat)
fixef(m0)$disp
c(log(5^2), log(10^2)-log(5^2)) # expected dispersion model coefficients

## Using 'map' to fix random-effects SD to 10
m1_map <- update(m1, map=list(theta=factor(NA)),
                 start=list(theta=log(10)))
VarCorr(m1_map)
}
}
\references{
Brooks, M. E., Kristensen, K., van Benthem, K. J., Magnusson, A., Berg, C. W., Nielsen, A., Skaug, H. J., Mächler, M. and Bolker, B. M. (2017). glmmTMB balances speed and flexibility among packages for zero-inflated generalized linear mixed modeling. \emph{The R Journal}, \bold{9}(2), 378--400.

Kristensen, K., Nielsen, A., Berg, C. W., Skaug, H. and Bell, B. (2016). TMB: Automatic differentiation and Laplace approximation. \emph{Journal of Statistical Software}, \bold{70}, 1--21.

Millar, R. B. (2011). \emph{Maximum Likelihood Estimation and Inference: With Examples in R, SAS and ADMB.} Wiley, New York.
}

glmmTMB/man/startParams.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glmmTMB.R
\name{startParams}
\alias{startParams}
\title{Change starting parameters, either by residual method or by user input (start)}
\usage{
startParams(
  parameters,
  formula,
  ziformula,
  dispformula,
  fr,
  yobs,
  weights,
  size = NULL,
  Xd = NULL,
  XdS = NULL,
  family,
  condReStruc,
  start = NULL,
  sparseX = NULL,
  start_method = list(method = NULL, jitter.sd = 0)
)
}
\arguments{
\item{formula}{current formula, containing both fixed & random effects}
\item{ziformula}{a \emph{one-sided} (i.e., no response variable) formula for zero-inflation combining fixed and random effects: the default \code{~0} specifies no zero-inflation. Specifying \code{~.} sets the zero-inflation formula identical to the right-hand side of \code{formula} (i.e., the conditional effects formula); terms can also be added or subtracted. \strong{When using \code{~.} as the zero-inflation formula in models where the conditional effects formula contains an offset term, the offset term will automatically be dropped}. The zero-inflation model uses a logit link.}
\item{dispformula}{a \emph{one-sided} formula for dispersion containing only fixed effects: the default \code{~1} specifies the standard dispersion given any family. The argument is ignored for families that do not have a dispersion parameter. For an explanation of the dispersion parameter for each family, see \code{\link{sigma}}. The dispersion model uses a log link. In Gaussian mixed models, \code{dispformula=~0} fixes the residual variance to be 0 (actually a small non-zero value), forcing variance into the random effects. The precise value can be controlled via \code{control=glmmTMBControl(zero_dispval=...)}; the default value is \code{sqrt(.Machine$double.eps)}.}
\item{fr}{model frame}
\item{yobs}{observed y}
\item{weights}{weights, as in \code{glm}. Not automatically scaled to have sum 1.}
\item{size}{number of trials in binomial and betabinomial families}
\item{family}{family object}
\item{start}{starting values, expressed as a list with possible components \code{beta}, \code{betazi}, \code{betad} (fixed-effect parameters for conditional, zero-inflation, dispersion models); \code{b}, \code{bzi} (conditional modes for conditional and zero-inflation models); \code{theta}, \code{thetazi} (random-effect parameters, on the standard deviation/Cholesky scale, for conditional and z-i models); \code{thetaf} (extra family parameters, e.g., shape for Tweedie models).}
\item{sparseX}{see \code{\link{glmmTMB}}}
\item{start_method}{Options to initialize the starting values for rr parameters; \code{jitter.sd} adds variation to the starting values of latent variables when \code{method = "res"}.}
}
\description{
Change starting parameters, either by residual method or by user input (start)
}
\keyword{internal}

glmmTMB/man/splitForm.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reformulas.R
\name{splitForm}
\alias{splitForm}
\alias{noSpecials}
\title{Split formula containing special random effect terms}
\usage{
splitForm(
  formula,
  defaultTerm = "us",
  allowFixedOnly = TRUE,
  allowNoSpecials = TRUE,
  debug = FALSE
)

noSpecials(term, delete = TRUE, debug = FALSE)
}
\arguments{
\item{formula}{a formula containing special random effect terms}
\item{defaultTerm}{default type for non-special RE terms}
\item{allowFixedOnly}{(logical) are formulas with no RE terms OK?}
\item{allowNoSpecials}{(logical) are formulas with only standard RE terms OK?}
\item{debug}{debugging mode (print stuff)?}
\item{term}{language object}
}
\value{
a list containing elements \code{fixedFormula}; \code{reTrmFormulas} list of \code{x | g} formulas for each term; \code{reTrmAddArgs} list of function+additional arguments, i.e. \code{list()} (non-special), \code{foo()} (no additional arguments), \code{foo(addArgs)} (additional arguments); \code{reTrmClasses} (vector of special functions/classes, as character)
}
\description{
Parse a formula into fixed formula and random effect terms, treating 'special' terms (of the form foo(x|g[,m])) appropriately
}
\details{
Taken from Steve Walker's lme4ord, ultimately from the flexLambda branch of lme4. Mostly for internal use.
}
\examples{
splitForm(~x+y)                     ## no specials or RE
splitForm(~x+y+(f|g))               ## no specials
splitForm(~x+y+diag(f|g))           ## one special
splitForm(~x+y+(diag(f|g)))         ## 'hidden' special
splitForm(~x+y+(f|g)+cs(1|g))       ## combination
splitForm(~x+y+(1|f/g))             ## 'slash'; term
splitForm(~x+y+(1|f/g/h))           ## 'slash'; term
splitForm(~x+y+(1|(f/g)/h))         ## 'slash'; term
splitForm(~x+y+(f|g)+cs(1|g)+cs(a|b,stuff))  ## complex special
splitForm(~(((x+y))))               ## lots of parentheses
splitForm(~1+rr(f|g,n=2))

noSpecials(y~1+us(1|f))
noSpecials(y~1+us(1|f),delete=FALSE)
noSpecials(y~us(1|f))
noSpecials(y~us(1|f), delete=FALSE)
noSpecials(y~us(1|f), debug=TRUE)
noSpecials(y~us+1)  ## should *not* delete unless head of a function
noSpecials(~us+1)   ## should work on a one-sided formula!
}
\author{
Steve Walker
}
\keyword{internal}

glmmTMB/man/fitTMB.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glmmTMB.R
\name{fitTMB}
\alias{fitTMB}
\title{Optimize a TMB model and package results}
\usage{
fitTMB(TMBStruc)
}
\arguments{
\item{TMBStruc}{a list containing the TMB data and parameter structures, as produced by \code{glmmTMB(..., doFit = FALSE)}}
}
\description{
This function (called internally by \code{\link{glmmTMB}}) runs the actual model optimization, after all of the appropriate structures have been set up. It can be useful to run \code{\link{glmmTMB}} with \code{doFit=TRUE}, adjust the components as required, and then finish the fitting process with \code{fitTMB} (however, it is the user's responsibility to make sure that any modifications create an internally consistent final fitted object).
}
\examples{
m0 <- glmmTMB(count ~ mined + (1|site),
  family=poisson, data=Salamanders, doFit=FALSE)
names(m0)
fitTMB(m0)
}

glmmTMB/man/reexports.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\docType{import}
\name{reexports}
\alias{reexports}
\alias{refit}
\title{Objects exported from other packages}
\keyword{internal}
\description{
These objects are imported from other packages. Follow the links below to see their documentation.

\describe{
  \item{lme4}{\code{\link[lme4]{refit}}}
}}

glmmTMB/man/vcov.glmmTMB.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\name{vcov.glmmTMB}
\alias{vcov.glmmTMB}
\title{Calculate Variance-Covariance Matrix for a Fitted glmmTMB model}
\usage{
\method{vcov}{glmmTMB}(object, full = FALSE, include_mapped = FALSE, ...)
}
\arguments{
\item{object}{a \dQuote{glmmTMB} fit}
\item{full}{return a full variance-covariance matrix?}
\item{include_mapped}{include mapped variables? (these will be given variances and covariances of NA)}
\item{\dots}{ignored, for method compatibility}
}
\value{
By default (\code{full==FALSE}), a list of separate variance-covariance matrices for each model component (conditional, zero-inflation, dispersion). If \code{full==TRUE}, a single square variance-covariance matrix for \emph{all} top-level model parameters (conditional, dispersion, and variance-covariance parameters)
}
\description{
Calculate Variance-Covariance Matrix for a Fitted glmmTMB model
}
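% A minimal sketch (illustrative; 'm' is a hypothetical fit to the bundled
% Salamanders data):
\examples{
m <- glmmTMB(count ~ mined + (1 | site), zi = ~mined,
             family = poisson, data = Salamanders)
str(vcov(m))              ## a list with one matrix per component ("cond", "zi")
dim(vcov(m, full = TRUE)) ## a single matrix covering all top-level parameters
}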
glmmTMB/man/diagnose.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/diagnose.R
\name{diagnose}
\alias{diagnose}
\title{diagnose model problems}
\usage{
diagnose(
  fit,
  eval_eps = 1e-05,
  evec_eps = 0.01,
  big_coef = 10,
  big_sd_log10 = 3,
  big_zstat = 5,
  check_coefs = TRUE,
  check_zstats = TRUE,
  check_hessian = TRUE,
  check_scales = TRUE
)
}
\arguments{
\item{fit}{a \code{glmmTMB} fit}
\item{eval_eps}{numeric tolerance for 'bad' eigenvalues}
\item{evec_eps}{numeric tolerance for 'bad' eigenvector elements}
\item{big_coef}{numeric tolerance for large coefficients}
\item{big_sd_log10}{numeric tolerance for badly scaled parameters (log10 scale), i.e. for default value of 3, predictor variables with sd less than 1e-3 or greater than 1e3 will be flagged)}
\item{big_zstat}{numeric tolerance for Z-statistic}
\item{check_coefs}{identify large-magnitude coefficients? (Only checks conditional-model parameters if a (log, logit, cloglog, probit) link is used. Always checks zero-inflation, dispersion, and random-effects parameters. May produce false positives if predictor variables have extremely large scales.)}
\item{check_zstats}{identify parameters with unusually large Z-statistics (ratio of estimate to standard error)? Identifies likely failures of Wald confidence intervals/p-values.}
\item{check_hessian}{identify non-positive-definite Hessian components?}
\item{check_scales}{identify predictors with unusually small or large scales?}
}
\value{
a logical value based on whether anything questionable was found
}
\description{
\strong{EXPERIMENTAL}. For a given model, this function attempts to isolate potential causes of convergence problems. It checks (1) whether there are any unusually large coefficients; (2) whether there are any unusually scaled predictor variables; (3) if the Hessian (curvature of the negative log-likelihood surface at the MLE) is positive definite (i.e., whether the MLE really represents an optimum). For each case it tries to isolate the particular parameters that are problematic.
}
\details{
Problems in one category (e.g. complete separation) will generally also appear in "downstream" categories (e.g. non-positive-definite Hessians). Therefore, it is generally advisable to try to deal with problems in order, e.g. address problems with complete separation first, then re-run the diagnostics to see whether Hessian problems persist.
}
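% A minimal sketch (illustrative): diagnose() prints its findings and returns
% a logical value indicating whether anything questionable was found.
\examples{
\donttest{
m <- glmmTMB(count ~ mined + (1 | site), family = poisson, data = Salamanders)
ok <- diagnose(m)  ## expect no flags for this well-behaved fit
}
}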
glmmTMB/man/confint.glmmTMB.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\name{confint.glmmTMB}
\alias{confint.glmmTMB}
\title{Calculate confidence intervals}
\usage{
\method{confint}{glmmTMB}(
  object,
  parm = NULL,
  level = 0.95,
  method = c("wald", "Wald", "profile", "uniroot"),
  component = c("all", "cond", "zi", "other"),
  estimate = TRUE,
  parallel = c("no", "multicore", "snow"),
  ncpus = getOption("profile.ncpus", 1L),
  cl = NULL,
  full = FALSE,
  ...
)
}
\arguments{
\item{object}{\code{glmmTMB} fitted object.}
\item{parm}{which parameters to profile, specified
\itemize{
\item by index (position) [\emph{after} component selection for \code{confint}, if any]
\item by name (matching the row/column names of \code{vcov(object,full=TRUE)})
\item as \code{"theta_"} (random-effects variance-covariance parameters), \code{"beta_"} (conditional and zero-inflation parameters), or \code{"disp_"} or \code{"sigma"} (dispersion parameters)
}
Parameter indexing by number may give unusual results when some parameters have been fixed using the \code{map} argument: please report surprises to the package maintainers.}
\item{level}{Confidence level.}
\item{method}{'wald', 'profile', or 'uniroot': see Details}
\item{component}{Which of the three components 'cond', 'zi' or 'other' to select. Default is to select 'all'.}
\item{estimate}{(logical) add a third column with the estimate?}
\item{parallel}{method (if any) for parallel computation}
\item{ncpus}{number of CPUs/cores to use for parallel computation}
\item{cl}{cluster to use for parallel computation}
\item{full}{CIs for all parameters (including dispersion)?}
\item{...}{arguments may be passed to \code{\link{profile.merMod}} or \code{\link[TMB]{tmbroot}}}
}
\description{
Calculate confidence intervals
}
\details{
Available methods are
\describe{
\item{"wald"}{These intervals are based on the standard errors calculated for parameters on the scale of their internal parameterization depending on the family. Derived quantities such as standard deviation parameters and dispersion parameters are back-transformed. It follows that confidence intervals for these derived quantities are typically asymmetric.}
\item{"profile"}{This method computes a likelihood profile for the specified parameter(s) using \code{profile.glmmTMB}; fits a spline function to each half of the profile; and inverts the function to find the specified confidence interval.}
\item{"uniroot"}{This method uses the \code{\link{uniroot}} function to find critical values of one-dimensional profile functions for each specified parameter.}
}
At present, "wald" returns confidence intervals for variance parameters on the standard deviation/correlation scale, while "profile" and "uniroot" report them on the underlying ("theta") scale: for each random effect, the first set of parameter values are standard deviations on the log scale, while remaining parameters represent correlations on the scaled Cholesky scale (see \code{\link{get_cor}}).
}
\examples{
data(sleepstudy, package="lme4")
model <- glmmTMB(Reaction ~ Days + (1|Subject), sleepstudy)
model2 <- glmmTMB(Reaction ~ Days + (1|Subject), sleepstudy,
    dispformula= ~I(Days>8))
confint(model)  ## Wald/delta-method CIs
confint(model,parm="theta_")  ## Wald/delta-method CIs
confint(model,parm=1,method="profile")
}

glmmTMB/man/expandGrpVar.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reformulas.R
\name{expandGrpVar}
\alias{expandGrpVar}
\title{expand grouping variables}
\usage{
expandGrpVar(f)
}
\arguments{
\item{f}{a language object (an atom of a formula)}
}
\description{
Expand interaction/nesting operators (\code{*}, \code{/}) in the grouping variable of a random-effects term.
}
\examples{
expandGrpVar(quote(x*y))
expandGrpVar(quote(x/y))
}

glmmTMB/man/simulate.glmmTMB.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\name{simulate.glmmTMB}
\alias{simulate.glmmTMB}
\title{Simulate from a glmmTMB fitted model}
\usage{
\method{simulate}{glmmTMB}(object, nsim = 1, seed = NULL, ...)
}
\arguments{
\item{object}{glmmTMB fitted model}
\item{nsim}{number of response lists to simulate. Defaults to 1.}
\item{seed}{random number seed}
\item{...}{extra arguments}
}
\value{
returns a list of vectors. The list has length \code{nsim}. Each simulated vector of observations is the same size as the vector of response variables in the original data set. In the binomial family case each simulation is a two-column matrix with success/failure.
}
\description{
Simulate from a glmmTMB fitted model
}
\details{
Random effects are also simulated from their estimated distribution. Currently, it is not possible to condition on estimated random effects.
}
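% A minimal sketch (illustrative; 'm' is a hypothetical fit to the bundled
% Salamanders data):
\examples{
m <- glmmTMB(count ~ mined + (1 | site), family = poisson, data = Salamanders)
s <- simulate(m, nsim = 2, seed = 101)
length(s)                            ## 2 simulated response vectors
length(s[[1]]) == nrow(Salamanders)  ## each the same length as the data
}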
test_data/foo.rda)}

\item{verbose}{print names of updated objects?}

\item{mustWork}{fail if file not found?}
}
\description{
Conditionally update a glmmTMB object fitted with an old TMB version;
load data from a system file, updating any glmmTMB objects it contains.
}
glmmTMB/man/get_cor.Rd0000644000176200001440000000223114070567426014246 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{get_cor}
\alias{get_cor}
\title{translate vector of correlation parameters to correlation values}
\usage{
get_cor(theta)
}
\arguments{
\item{theta}{vector of internal correlation parameters}
}
\value{
a vector of correlation values
}
\description{
translate vector of correlation parameters to correlation values
}
\details{
This function follows the definition at
\url{http://kaskr.github.io/adcomp/classUNSTRUCTURED__CORR__t.html}:
if \eqn{L} is the lower-triangular matrix with 1 on the diagonal and the
correlation parameters in the lower triangle, then the correlation matrix is
defined as \eqn{\Sigma = D^{-1/2} L L^\top D^{-1/2}}{Sigma = D^(-1/2) L L' D^(-1/2)},
where \eqn{D = \textrm{diag}(L L^\top)}{D = diag(L L')}. For a single
correlation parameter \eqn{\theta_0}{theta0}, this works out to
\eqn{\rho = \theta_0/\sqrt{1+\theta_0^2}}{rho = theta0/sqrt(1+theta0^2)}.
The function returns the elements of the lower triangle of the correlation
matrix, in column-major order.
}
\examples{
th0 <- 0.5
stopifnot(all.equal(get_cor(th0),th0/sqrt(1+th0^2)))
get_cor(c(0.5,0.2,0.5))
}
glmmTMB/man/fixef.Rd0000644000176200001440000000224614070567426013731 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\docType{methods}
\name{fixef}
\alias{fixef}
\alias{fixef.glmmTMB}
\title{Extract fixed-effects estimates}
\usage{
\method{fixef}{glmmTMB}(object, ...)
}
\arguments{
\item{object}{any fitted model object from which fixed effects estimates can
be extracted.}

\item{\dots}{optional additional arguments. Currently none are used in any
methods.}
}
\value{
an object of class \code{fixef.glmmTMB} comprising a list of components
(\code{cond}, \code{zi}, \code{disp}), each containing a (possibly
zero-length) numeric vector of coefficients
}
\description{
Extract Fixed Effects
}
\details{
Extract fixed effects from a fitted \code{glmmTMB} model. The print method
for \code{fixef.glmmTMB} objects \emph{only displays non-trivial components}:
in particular, the dispersion parameter estimate is not printed for models
with a single (intercept) dispersion parameter (see examples).
}
\examples{
data(sleepstudy, package = "lme4")
fm1 <- glmmTMB(Reaction ~ Days, sleepstudy)
(f1 <- fixef(fm1))
f1$cond
## show full coefficients, including dispersion parameter
unlist(f1)
print.default(f1)
}
\keyword{models}
glmmTMB/man/downstream_methods.Rd0000644000176200001440000000555414120405266016530 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Anova.R, R/effects.R, R/emmeans.R
\name{Anova.glmmTMB}
\alias{Anova.glmmTMB}
\alias{Effect.glmmTMB}
\alias{downstream_methods}
\alias{emmeans.glmmTMB}
\title{Downstream methods}
\usage{
Anova.glmmTMB(
  mod,
  type = c("II", "III", 2, 3),
  test.statistic = c("Chisq", "F"),
  component = "cond",
  vcov. = vcov(mod)[[component]],
  singular.ok,
  ...
)

Effect.glmmTMB(focal.predictors, mod, ...)
}
\arguments{
\item{mod}{a glmmTMB model}

\item{type}{type of test, \code{"II"}, \code{"III"}, \code{2}, or \code{3}.
Roman numerals are equivalent to the corresponding Arabic numerals.
See \code{\link[car]{Anova}} for details.}

\item{test.statistic}{unused: only valid choice is "Chisq" (i.e., Wald
chi-squared test)}

\item{component}{which component of the model to test/analyze ("cond", "zi",
or "disp")}

\item{vcov.}{variance-covariance matrix (usually extracted automatically)}

\item{singular.ok}{OK to do ANOVA with singular models (unused)?}

\item{\dots}{Additional parameters that may be supported by the method.}

\item{focal.predictors}{a character vector of one or more predictors in the
model, in any order.}
}
\description{
Methods have been written that allow \code{glmmTMB} objects to be used with
several downstream packages that enable different forms of inference.
For some methods (\code{Anova} and \code{emmeans}, but \emph{not}
\code{effects} at present), set the \code{component} argument to "cond"
(conditional, the default), "zi" (zero-inflation) or "disp" (dispersion) in
order to produce results for the corresponding part of a \code{glmmTMB}
model. In particular,
\itemize{
\item \code{car::Anova} constructs type-II and type-III Anova tables for the
fixed effect parameters of any component
\item the \code{emmeans} package computes estimated marginal means
(previously known as least-squares means) for the fixed effects of any
component
\item the \code{effects} package computes graphical and tabular effect
displays (only for the fixed effects of the conditional component)
}
}
\details{
While the examples below are disabled for earlier versions of R, they may
still work; it may be necessary to refer to private versions of methods,
e.g. \code{glmmTMB:::Anova.glmmTMB(model, ...)}.
}
\examples{
warp.lm <- glmmTMB(breaks ~ wool * tension, data = warpbreaks)
salamander1 <- up2date(readRDS(system.file("example_files","salamander1.rds",package="glmmTMB")))
if (require(emmeans)) {
    emmeans(warp.lm, poly ~ tension | wool)
    emmeans(salamander1, ~ mined, type="response")
    emmeans(salamander1, ~ mined, component="zi", type="response")
}
if (getRversion() >= "3.6.0") {
    if (require(car)) {
        Anova(warp.lm,type="III")
        Anova(salamander1)
        Anova(salamander1, component="zi")
    }
    if (require(effects)) {
        plot(allEffects(warp.lm))
        plot(allEffects(salamander1))
    }
}
}
glmmTMB/man/numFactor.Rd0000644000176200001440000000267014070567426014567 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils_covstruct.R
\name{numFactor}
\alias{numFactor}
\alias{parseNumLevels}
\title{Factor with numerically interpretable levels.}
\usage{
numFactor(x, ...)

parseNumLevels(levels)
}
\arguments{
\item{x}{Vector, matrix or data.frame that constitute the coordinates.}

\item{...}{Additional vectors, matrices or data.frames that constitute the
coordinates.}

\item{levels}{Character vector to parse into numeric values.}
}
\value{
Factor with specialized coding of levels.
}
\description{
Create a factor with numerically interpretable factor levels.
}
\details{
Some \code{glmmTMB} covariance structures require extra information, such as
temporal or spatial coordinates. \code{numFactor} makes it possible to
associate such extra information with the factor via its levels. The
original numeric coordinates are recoverable without loss of precision using
the function \code{parseNumLevels}. Factor levels are sorted coordinate-wise
from left to right: the first coordinate runs fastest.
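For example (a worked illustration of this ordering rule, not an additional
feature), the two-dimensional coordinates (1,1), (1,2) and (2,1) are ordered
as (1,1), (2,1), (1,2): the first coordinate varies fastest within the sort.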
}
\examples{
## 1D example
numFactor(sample(1:5,20,TRUE))
## 2D example
coords <- cbind( sample(1:5,20,TRUE), sample(1:5,20,TRUE) )
(f <- numFactor(coords))
parseNumLevels(levels(f)) ## Sorted
## Used as part of a model.matrix
model.matrix( ~f )
## parseNumLevels( colnames(model.matrix( ~f )) )
## Error: 'Failed to parse numeric levels: (Intercept)'
parseNumLevels( colnames(model.matrix( ~ f-1 )) )
}
glmmTMB/man/formatVC.Rd0000644000176200001440000000226214070567426014351 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VarCorr.R
\name{formatVC}
\alias{formatVC}
\title{Format the 'VarCorr' Matrix of Random Effects}
\usage{
formatVC(
  varcor,
  digits = max(3, getOption("digits") - 2),
  comp = "Std.Dev.",
  formatter = format,
  useScale = attr(varcor, "useSc"),
  ...
)
}
\arguments{
\item{varcor}{a \code{\link{VarCorr}} (-like) matrix with attributes.}

\item{digits}{the number of significant digits.}

\item{comp}{character vector of length one or two indicating which columns
out of "Variance" and "Std.Dev." should be shown in the formatted output.}

\item{formatter}{the \code{\link{function}} to be used for formatting the
standard deviations and/or variances (but \emph{not} the correlations,
which (currently) are always formatted as "0.nnn").}

\item{useScale}{whether to report a scale parameter (e.g. residual standard
deviation)}

\item{...}{optional arguments for \code{formatter(*)} in addition to the
first (numeric vector) and \code{digits}.}
}
\value{
a character matrix of formatted VarCorr entries from \code{varcor}.
}
\description{
"format()" the 'VarCorr' matrix of the random effects -- for print()ing and
show()ing
}
glmmTMB/man/formfuns.Rd0000644000176200001440000000715614120405266014463 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reformulas.R
\name{expandDoubleVert}
\alias{expandDoubleVert}
\alias{RHSForm<-}
\alias{sumTerms}
\alias{reOnly}
\alias{makeOp}
\alias{addForm0}
\alias{addForm}
\alias{expandAllGrpVar}
\alias{findbars_x}
\alias{inForm}
\alias{extractForm}
\alias{dropHead}
\alias{drop.special}
\alias{replaceForm}
\title{expand double-bar RE notation by splitting}
\usage{
expandDoubleVert(term)

RHSForm(formula) <- value

sumTerms(termList)

reOnly(f, response = FALSE, bracket = TRUE)

makeOp(x, y, op = NULL)

addForm0(f1, f2)

addForm(...)

expandAllGrpVar(bb)

findbars_x(
  term,
  debug = FALSE,
  specials = character(0),
  default.special = "us",
  expand_doublevert_method = c("diag_special", "split")
)

inForm(form, value)

extractForm(term, value)

dropHead(term, value)

drop.special(x, value = quote(offset), preserve = NULL)

replaceForm(term, target, repl)
}
\arguments{
\item{term}{expression/formula}

\item{formula}{a formula object}

\item{value}{term to remove from formula}

\item{termList}{a list of formula terms}

\item{f}{a formula}

\item{response}{include response variable?}

\item{bracket}{bracket-protect terms?}

\item{x}{formula}

\item{y}{a formula term (or an operator)}

\item{op}{an operator}

\item{f1}{formula #1}

\item{f2}{formula #2}

\item{...}{arguments to pass through to \code{addForm0}}

\item{bb}{a list of naked grouping variables, i.e. 1 | f}

\item{debug}{(logical) debug?}

\item{specials}{list of special terms}

\item{default.special}{character: special to use for parenthesized terms -
i.e. random effects terms with unspecified structure}

\item{expand_doublevert_method}{method for handling \code{||} operator:
split into separate terms or replace by \code{diag}?
Inherited from \emph{previous call where it was specified}.}

\item{preserve}{(integer) retain the specified occurrence of "value"}
}
\value{
a list of expressions
}
\description{
Modeled after lme4:::expandSlash, by Doug Bates
}
\details{
\code{findbars_x} handles sub-terms recursively:
\enumerate{
\item an atom (not a call or an expression): \code{NULL}
\item a special, i.e. \code{foo(...)} where "foo" is in \code{specials}:
return the term
\item a parenthesized term: \emph{if} the head of the head is \code{|}
(i.e. it is of the form \code{(xx|gg)}), then convert it to the default
special type; we won't allow pathological cases like \code{((xx|gg))} ...
[can we detect them?]
}
}
\examples{
addForm0(y~x,~1)
addForm0(~x,~y)
ff <- lme4::findbars(y~1+(x|f/g))
expandAllGrpVar(ff)
expandAllGrpVar(quote(1|(f/g)/h))
expandAllGrpVar(quote(1|f/g/h))
expandAllGrpVar(quote(1|f*g))
splitForm(quote(us(x,n=2)))
findbars_x(~ 1 + (x + y || g), expand_doublevert_method = "diag_special")
findbars_x(~ 1 + (x + y || g), expand_doublevert_method = "split")
findbars_x(~ 1 + (1 | f) + (1 | g))
findbars_x(~ 1 + (1|h) + (x + y || g), expand_doublevert_method = "split")
findbars_x(~ 1 + (1|Subject))
findbars_x(~ (1||Subject))
findbars_x(~ (1|Subject))
findbars_x(~ (1|Subject), default.special = NULL)
findbars_x(~ 1 + x)
inForm(z~.,quote(.))
inForm(z~y,quote(.))
inForm(z~a+b+c,quote(c))
inForm(z~a+b+(d+e),quote(c))
f <- ~ a + offset(x)
f2 <- z ~ a
inForm(f,quote(offset))
inForm(f2,quote(offset))
extractForm(~a+offset(b),quote(offset))
extractForm(~c,quote(offset))
extractForm(~a+offset(b)+offset(c),quote(offset))
extractForm(~offset(x),quote(offset))
dropHead(~a+offset(b),quote(offset))
dropHead(~a+poly(x+z,3)+offset(b),quote(offset))
drop.special(x~a + b+ offset(z))
replaceForm(quote(a(b+x*c(y,z))),quote(y),quote(R))
ss <- ~(1 | cask:batch) + (1 | batch)
replaceForm(ss,quote(cask:batch),quote(batch:cask))
replaceForm(ss, quote(`:`), quote(`\%:\%`))
}
\keyword{internal}
glmmTMB/man/profile.glmmTMB.Rd0000644000176200001440000000642414120405266015557 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/profile.R
\name{profile.glmmTMB}
\alias{profile.glmmTMB}
\alias{confint.profile.glmmTMB}
\title{Compute likelihood profiles for a fitted model}
\usage{
\method{profile}{glmmTMB}(
  fitted,
  parm = NULL,
  level_max = 0.99,
  npts = 8,
  stepfac = 1/4,
  stderr = NULL,
  trace = FALSE,
  parallel = c("no", "multicore", "snow"),
  ncpus = getOption("profile.ncpus", 1L),
  cl = NULL,
  ...
)

\method{confint}{profile.glmmTMB}(object, parm = NULL, level = 0.95, ...)
}
\arguments{
\item{fitted}{a fitted \code{glmmTMB} object}

\item{parm}{which parameters to profile, specified
\itemize{
\item by index (position)
\item by name (matching the row/column names of \code{vcov(object,full=TRUE)})
\item as \code{"theta_"} (random-effects variance-covariance parameters) or
\code{"beta_"} (conditional and zero-inflation parameters)
}}

\item{level_max}{maximum confidence interval target for profile}

\item{npts}{target number of points in (each half of) the profile
(\emph{approximate})}

\item{stepfac}{initial step factor (fraction of estimated standard
deviation)}

\item{stderr}{standard errors to use as a scaling factor when picking step
sizes to compute the profile; by default (if \code{stderr} is \code{NULL},
or \code{NA} for a particular element), uses the estimated (Wald) standard
errors of the parameters}

\item{trace}{print tracing information?
If \code{trace=FALSE} or 0, no tracing; if \code{trace=1}, print names of
parameters currently being profiled; if \code{trace>1}, turn on tracing for
the underlying \code{\link[TMB]{tmbprofile}} function}

\item{parallel}{method (if any) for parallel computation}

\item{ncpus}{number of CPUs/cores to use for parallel computation}

\item{cl}{cluster to use for parallel computation}

\item{...}{additional arguments passed to \code{\link[TMB]{tmbprofile}}}

\item{object}{a fitted profile (\code{profile.glmmTMB}) object}

\item{level}{confidence level}
}
\value{
An object of class \code{profile.glmmTMB}, which is also a data frame, with
columns \code{.par} (parameter being profiled), \code{.focal} (value of
focal parameter), and \code{value} (negative log-likelihood).
}
\description{
Compute likelihood profiles for a fitted model
}
\details{
Fits natural splines separately to the points from each half of the profile
for each specified parameter (i.e., values above and below the MLE), then
finds the inverse functions to estimate the endpoints of the confidence
interval.
}
\examples{
\dontrun{
m1 <- glmmTMB(count~ mined + (1|site),
       zi=~mined, family=poisson, data=Salamanders)
salamander_prof1 <- profile(m1, parallel="multicore",
                            ncpus=2, trace=1)
## testing
salamander_prof1 <- profile(m1, trace=1,parm=1)
salamander_prof1M <- profile(m1, trace=1,parm=1, npts = 4)
salamander_prof2 <- profile(m1, parm="theta_")
}
salamander_prof1 <- readRDS(system.file("example_files","salamander_prof1.rds",package="glmmTMB"))
if (require("ggplot2")) {
    ggplot(salamander_prof1,aes(.focal,sqrt(value))) +
        geom_point() + geom_line()+
        facet_wrap(~.par,scale="free_x")+
        geom_hline(yintercept=1.96,linetype=2)
}
salamander_prof1 <- readRDS(system.file("example_files","salamander_prof1.rds",package="glmmTMB"))
confint(salamander_prof1)
confint(salamander_prof1,level=0.99)
}
glmmTMB/man/omp_check.Rd0000644000176200001440000000111414070567426014553 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{omp_check}
\alias{omp_check}
\title{Check OpenMP status}
\usage{
omp_check()
}
\value{
\code{TRUE} or \code{FALSE} depending on availability of OpenMP
}
\description{
Checks whether OpenMP has been successfully enabled for this installation of
the package. (Use the \code{parallel} argument to
\code{\link{glmmTMBControl}}, or set \code{options(glmmTMB.cores=[value])},
to specify that computations should be done in parallel.)
}
\seealso{
\code{\link[TMB]{benchmark}}, \code{\link{glmmTMBControl}}
}
glmmTMB/man/dtruncated_nbinom2.Rd0000644000176200001440000000135614120405266016401 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{dtruncated_nbinom2}
\alias{dtruncated_nbinom2}
\alias{dtruncated_poisson}
\alias{dtruncated_nbinom1}
\title{truncated distributions}
\usage{
dtruncated_nbinom2(x, size, mu, k = 0, log = FALSE)

dtruncated_poisson(x, lambda, k = 0, log = FALSE)

dtruncated_nbinom1(x, phi, mu, k = 0, log = FALSE)
}
\arguments{
\item{x}{value}

\item{size}{number of trials/overdispersion parameter}

\item{mu}{mean parameter}

\item{k}{truncation parameter}

\item{log}{(logical) return log-probability?}

\item{lambda}{mean parameter}

\item{phi}{overdispersion parameter}
}
\description{
Probability functions for k-truncated Poisson and negative binomial
distributions.
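In the standard definition of k-truncation (which these functions are
assumed to follow), the truncated probability function is that of the
corresponding untruncated variable conditioned on exceeding the truncation
point \eqn{k}: for a count distribution with probability function \eqn{f}
and cumulative distribution function \eqn{F},
\deqn{f_k(x) = f(x)/(1 - F(k)) \quad (x > k)}{f_k(x) = f(x)/(1 - F(k)) for x > k}
and zero otherwise, so the default \code{k = 0} corresponds to the
zero-truncated distribution.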
}
glmmTMB/DESCRIPTION0000744000176200001440000000650414122116502013257 0ustar liggesusersPackage: glmmTMB
Title: Generalized Linear Mixed Models using Template Model Builder
Version: 1.1.2.3
Authors@R: c(person("Arni","Magnusson",role="aut",
       comment=c(ORCID="0000-0003-2769-6741")),
    person("Hans","Skaug",role="aut"),
    person("Anders","Nielsen",role="aut",
       comment=c(ORCID="0000-0001-9683-9262")),
    person("Casper","Berg",role="aut",
       comment=c(ORCID="0000-0002-3812-5269")),
    person("Kasper","Kristensen",role="aut"),
    person("Martin","Maechler",role="aut",
       comment=c(ORCID="0000-0002-8685-9910")),
    person("Koen","van Benthem",role="aut"),
    person("Ben","Bolker",role=c("aut"),
       comment=c(ORCID="0000-0002-2127-0443"),
       email = "bolker@mcmaster.ca"),
    person("Nafis","Sadat",role="ctb",
       comment=c(ORCID="0000-0001-5715-616X")),
    person("Daniel","Lüdecke", role="ctb",
       comment=c(ORCID="0000-0002-8895-3206")),
    person("Russ","Lenth", role="ctb"),
    person("Joseph", "O'Brien", role = "ctb",
       comment = c(ORCID = "0000-0001-9851-5077")),
    person("Charles J.","Geyer", role="ctb"),
    person("Maeve","McGillycuddy", role="ctb"),
    person("Mollie","Brooks",role=c("aut","cre"),
       comment=c(ORCID="0000-0001-6963-8326"),
       email = "mollieebrooks@gmail.com"))
Description: Fit linear and generalized linear mixed models with various
    extensions, including zero-inflation. The models are fitted using maximum
    likelihood estimation via 'TMB' (Template Model Builder). Random effects
    are assumed to be Gaussian on the scale of the linear predictor and are
    integrated out using the Laplace approximation. Gradients are calculated
    using automatic differentiation.
License: AGPL-3
Depends: R (>= 3.2.0)
Imports: methods, TMB (>= 1.7.14), lme4 (>= 1.1-18.9000), Matrix, nlme,
    numDeriv
LinkingTo: TMB, RcppEigen
Suggests: knitr, rmarkdown, testthat, MASS, lattice, ggplot2 (>= 2.2.1),
    mlmRev, bbmle (>= 1.0.19), pscl, coda, reshape2, car (>= 3.0.6),
    emmeans (>= 1.4), estimability, DHARMa, multcomp, MuMIn, effects (>=
    4.0-1), dotwhisker, broom, broom.mixed, plyr, png, boot, texreg, xtable,
    huxtable, mvabund
SystemRequirements: GNU make
VignetteBuilder: knitr
URL: https://github.com/glmmTMB/glmmTMB
LazyData: TRUE
BugReports: https://github.com/glmmTMB/glmmTMB/issues
RoxygenNote: 7.1.1
NeedsCompilation: yes
Encoding: UTF-8
Packaged: 2021-09-20 11:06:12 UTC; molbr
Author: Arni Magnusson [aut] (<https://orcid.org/0000-0003-2769-6741>),
  Hans Skaug [aut],
  Anders Nielsen [aut] (<https://orcid.org/0000-0001-9683-9262>),
  Casper Berg [aut] (<https://orcid.org/0000-0002-3812-5269>),
  Kasper Kristensen [aut],
  Martin Maechler [aut] (<https://orcid.org/0000-0002-8685-9910>),
  Koen van Benthem [aut],
  Ben Bolker [aut] (<https://orcid.org/0000-0002-2127-0443>),
  Nafis Sadat [ctb] (<https://orcid.org/0000-0001-5715-616X>),
  Daniel Lüdecke [ctb] (<https://orcid.org/0000-0002-8895-3206>),
  Russ Lenth [ctb],
  Joseph O'Brien [ctb] (<https://orcid.org/0000-0001-9851-5077>),
  Charles J. Geyer [ctb],
  Maeve McGillycuddy [ctb],
  Mollie Brooks [aut, cre] (<https://orcid.org/0000-0001-6963-8326>)
Maintainer: Mollie Brooks <mollieebrooks@gmail.com>
Repository: CRAN
Date/Publication: 2021-09-20 14:40:02 UTC
glmmTMB/build/0000755000176200001440000000000014122065444012652 5ustar liggesusersglmmTMB/build/vignette.rds0000644000176200001440000000073514122065444015216 0ustar liggesusersglmmTMB/build/partial.rdb0000644000176200001440000001543414122065400014776 0ustar liggesusers
glmmTMB/tests/0000755000176200001440000000000014122065444012715 5ustar liggesusersglmmTMB/tests/testthat/0000755000176200001440000000000014121123551014546 5ustar liggesusersglmmTMB/tests/testthat/test-predict.R0000644000176200001440000002700414120405266017310 0ustar liggesusersstopifnot(require("testthat"), require("glmmTMB"))

sleepstudy <- transform(sleepstudy,
                        DaysFac = factor(cut(Days,2)) )

ssNA <- transform(sleepstudy, Days = replace(Days,c(1,27,93,145), NA))
ssNA2 <- transform(sleepstudy, Days = replace(Days,c(2,49), NA))

data(cbpp, package = "lme4")
set.seed(101)
cbpp_zi <- cbpp
cbpp_zi[sample(nrow(cbpp),size=15,replace=FALSE),"incidence"] <- 0

## 'newdata'
nd <- subset(sleepstudy, Subject=="308", select=-1)
nd$Subject <- "new"
nd$DaysFac <- "new"

test_that("manual prediction of pop level pred", {
    prnd <- predict(fm2, newdata=nd, allow.new.levels=TRUE)
    expect_equal( as.numeric(prnd),
                 fixef(fm2)$cond[1] + fixef(fm2)$cond[2] * nd$Days , tol=1e-10)
})

test_that("population-level prediction", {
    prnd <- predict(fm2)
    expect_equal(length(unique(prnd)),180)
    prnd2 <- predict(fm2, re.form=~0)
    prnd3 <- predict(fm2, re.form=NA)
    expect_equal(prnd2,prnd3)
    expect_equal(length(unique(prnd2)),10)
    ## make sure we haven't messed up any internal structures ...
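    ## (a repeated call to predict() on the same fitted object should
    ##  reproduce the first result exactly)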
prnd4 <- predict(fm2) expect_equal(prnd, prnd4) }) test_that("new levels of fixed effect factor", { skip_on_cran() g1 <- glmmTMB(Reaction ~ Days + Subject, sleepstudy) expect_error( predict(g1, nd), "Prediction is not possible for unknown fixed effects") }) test_that("new levels in RE term", { skip_on_cran() g2 <- glmmTMB(Reaction ~ us(DaysFac | Subject), sleepstudy) expect_error( predict(g2, nd), "Prediction is not possible for terms") }) test_that("new levels in AR1 (OK)", { skip_on_cran() g3 <- glmmTMB(Reaction ~ ar1(DaysFac + 0| Subject), sleepstudy) expect_warning( predict(g3, nd), ## OK: AR1 does not introduce new parameters "Predicting new random effect levels") }) context("Predict two-column response case") test_that("two-column response", { skip_on_cran() fm <- glmmTMB( cbind(count,4) ~ mined, family=betabinomial, data=Salamanders) expect_equal(predict(fm, type="response"), c(0.05469247, 0.29269818)[Salamanders$mined] ) }) test_that("Prediction with dispformula=~0", { skip_on_cran() y <- 1:10 f <- glmmTMB(y ~ 1, dispformula=~0, data = NULL) expect_equal(predict(f), rep(5.5, 10)) }) ss <- sleepstudy fm2_ex <- update(fm2, data=ssNA, na.action=na.exclude) fm2_om <- update(fm2, data=ssNA, na.action=na.omit) pp_ex <- predict(fm2_ex) pp_om <- predict(fm2_om) test_that("NA values in predictions", { expect_equal(length(pp_ex),nrow(ssNA)) expect_true(all(is.na(pp_ex)==is.na(ssNA$Days))) expect_equal(length(pp_om),length(na.omit(ssNA$Days))) expect_true(!any(is.na(pp_om))) }) ## na.pass test_that("na.pass", { pp_ndNA <- predict(fm2,newdata=ssNA) expect(all(is.na(ssNA$Days)==is.na(pp_ndNA)), failure_message="NAs don't match with na.pass+predict") pp_ndNA2 <- predict(fm2,newdata=ssNA2) expect(all(is.na(ssNA2$Days)==is.na(pp_ndNA2)), failure_message="NAs don't match with na.pass+predict+newdata") }) ## na.omit test_that("na.omit", { pp_ndNA_om <- predict(fm2,newdata=ssNA,na.action=na.omit) expect_equal(length(pp_ndNA_om),sum(complete.cases(ssNA))) }) tmbm1 <- glmmTMB(cbind(incidence, size - incidence) ~ period + (1 | herd), data = cbpp, family = binomial) tmbm2 <- update(tmbm1,incidence/size ~ . 
, weights = size) test_that("different binomial specs: fitted & predicted agree", { expect_equal(fitted(tmbm1),fitted(tmbm2)) expect_equal(predict(tmbm1),predict(tmbm2)) }) ## context("zero-inflation prediction") g0_zi <- update(tmbm2, ziformula = ~period) un <- function(x) lapply(x,unname) mypred <- function(form,dd,cc,vv,linkinv=identity,mu.eta=NULL) { X <- model.matrix(form,dd) pred <- drop(X %*% cc) se <- drop(sqrt(diag(X %*% vv %*% t(X)))) if (!is.null(mu.eta)) se <- se*mu.eta(pred) pred <- linkinv(pred) return(un(list(fit=pred,se.fit=se))) } ## FIXME: predictions should have row names of data dd <- data.frame(unique(cbpp["period"]),size=1,herd=NA) ff <- make.link("logit") test_that("type='link'", { link_pred <- mypred(~period,dd,fixef(g0_zi)$cond,vcov(g0_zi)$cond) expect_equal(un(predict(g0_zi,newdata=dd,se.fit=TRUE)), link_pred) }) test_that("various types", { cond_pred <- mypred(~period,dd,fixef(g0_zi)$cond,vcov(g0_zi)$cond, ff$linkinv,ff$mu.eta) expect_equal(un(predict(g0_zi,newdata=dd,se.fit=TRUE,type="conditional")), cond_pred) zprob_pred <- mypred(~period,dd,fixef(g0_zi)$zi,vcov(g0_zi)$zi, ff$linkinv,ff$mu.eta) expect_equal(un(predict(g0_zi,newdata=dd,se.fit=TRUE,type="zprob")), zprob_pred) expect_equal(unname(predict(g0_zi,newdata=dd,se.fit=TRUE,type="response")$fit), cond_pred$fit*(1-zprob_pred$fit)) }) test_that("type='zlink'", { zlink_pred <- mypred(~period,dd,fixef(g0_zi)$zi,vcov(g0_zi)$zi) expect_equal(un(predict(g0_zi,newdata=dd,se.fit=TRUE,type="zlink")), zlink_pred) }) test_that("deprecated zitype parameter", { expect_warning(predict(g0_zi,newdata=dd,zitype="zprob")) }) ## context("complex bases") data("sleepstudy",package="lme4") nd <- data.frame(Days=0, Subject=factor("309", levels=levels(sleepstudy$Subject))) test_that("poly", { g1 <- glmmTMB(Reaction~poly(Days,3), sleepstudy) expect_equal(predict(g1, newdata=data.frame(Days=0)), 255.7690496, tolerance=1e-5) }) test_that("splines", { if (getRversion()>="3.5.1") { ## work around predict/predvars bug in 3.5.0 & previous versions g2 <- glmmTMB(Reaction~splines::ns(Days,5), sleepstudy) } else { library(splines) g2 <- glmmTMB(Reaction~ns(Days,5), sleepstudy) } expect_equal(predict(g2, newdata=data.frame(Days=0)),257.42672, tolerance=1e-5) }) test_that("scale", { skip_on_cran() g3 <- glmmTMB(Reaction~scale(Days), sleepstudy) expect_equal(predict(g3, newdata=data.frame(Days=0)), 251.40507651, tolerance=1e-5) }) test_that("poly_RE", { g1 <- glmmTMB(Reaction~(1|Subject) + poly(Days,3), sleepstudy) expect_equal(predict(g1, newdata=nd, allow.new.levels=TRUE), 178.1629812, tolerance=1e-5) }) test_that("splines_RE", { if (getRversion()>="3.5.1") { g2 <- glmmTMB(Reaction~(1|Subject) + splines::ns(Days,5), sleepstudy) } else { library(splines) g2 <- glmmTMB(Reaction~(1|Subject) + ns(Days,5), sleepstudy) } expect_equal(predict(g2, newdata=nd, allow.new.levels=TRUE), 179.7784754, tolerance=1e-5) }) test_that("scale_RE", { skip_on_cran() g3 <- glmmTMB(Reaction~(1|Subject) + scale(Days), sleepstudy) expect_equal(predict(g3, newdata=nd, allow.new.levels=TRUE), 173.83923026, tolerance=1e-5) }) test_that("complex bases in dispformula", { skip_on_cran() g4A <- glmmTMB(Reaction~1, sleepstudy) g4B <- glmmTMB(Reaction~1, disp=~poly(Days,2), sleepstudy) expect_equal(predict(g4A, newdata=nd, se.fit=TRUE), list(fit = 298.507945749154, se.fit = 4.18682101029576), tolerance=1e-5) expect_equal(predict(g4B, newdata=nd, se.fit=TRUE), list(fit = 283.656705454758, se.fit = 4.74204256781178)) }) test_that("fix_predvars works for I(x^2)", { 
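    ## poly(), scale(), splines::ns() etc. store data-dependent constants in
    ## the "predvars" attribute of the model terms; predictions on newdata
    ## are only correct if the constants from the original fit (not ones
    ## recomputed from newdata) are reused. Here glmmTMB's handling of this
    ## (including I() terms; see GH #512 below) is checked against lme4's
    ## predictions as a reference.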
skip_on_cran() ## GH512; @strengejacke set.seed(123) n <- 500 d <- data.frame( y = rbinom(n, size = 1, prob = .2), x = rnorm(n), site = sample(letters, size = n, replace = TRUE), area = sample(LETTERS[1:9], size = n, replace = TRUE) ) form <- y ~ x + I(x^2) + I(x^3) + (1 | area) m1 <- lme4::glmer(form, family = binomial("logit"), data = d) m2 <- glmmTMB(form, family = binomial("logit"), data = d) nd <- data.frame(x = c(-2, -1, 0, 1, 2), area = NA) p1 <- predict(m1, newdata = nd, type = "link", re.form = NA) p2 <- predict(m2, newdata = nd, type = "link") expect_equal(unname(p1),unname(p2), tolerance=1e-4) }) test_that("contrasts carried over", { skip_on_cran() ## GH 439, @cvoeten iris2 <- transform(iris, grp=factor(c("a","b"))) contrasts(iris2$Species) <- contr.sum contrasts(iris2$grp) <- contr.sum mod1 <- glmmTMB(Sepal.Length ~ Species,iris) mod2 <- glmmTMB(Sepal.Length ~ Species,iris2) iris3 <- iris[1,] iris3$Species <- "extra" ## these are not *exactly* equal because of numeric differences ## when estimating parameters differently ... (?) expect_equal(predict(mod1),predict(mod2),tolerance=1e-6) ## make sure we actually imposed contrasts correctly/differently expect_false(isTRUE(all.equal(fixef(mod1)$cond,fixef(mod2)$cond))) expect_error(predict(mod1,newdata=iris2), "contrasts mismatch") expect_equal(predict(mod1,newdata=iris2,allow.new.levels=TRUE), predict(mod1,newdata=iris)) mod3 <- glmmTMB(Sepal.Length ~ 1|Species, iris) expect_equal(c(predict(mod3,newdata=data.frame(Species="ABC"), allow.new.levels=TRUE)), 5.843333, tolerance=1e-6) mod4 <- glmmTMB(Sepal.Length ~ grp + (1|Species), iris2) expect_equal(c(predict(mod4, newdata=data.frame(Species="ABC",grp="a"), allow.new.levels=TRUE)), 5.839998, tolerance=1e-6) ## works with char rather than factor in new group vble expect_equal(predict(mod3, newdata=iris3, allow.new.levels=TRUE), 5.843333, tolerance=1e-6) }) test_that("dispersion", { mod5 <- glmmTMB(Sepal.Length ~ Species, disp=~ Species, iris) expect_equal(length(unique(predict(mod5, type="disp"))), length(unique(iris$Species))) expect_equal(length(unique(predict(mod5, type="disp", se.fit=TRUE)$se.fit)), length(unique(iris$Species))) }) test_that("offset-only model (GH #625)", { skip_on_cran() owls_nb0 <- glmmTMB(SiblingNegotiation ~ offset(log(BroodSize)), family = nbinom2(), data=Owls) expect_equal(mean(predict(owls_nb0)), 1.88220473712677) }) test_that("fast prediction", { ## use tighter-than-default tolerances ## expect_equal(predict(fm2,fast=FALSE),predict(fm2,fast=TRUE), tolerance=1e-13) expect_equal(predict(fm2, type="response",fast=FALSE), predict(fm2, type="response", fast=TRUE), tolerance=1e-13) ## handling NAs etc. 
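    ## (the default fast path should propagate NAs from na.exclude exactly
    ##  like the explicit fast=FALSE path)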
expect_equal(pp_ex, predict(fm2_ex, fast=FALSE)) }) test_that("inverse-link prediction", { skip_on_cran() ## example from John Maindonald (GH #696) ## this highlights a particular case where the prediction on the (cloglog) link scale ## is large (3.98), which leads to a prediction of 1.0 unless the cloglog-inverse-link ## function is clamped (as in make.link("cloglog")'s version) ffly <- read.csv(system.file("test_data", "ffly.csv", package="glmmTMB")) ffly$obs <- factor(ffly$obs) form1 <- cbind(Dead,Live)~0+trtGp/TrtTime+(1|obs)+(1|trtGpRep) ObsTMB.cll <- glmmTMB(form1, family=binomial(link="cloglog"), data=ffly) p0 <- predict(ObsTMB.cll, re.form=NA)[63] p0R <- make.link("cloglog")$linkinv(p0) p1 <- predict(ObsTMB.cll, re.form=NA, type="response")[63] expect_equal(p0R, p1) }) test_that("fast prediction not allowed with NA (correct errors)", { expect_error(predict(fm2, re.form=NA, fast=TRUE), "fast=TRUE is not compatible") expect_equal(predict(fm2, re.form=NA, fast=FALSE), predict(fm2, re.form=NA, fast=NULL)) }) glmmTMB/tests/testthat/test-reml.R0000644000176200001440000000276514070567426016637 0ustar liggesusersstopifnot(require("testthat"), require("glmmTMB"), require("lme4")) context("REML") test_that("REML check against lmer", { ## Example 1: Compare results with lmer fm1.lmer <- lmer(Reaction ~ Days + (Days | Subject), sleepstudy, REML=TRUE) fm1.glmmTMB <- glmmTMB(Reaction ~ Days + (Days | Subject), sleepstudy, REML=TRUE) expect_equal( logLik(fm1.lmer) , logLik(fm1.glmmTMB) ) expect_equal(as.vector(predict(fm1.lmer)) , predict(fm1.glmmTMB), tolerance=2e-3) expect_equal(vcov(fm1.glmmTMB)$cond, as.matrix(vcov(fm1.lmer)) , tolerance=1e-3) ## Example 2: Compare results with lmer data(Orthodont,package="nlme") Orthodont$nsex <- as.numeric(Orthodont$Sex=="Male") Orthodont$nsexage <- with(Orthodont, nsex*age) fm2.lmer <- lmer(distance ~ age + (age|Subject) + (0+nsex|Subject) + (0 + nsexage|Subject), data=Orthodont, REML=TRUE, control=lmerControl(check.conv.grad = .makeCC("warning", tol = 5e-3))) fm2.glmmTMB <- glmmTMB(distance ~ age + (age|Subject) + (0+nsex|Subject) + (0 + nsexage|Subject), data=Orthodont, REML=TRUE) expect_equal( logLik(fm2.lmer) , logLik(fm2.glmmTMB), tolerance=1e-5 ) expect_equal(as.vector(predict(fm2.lmer)) , predict(fm2.glmmTMB), tolerance=1e-4) expect_equal(vcov(fm2.glmmTMB)$cond, as.matrix(vcov(fm2.lmer)) , tolerance=1e-2) }) glmmTMB/tests/testthat/test-varstruc.R0000644000176200001440000001041014120405266017520 0ustar liggesusersstopifnot(require("testthat"), require("glmmTMB"), require("lme4")) source(system.file("test_data/glmmTMB-test-funs.R", package="glmmTMB", mustWork=TRUE)) data(sleepstudy, cbpp, package = "lme4") ## fsleepstudy and fitted models come from inst/test_data/models.rda ## OR running inst/test_data/make_ex.R context("variance structures") test_that("diag", { ## two formulations of diag and lme4 all give same log-lik expect_equal(logLik(fm_diag1),logLik(fm_diag2_lmer)) expect_equal(logLik(fm_diag1),logLik(fm_diag2)) }) test_that("cs_us", { ## for a two-level factor, compound symmetry and unstructured ## give same result expect_equal(logLik(fm_us1),logLik(fm_cs1)) expect_equal(logLik(fm_us1),logLik(fm_us1_lmer)) }) test_that("cs_homog", { ## *homogenous* compound symmetry vs. 
nested random effects expect_equal(logLik(fm_nest),logLik(fm_nest_lmer)) }) test_that("basic ar1", { vv <- VarCorr(fm_ar1)[["cond"]] cc <- cov2cor(vv[[2]]) expect_equal(cc[1,],cc[,1]) expect_equal(unname(cc[1,]), cc[1,2]^(0:(nrow(cc)-1))) }) test_that("print ar1 (>1 RE)", { cco <- gsub(" +"," ", trimws(capture.output(print(summary(fm_ar1),digits=1)))) expect_equal(cco[12:14], c("Subject (Intercept) 4e-01 0.6", "Subject.1 row1 4e+03 60.8 0.87 (ar1)", "Residual 8e+01 8.9")) }) test_that("ar1 requires factor time", { skip_on_cran() expect_error(glmmTMB(Reaction ~ 1 + (1|Subject) + ar1(as.numeric(row)+0| Subject), fsleepstudy), "expects a single") ## works even when the factor is a weird/hard-to-recognize component expect_is(glmmTMB(Reaction ~ 1 + (1|Subject) + ar1(relevel(factor(row),"2")+0| Subject), fsleepstudy), "glmmTMB") }) ## FIXME: simpler to check formatVC() directly? get_vcout <- function(x,g="\\bSubject\\b") { cc <- capture.output(print(VarCorr(x))) cc1 <- grep(g,cc,value=TRUE,perl=TRUE) ss <- strsplit(cc1,"[^[:alnum:][:punct:]]+")[[1]] return(ss[nchar(ss)>0]) } test_that("varcorr_print", { skip_on_cran() ss <- get_vcout(fm_cs1) expect_equal(length(ss),5) expect_equal(ss[4:5],c("0.081","(cs)")) ss2 <- get_vcout(fm_ar1,g="\\bSubject.1\\b") expect_equal(length(ss2),5) expect_equal(ss2[4:5],c("0.873","(ar1)")) ## test case with two different size V-C set.seed(101) dd <- data.frame(y=rnorm(1000),c=factor(rep(1:2,500)), w=factor(rep(1:10,each=100)), s=factor(rep(1:10,100))) ## non-pos-def case (we don't care at the moment) m1 <- suppressWarnings(glmmTMB(y~c+(c|w)+(1|s),data=dd, family=gaussian)) cc <- squash_white(capture.output(print(VarCorr(m1),digits=2))) expect_equal(cc, c("Conditional model:", "Groups Name Std.Dev. Corr", "w (Intercept) 3.1e-05", "c2 4.9e-06 0.98", "s (Intercept) 3.4e-05", "Residual 9.6e-01")) }) test_that("cov_struct_order", { skip_on_cran() ff <- system.file("test_data","cov_struct_order.rds",package="glmmTMB") if (nchar(ff)>0) { dat <- readRDS(ff) } else { set.seed(101) nb <- 100 ns <- nb*3 nt <- 100 cor <- .7 dat <- data.frame(Block = factor(rep(1:nb, each = ns/nb*nt)), Stand = factor(rep(1:ns, each = nt)), Time = rep(1:nt, times = ns), blockeff = rep(rnorm(nb, 0, .5), each = ns/nb*nt), standeff = rep(rnorm(ns, 0, .8), each = nt), resid = c(t(MASS::mvrnorm(ns, mu = rep(0, nt), Sigma = 1.2*cor^abs(outer(0:(nt-1),0:(nt-1),"-")))))) dat$y <- with(dat, 5 + blockeff + standeff + resid)+rnorm(nrow(dat), 0, .1) dat$Time <- factor(dat$Time) ## saveRDS(dat, file="../../inst/test_data/cov_struct_order.rds",version=2) } fit1 <- glmmTMB(y ~ (1|Block) + (1|Stand)+ ar1(Time +0|Stand), data = dat) expect_equal(unname(fit1$fit$par), c(4.98852432, -4.22220615, -0.76452645, -0.24762133, 0.08879302, 1.00022657), tol=1e-3) }) glmmTMB/tests/testthat/test-mapopt.R0000644000176200001440000000626714120405266017166 0ustar liggesusersstopifnot(require("testthat"), require("glmmTMB")) data(Salamanders, package = "glmmTMB") if (identical(Sys.getenv("NOT_CRAN"), "true")) { m1 <- glmmTMB(count~ mined, family=poisson, data=Salamanders, start=list(beta=c(0,2)), map=list(beta=factor(c(1,NA)))) m2 <- glmmTMB(count~ mined + (1|site), family=poisson, data=Salamanders, start=list(theta=log(2)), map=list(theta=factor(NA))) m3 <- glmmTMB(count~ mined + (1|site), zi = ~1, family=poisson, data=Salamanders, start=list(theta=log(2), betazi=c(-1)), map=list(theta=factor(NA), betazi=factor(NA))) m4_nomap <- glmmTMB(count~ mined + (1|site), zi=~mined, family=poisson, data=Salamanders) m4 <- glmmTMB(count~ 
mined + (1|site), zi=~mined, family=poisson, data=Salamanders, map=list(theta=factor(NA)), start = list(theta=log(10))) m1optim <- update(m1, control=glmmTMBControl(optimizer=optim, optArgs=list(method="BFGS"))) test_that("basic mapping works", { expect_equal(fixef(m1)$cond[[2]], 2.0) expect_equal(exp(getME(m2,"theta")), 2.0) expect_equal(fixef(m3)$zi[[1]], -1.0) }) test_that("predict works with mapped params", expect_equal(lapply(predict(m1,se.fit=TRUE),unique), list(fit = c(-1.18646939995962, 0.81353060004038), se.fit = 0.0342594326326739), tolerance=1e-6) ) m1_sd <- c(`(Intercept)` = 0.0342594326326741, minedno = NA_real_) test_that("vcov works with mapped params", { expect_equal(dim(vcov(m1)$cond),c(1,1)) expect_equal(dim(vcov(m1,full=TRUE)),c(1,1)) expect_equal(dim(vcov(m2)$cond),c(2,2)) expect_equal(dim(vcov(m2,full=TRUE)),c(2,2)) v1 <- vcov(m1,include_mapped=TRUE) expect_equal(dim(v1$cond),c(2,2)) expect_equal(sqrt(diag(v1$cond)), m1_sd, tolerance=1e-6) }) test_that("summary works with mapped params", { expect_equal(summary(m1)$coef$cond[,"Std. Error"], m1_sd) }) test_that("confint works with mapped params", { cm1 <- confint(m1) expect_equal(dim(cm1), c(1,3)) expect_equal(rownames(cm1), "(Intercept)") cm2 <- confint(m2) expect_equal(dim(cm2), c(2,3)) expect_equal(rownames(cm2), c("(Intercept)","minedno")) cm3 <- confint(m3) expect_equal(dim(cm3), c(2,3)) expect_equal(rownames(cm3), c("(Intercept)","minedno")) cm4 <- confint(m4) expect_equal(dim(cm4), c(4,3)) expect_equal(rownames(cm4), c("cond.(Intercept)", "cond.minedno", "zi.(Intercept)", "zi.minedno")) cm4_nomap <- confint(m4_nomap) }) context("alternate optimizers") test_that("alternate optimizers work", { expect_equal(fixef(m1),fixef(m1optim), tol=1e-4) expect_false(identical(fixef(m1),fixef(m1optim))) }) test_that("summary", { expect_equal(coef(summary(m1))$cond["minedno",], c(Estimate = 2, `Std. Error` = NA, `z value` = NA, `Pr(>|z|)` = NA)) expect_equal(coef(summary(m3))$zi["(Intercept)",], c(Estimate = -1, `Std. 
Error` = NA, `z value` = NA, `Pr(>|z|)` = NA)) }) } ## skip on CRAN glmmTMB/tests/testthat/test-env.R0000644000176200001440000000253214070567426016460 0ustar liggesusers## check that everything works in weird environments stopifnot(require("testthat"), require("glmmTMB")) data(sleepstudy, cbpp, package = "lme4") ## need global env for test_that sleepstudy <<- transform(sleepstudy, DaysFac = factor(Days)) context("basic examples") test_that("basic example #1", { fitFun <- function(dat){ glmmTMB(Reaction ~ Days + (1|Subject), data=dat) } f0 <- glmmTMB(Reaction ~ Days + (1|Subject), data=sleepstudy) f1 <- fitFun(sleepstudy) uncall <- function(x) { x$call <- NULL return(x) } expect_equal(uncall(f0),uncall(f1)) }) test_that("paranoia", { formFun <- function() { return(Reaction ~ Days + (1|Subject)) } fitFun <- function(f,dat){ glmmTMB(f, data=dat) } f0 <- glmmTMB(Reaction ~ Days + (1|Subject), data=sleepstudy) f1 <- fitFun(formFun(),sleepstudy) uncall <- function(x) { x$call <- NULL return(x) } expect_equal(uncall(f0),uncall(f1)) }) test_that("dispformula env", { fitFun2 <- function(dat){ glmmTMB(count ~ 1, data=dat, family="poisson" ) } m0 <- fitFun2(Salamanders) m1 <- glmmTMB(count ~ 1, data= Salamanders, family="poisson") uncall <- function(x) { x$call <- NULL return(x) } expect_equal(uncall(summary(m0)), uncall(summary(m1))) }) glmmTMB/tests/testthat/test-rr.R0000644000176200001440000000754314070567426016322 0ustar liggesusersstopifnot(require("testthat"), require("glmmTMB")) ## make sure tests don't run in parallel except where we want them to op <- getOption("glmmTMB.cores", 1) on.exit(options(glmmTMB.cores = op)) options(glmmTMB.cores = 1) if (require(mvabund)) { data(spider) sppTot <- sort(colSums(spider$abund), decreasing = TRUE) tmp <- cbind(spider$abund, spider$x) tmp$id <- 1:nrow(tmp) spiderDat <- reshape(tmp, idvar = "id", timevar = "Species", times = colnames(spider$abund), varying = list(colnames(spider$abund)), v.names = "abund", direction = "long") spiderDat_common <- subset(spiderDat, Species %in% names(sppTot)[1:4]) test_that("rr model fit", { ## Fit poison model with rr spider_p1 <<- glmmTMB(abund ~ Species + rr(Species + 0|id, d = 1), family = poisson, data=spiderDat_common) spider_p2 <<- update(spider_p1, control = glmmTMBControl(start_method = list(method = "res", jitter.sd = 0.2))) expect_equal(as.numeric(logLik(spider_p1)), c(-736.0022), tolerance=1e-4) expect_equal(fixef(spider_p1),fixef(spider_p2), tolerance=1e-4) }) test_that("rr works with symbolic d", { d <- 1 spider_p1d <- glmmTMB(abund ~ Species + rr(Species + 0|id, d = d), family = poisson, data=spiderDat_common) expect_equal(fixef(spider_p1d), fixef(spider_p1)) expect_equal(VarCorr(spider_p1d), VarCorr(spider_p1)) }) test_that("rr error about un-eval'able d", { if (exists("junk")) rm(junk) expect_error(glmmTMB(abund ~ Species + rr(Species + 0|id, d = junk), family = poisson, data=spiderDat_common), "can't evaluate reduced-rank dimension") }) test_that("rr error about non-numeric d", { junk <- "junk" expect_error(glmmTMB(abund ~ Species + rr(Species + 0|id, d = junk), family = poisson, data=spiderDat_common), "non-numeric value for reduced-rank dimension") }) test_that("rr warning about parallel eval", { options(glmmTMB.cores = 2) expect_warning( glmmTMB(abund ~ Species + rr(Species + 0|id, d = 2), family = poisson, data=spiderDat_common) ## have to protect () in the regex ... 
, "rr\\(\\) not compatible with parallel execution") options(glmmTMB.cores = 1) }) set.seed(101) n <- 1000 ngrp <- 10 dd <- data.frame(x=rnorm(n),y=rnorm(n),z=rnorm(n), g1=factor(sample(ngrp,size=n,replace=TRUE)), g2=factor(sample(ngrp,size=n,replace=TRUE))) beta <- 1; names(beta) <- "(Intercept)" theta <- rep(1,2*10) dd$w <- suppressMessages(simulate(~1 + (x+y+z|g1) + (x+y+z|g2), newdata=dd, family=gaussian, newparams=list(beta = beta, theta=theta,sigma=1))[[1]]) test_that("rr eigenvalues", { m1 <- glmmTMB(w ~ 1 + rr(x+y+z|g1,2), data=dd) eigenvalues <- zapsmall(eigen(VarCorr(m1)$cond$g1)$values) expect_equal(eigenvalues[3:4], c(0, 0)) m2 <- glmmTMB(w ~ 1 + rr(x+y+z|g1,3) + (x+y+z|g2), data=dd) eigenvalues <- zapsmall(eigen(VarCorr(m2)$cond$g1)$values) expect_equal(eigenvalues[4], 0) }) ## FIXME: test, remove if unnecessary options(glmmTMB.control = op) ## just in case on.exit() is inappropriate? } ## if require(mvabund) glmmTMB/tests/testthat/test-VarCorr.R0000644000176200001440000001547214070567426017255 0ustar liggesusersstopifnot(require("testthat"), require("glmmTMB"), require("lme4")) source(system.file("test_data/glmmTMB-test-funs.R", package="glmmTMB", mustWork=TRUE)) data("Orthodont", package="nlme") fm1 <- glmmTMB(distance ~ age + (age|Subject), data = Orthodont) fm1C <- lmer(distance ~ age + (age|Subject), data = Orthodont, REML=FALSE, control=lmerControl(check.conv.grad = .makeCC("warning", tol = 2e-2))) gm1 <- glmmTMB(incidence/size ~ period + (1 | herd), weights=size, data = cbpp, family = binomial) gm1C <- glmer(incidence/size ~ period + (1 | herd), weights=size, data = cbpp, family = binomial) ## make glmmTMB VarCorr look like lme4 VarCorr stripTMBVC <- function(x) { r <- VarCorr(x)[["cond"]] for (i in seq_along(r)) { attr(r[[i]],"blockCode") <- NULL } return(r) } test_that("basic glmer vs glmmTMB", { expect_equal(stripTMBVC(fm1),unclass(VarCorr(fm1C)), tol=2e-3) expect_equal(stripTMBVC(gm1),unclass(VarCorr(gm1C)), tol=5e-3) }) ## have to take only last 4 lines ## some white space diffs introduced in fancy-corr-printing pfun <- function(x) squash_white(capture.output(print(VarCorr(x),digits=2))) expect_equal(tail(pfun(fm1),4), pfun(fm1C)) data("Pixel", package="nlme") ## nPix <- nrow(Pixel) complex_form <- pixel ~ day + I(day^2) + (day | Dog) + (1 | Side/Dog) test_that("bad model convergence warning", { expect_warning(fmPix1 <<- glmmTMB(complex_form, data = Pixel), "convergence problem") }) fmPix1B <- lmer(complex_form, data = Pixel, control=lmerControl(check.conv.grad = .makeCC("warning", tol = 5e-3))) vPix1B <- unlist(lapply(VarCorr(fmPix1B),c)) vPix1 <- unlist(lapply(VarCorr(fmPix1)[["cond"]],c)) ## "manual" (1 | Dog / Side) : fmPix3 <- glmmTMB(pixel ~ day + I(day^2) + (day | Dog) + (1 | Dog) + (1 | Side:Dog), data = Pixel) vPix3 <- unlist(lapply(VarCorr(fmPix3)[["cond"]],c)) fmP1.r <- fmPix1$obj$env$report() ## str(fmP1.r) ## List of 4 ## $ corrzi: list() ## $ sdzi : list() ## $ corr :List of 3 ## ..$ : num [1, 1] 1 ## ..$ : num [1, 1] 1 ## ..$ : num [1:2, 1:2] 1 -0.598 -0.598 1 ## $ sd :List of 3 ## ..$ : num 16.8 ## ..$ : num 9.44 ## ..$ : num [1:2] 24.83 1.73 ## fmP1.r $ corr vv <- VarCorr(fmPix1) set.seed(12345) dd <- data.frame(a=gl(10,100), b = rnorm(1000)) test2 <- suppressMessages(simulate(~1+(b|a), newdata=dd, family=poisson, newparams= list(beta = c("(Intercept)" = 1), theta = c(1,1,1)))) ## Zero-inflation : set all i.0 indices to 0: i.0 <- sample(c(FALSE,TRUE), 1000, prob=c(.3,.7), replace=TRUE) test2[i.0, 1] <- 0 mydata <<- cbind(dd, test2) ## GLOBAL ## The 
zeros in the 10 groups: xx <- xtabs(~ a + (sim_1 == 0), mydata) ## FIXME: actually need to fit this! test_that("non-trivial dispersion model", { data(sleepstudy, package="lme4") fm3 <- glmmTMB(Reaction ~ Days + (1|Subject), dispformula=~ Days, sleepstudy) cc0 <- capture.output(print(fm3)) cc1 <- capture.output(print(summary(fm3))) expect_true(any(grepl("Dispersion model:",cc0))) expect_true(any(grepl("Dispersion model:",cc1))) }) ## FIXME: slow ( ~ 49 seconds ) ## ??? wrong context? # not simulated this way, but returns right structure test_that("weird variance structure", { mydata <- cbind(dd, test2) gm <- suppressWarnings(glmmTMB(sim_1 ~ 1+(b|a), zi = ~1+(b|a), data=mydata, family=poisson())) cc2 <- capture.output(print(gm)) expect_equal(sum(grepl("Zero-inflation model:",cc2)),3) }) ## eight updateCholesky() warnings .. which will suppress *unless* they are in the last iter. if (FALSE) { str(gm.r <- gm$obj$env$report()) ## List of 4 ## $ corrzi:List of 1 ## ..$ : num [1:2, 1:2] 1 0.929 0.929 1 ## $ sdzi :List of 1 ## ..$ : num [1:2] 3.03e-05 1.87e-04 ## $ corr :List of 1 ## ..$ : num [1:2, 1:2] 1 0.921 0.921 1 ## $ sd :List of 1 ## ..$ : num [1:2] 0.779 1.575 } vc <- VarCorr(fm1) ## default print method: standard dev and corr getVCText <- function(obj,...) { c1 <- capture.output(print(obj,...)) c1f <- read.fwf(textConnection(c1[-(1:3)]),header=FALSE, fill=TRUE, widths=c(10,12,9,6)) return(c1f[,-(1:2)]) ## just value columns } ##expect_equal(c1,c("", "Conditional model:", ## " Groups Name Std.Dev. Corr ", ## " Subject (Intercept) 2.19409 ", ## " age 0.21492 -0.581", ## " Residual 1.31004 ")) expect_equal(getVCText(vc), structure(list(V3 = c(2.1941, 0.21492, 1.31004), V4 = c(NA, -0.581, NA)), .Names = c("V3", "V4"), class = "data.frame", row.names = c(NA, -3L)), tolerance=1e-5) ## both variance and std.dev. c2 <- getVCText(vc,comp=c("Variance","Std.Dev."),digits=2) ## c2 <- capture.output(print(vc,comp=c("Variance","Std.Dev."),digits=2)) ## expect_equal(c2, ## c("", "Conditional model:", ## " Groups Name Variance Std.Dev. Corr ", ## " Subject (Intercept) 4.814 2.19 ", ## " age 0.046 0.21 -0.58", ## " Residual 1.716 1.31 ")) expect_equal(c2, structure(list(V3 = c(4.814, 0.046, 1.716), V4 = c(2.19, 0.21, 1.31)), .Names = c("V3", "V4"), class = "data.frame", row.names = c(NA, -3L))) ## variance only c3 <- getVCText(vc,comp=c("Variance")) ## c3 <- capture.output(print(vc,)) ## expect_equal(c3, ## c("", "Conditional model:", ## " Groups Name Variance Corr ", ## " Subject (Intercept) 4.814050 ", ## " age 0.046192 -0.581", ## " Residual 1.716203 ")) expect_equal(c3,structure(list(V3 = c(4.814071, 0.046192, 1.716208), V4 = c(NA, -0.581, NA)), .Names = c("V3", "V4"), class = "data.frame", row.names = c(NA, -3L)), tolerance=5e-5) if (FALSE) { ## not yet ... 
as.data.frame(vc) as.data.frame(vc,order="lower.tri") } Orthodont$units <- factor(seq(nrow(Orthodont))) ## suppress 'false convergence' warning suppressWarnings(fm0 <- glmmTMB(distance ~ age + (1|Subject) + (1|units), dispformula=~0, data = Orthodont)) test_that("VarCorr omits resid when dispformula=~0", { expect_false(attr(VarCorr(fm0)$cond,"useSc")) ## Residual vars not printed expect_false(any(grepl("Residual",capture.output(print(VarCorr(fm0)))))) }) test_that("vcov(.,full=TRUE) works for zero-disp models", { expect_equal(dim(vcov(fm0,full=TRUE)),c(4,4)) }) glmmTMB/tests/testthat/test-edgecases.R0000644000176200001440000000077014070567426017615 0ustar liggesusersstopifnot(require("testthat"), require("glmmTMB")) context("test edge cases") test_that("profiling failure", { ## data from https://github.com/glmmTMB/glmmTMB/issues/399 dd <- readRDS(system.file("test_data","IC_comp_data.rds", package="glmmTMB")) expect_warning(glmmTMB( ProbDiv ~ stdQlty + stdLaying + (1|Year) + (1|Site) + (1|PairID), family = "binomial", control=glmmTMBControl(profile = TRUE), data = dd), "a Newton step failed") }) glmmTMB/tests/testthat/test-disp.R0000644000176200001440000000275414070567426016635 0ustar liggesusersstopifnot(require("testthat"), require("glmmTMB")) ## context("Testing dispersion") sim1=function(nfac=24, nt=100, facsd=.1, tsd=.15, mu=0, residsd=1) { dat=expand.grid(fac=factor(1:nfac, levels=1:nfac), t= 1:nt) n=nrow(dat) dat$REfac=rnorm(nfac, sd= facsd)[dat$fac] dat$REt=rnorm(nt, sd= tsd)[dat$t] dat$x=rnorm(n, mean=mu, sd=residsd) + dat$REfac + dat$REt return(dat) } set.seed(101) d1=sim1(mu=100, residsd =10) d2=sim1(mu=200, residsd =5) d1=transform(d1, fac=paste0(fac, "_D10"), disp="ten") d2=transform(d2, fac=paste0(fac, "_D5"), disp="five") ## global assignment for testthat dat <<- rbind(d1, d2) dat$disp = factor(dat$disp, levels = c("ten", "five")) m0 <<- glmmTMB(x~disp+(1|fac), dispformula=~disp, dat) test_that("disp calc", { expect_equal(unname(fixef(m0)$disp), c(log(10^2), log(5^2)-log(10^2)), tol=1e-2) }) test_that("predict dispersion", { expect_equal(predict(m0, type="disp"), c(rep(10, 24*100), rep(5, 24*100)), tol=1e-2) }) dat2 <<- rbind(head(d1, 50), head(d2, 50)) #smaller for faster fitting when not checking estimates ## suppress "... false convergence (8) ..." suppressWarnings(nbm0 <<- glmmTMB(round(x)~disp+(1|fac), ziformula=~0, dispformula=~disp, dat2, family=nbinom1, se=FALSE) ) pm0 <<- update(nbm0, family=poisson) ## suppress "... false convergence (8) ..." nbm1 <<- suppressWarnings(update(pm0, family=nbinom1)) test_that("update maintains dispformula in call", { expect_equal(getCall(nbm0), getCall(nbm1)) }) glmmTMB/tests/testthat/test-basics.R0000644000176200001440000003033514120405266017123 0ustar liggesusersstopifnot(require("testthat"), require("glmmTMB")) ## loaded by gt_load() in setup_makeex.R, but need to do this ## again to get it to work in devtools::check() environment (ugh) gm0 <- up2date(gm0) data(sleepstudy, cbpp, package = "lme4") data(quine, package="MASS") ## n.b. for test_that, this must be assigned within the global ## environment ... cbpp <<- transform(cbpp, prop = incidence/size, obs=factor(seq(nrow(cbpp)))) ## utility: hack/replace parts of the updated result that will ## be cosmetically different matchForm <- function(obj, objU, family=FALSE, fn = FALSE) { for(cmp in c("call","frame")) # <- more? objU[[cmp]] <- obj[[cmp]] ## Q: why are formulas equivalent but not identical? 
A: their environments may differ objU$modelInfo$allForm <- obj$modelInfo$allForm if (family) objU$modelInfo$family <- obj$modelInfo$family ## objective function/gradient may change between TMB versions if (fn) { for (f in c("fn","gr","he","retape","env","report","simulate")) { objU$obj[[f]] <- obj$obj[[f]] } } return(objU) } lm0 <- lm(Reaction~Days,sleepstudy) fm00 <- glmmTMB(Reaction ~ Days, sleepstudy) fm0 <- glmmTMB(Reaction ~ 1 + ( 1 | Subject), sleepstudy) fm1 <- glmmTMB(Reaction ~ Days + ( 1 | Subject), sleepstudy) fm2 <- glmmTMB(Reaction ~ Days + (Days| Subject), sleepstudy) fm3 <- glmmTMB(Reaction ~ Days + ( 1 | Subject) + (0+Days | Subject), sleepstudy) test_that("Basic Gaussian Sleepdata examples", { expect_is(fm00, "glmmTMB") expect_is(fm0, "glmmTMB") expect_is(fm1, "glmmTMB") expect_is(fm2, "glmmTMB") expect_is(fm3, "glmmTMB") expect_equal(fixef(fm00)[[1]],coef(lm0),tol=1e-5) expect_equal(sigma(fm00)*sqrt(nobs(fm00)/(df.residual(fm00)+1)), summary(lm0)$sigma,tol=1e-5) expect_equal(fixef(fm0)[[1]], c("(Intercept)" = 298.508), tolerance = .0001) expect_equal(fixef(fm1)[[1]], c("(Intercept)" = 251.405, Days = 10.4673), tolerance = .0001) expect_equal(fixef(fm2)$cond, fixef(fm1)$cond, tolerance = 1e-5)# seen 1.042 e-6 expect_equal(fixef(fm3)$cond, fixef(fm1)$cond, tolerance = 5e-6)# seen 2.250 e-7 expect_equal(head(ranef(fm0)$cond$Subject[,1],3), c(37.4881849228705, -71.5589277273216, -58.009085500647), tolerance=1e-5) ## test *existence* of summary method -- nothing else for now expect_is(suppressWarnings(summary(fm3)),"summary.glmmTMB") }) test_that("Update Gaussian", { skip_on_cran() ## call doesn't match (formula gets mangled?) ## timing different fm1u <- update(fm0, . ~ . + Days) expect_equal(fm1, matchForm(fm1, fm1u, fn=TRUE)) }) test_that("Variance structures", { skip_on_cran() ## above: fm2 <- glmmTMB(Reaction ~ Days + (Days| Subject), sleepstudy) expect_is(fm2us <- glmmTMB(Reaction ~ Days + us(Days| Subject), sleepstudy), "glmmTMB") expect_is(fm2cs <- glmmTMB(Reaction ~ Days + cs(Days| Subject), sleepstudy), "glmmTMB") expect_is(fm2diag <- glmmTMB(Reaction ~ Days + diag(Days| Subject), sleepstudy), "glmmTMB") expect_equal(getME(fm2, "theta"), getME(fm2us,"theta")) ## FIXME: more here, compare results against lme4 ... 
}) test_that("Sleepdata Variance components", { expect_equal(c(unlist(VarCorr(fm3))), c(cond.Subject = 584.247907378213, cond.Subject.1 = 33.6332741779585), tolerance=1e-5) }) test_that("Basic Binomial CBPP examples", { ## Basic Binomial CBPP examples ---- intercept-only fixed effect expect_is(gm0, "glmmTMB") expect_is(gm1, "glmmTMB") expect_equal(fixef(gm0)[[1]], c("(Intercept)" = -2.045671), tolerance = 1e-3)#lme4 results expect_equal(fixef(gm1)[[1]], c("(Intercept)" = -1.398343,#lme4 results period2 = -0.991925, period3 = -1.128216, period4 = -1.579745), tolerance = 1e-3) # <- TODO: lower eventually }) test_that("Multiple RE, reordering", { ### Multiple RE, reordering skip_on_cran() tmb1 <- glmmTMB(cbind(incidence, size-incidence) ~ period + (1|herd) + (1|obs), data = cbpp, family=binomial()) tmb2 <- glmmTMB(cbind(incidence, size-incidence) ~ period + (1|obs) + (1|herd), data = cbpp, family=binomial()) expect_equal(fixef(tmb1), fixef(tmb2), tolerance = 1e-8) expect_equal(getME(tmb1, "theta"), getME(tmb2, "theta")[c(2,1)], tolerance = 5e-7) }) test_that("Alternative family specifications [via update(.)]", { ## intercept-only fixed effect res_chr <- matchForm(gm0, update(gm0, family= "binomial"), fn = TRUE) expect_equal(gm0, res_chr) expect_equal(gm0, matchForm(gm0, update(gm0, family= binomial()), fn = TRUE)) expect_warning(res_list <- matchForm(gm0, update(gm0, family= list(family = "binomial", link = "logit")), family=TRUE, fn=TRUE)) expect_equal(gm0, res_list) }) test_that("Update Binomial", { ## matchForm(): call doesn't match (formula gets mangled?) ## timing different gm1u <- update(gm0, . ~ . + period) expect_equal(gm1, matchForm(gm1, gm1u, fn=TRUE), tolerance = 5e-8) }) test_that("internal structures", { ## RE terms only in cond and zi model, not disp: GH #79 expect_equal(names(fm0$modelInfo$reTrms), c("cond","zi")) }) test_that("close to lme4 results", { skip_on_cran() expect_true(require("lme4")) L <- load(system.file("testdata", "lme-tst-fits.rda", package="lme4", mustWork=TRUE)) expect_is(L, "character") message("Loaded testdata from lme4:\n ", paste(strwrap(paste(L, collapse = ", ")), collapse = "\n ")) if(FALSE) { ## part of the above [not recreated here for speed mostly:] ## intercept only in both fixed and random effects fit_sleepstudy_0 <- lmer(Reaction ~ 1 + ( 1 | Subject), sleepstudy) ## fixed slope, intercept-only RE fit_sleepstudy_1 <- lmer(Reaction ~ Days + ( 1 | Subject), sleepstudy) ## fixed slope, intercept & slope RE fit_sleepstudy_2 <- lmer(Reaction ~ Days + (Days|Subject), sleepstudy) ## fixed slope, independent intercept & slope RE fit_sleepstudy_3 <- lmer(Reaction ~ Days + (1|Subject)+ (0+Days|Subject), sleepstudy) cbpp$obs <- factor(seq(nrow(cbpp))) ## intercept-only fixed effect fit_cbpp_0 <- glmer(cbind(incidence, size-incidence) ~ 1 + (1|herd), cbpp, family=binomial) ## include fixed effect of period fit_cbpp_1 <- update(fit_cbpp_0, . ~ . + period) ## include observation-level RE fit_cbpp_2 <- update(fit_cbpp_1, . ~ . + (1|obs)) ## specify formula by proportion/weights instead fit_cbpp_3 <- update(fit_cbpp_1, incidence/size ~ period + (1 | herd), weights = size) } ## What we really want to compare against - Maximum Likelihood (package 'DESCRIPTION' !) 
fi_0 <- lmer(Reaction ~ 1 + ( 1 | Subject), sleepstudy, REML=FALSE) fi_1 <- lmer(Reaction ~ Days + ( 1 | Subject), sleepstudy, REML=FALSE) fi_2 <- lmer(Reaction ~ Days + (Days| Subject), sleepstudy, REML=FALSE) fi_3 <- lmer(Reaction ~ Days + (1|Subject) + (0+Days|Subject), sleepstudy, REML=FALSE) ## Now check closeness to lme4 results ## ...................................... }) context("trickier examples") data(Owls) ## is <<- necessary ... ? Owls <- transform(Owls, ArrivalTime=scale(ArrivalTime,center=TRUE,scale=FALSE), NCalls= SiblingNegotiation) test_that("basic zero inflation", { skip_on_cran() expect_true(require("pscl")) o0.tmb <- glmmTMB(NCalls~(FoodTreatment + ArrivalTime) * SexParent + offset(logBroodSize), ziformula=~1, data = Owls, family=poisson(link = "log")) o0.pscl <-zeroinfl(NCalls~(FoodTreatment + ArrivalTime) * SexParent + offset(logBroodSize)|1, data = Owls) expect_equal(summary(o0.pscl)$coefficients$count, summary(o0.tmb)$coefficients$cond, tolerance=1e-5) expect_equal(summary(o0.pscl)$coefficients$zero, summary(o0.tmb)$coefficients$zi, tolerance=1e-5) o1.tmb <- glmmTMB(NCalls~(FoodTreatment + ArrivalTime) * SexParent + offset(logBroodSize) + diag(1 | Nest), ziformula=~1, data = Owls, family=poisson(link = "log")) expect_equal(ranef(o1.tmb)$cond$Nest[1,1], -0.484, tolerance=1e-2) #glmmADMB gave -0.4842771 }) test_that("alternative binomial model specifications", { skip_on_cran() d <<- data.frame(y=1:10,N=20,x=1) ## n.b. global assignment for testthat m0 <- suppressWarnings(glmmTMB(cbind(y,N-y) ~ 1, data=d, family=binomial())) m3 <- glmmTMB(y/N ~ 1, weights=N, data=d, family=binomial()) expect_equal(fixef(m0),fixef(m3)) m1 <- glmmTMB((y>5)~1,data=d,family=binomial) m2 <- glmmTMB(factor(y>5)~1,data=d,family=binomial) expect_equal(c(unname(logLik(m1))),-6.931472,tol=1e-6) expect_equal(c(unname(logLik(m2))),-6.931472,tol=1e-6) }) test_that("formula expansion", { ## test that formulas are expanded in the call/printed form <- Reaction ~ Days + (1|Subject) expect_equal(grep("Reaction ~ Days", capture.output(print(glmmTMB(form, sleepstudy))), fixed=TRUE),1) }) test_that("NA handling", { skip_on_cran() data(sleepstudy,package="lme4") ss <- sleepstudy ss$Days[c(2,20,30)] <- NA op <- options(na.action=NULL) expect_error(glmmTMB(Reaction~Days,ss),"missing values in object") op <- options(na.action=na.fail) expect_error(glmmTMB(Reaction~Days,ss),"missing values in object") expect_equal(unname(fixef(glmmTMB(Reaction~Days,ss,na.action=na.omit))[[1]]), c(249.70505,11.11263), tolerance=1e-6) op <- options(na.action=na.omit) expect_equal(unname(fixef(glmmTMB(Reaction~Days,ss))[[1]]), c(249.70505,11.11263), tolerance=1e-6) }) test_that("quine NB fit", { skip_on_cran() quine.nb1 <- MASS::glm.nb(Days ~ Sex/(Age + Eth*Lrn), data = quine) quine.nb2 <- glmmTMB(Days ~ Sex/(Age + Eth*Lrn), data = quine, family=nbinom2()) expect_equal(coef(quine.nb1),fixef(quine.nb2)[["cond"]], tolerance=1e-4) }) ## quine.nb3 <- glmmTMB(Days ~ Sex + (1|Age), data = quine, ## family=nbinom2()) test_that("contrasts arg", { skip_on_cran() quine.nb1 <- MASS::glm.nb(Days ~ Sex*Age, data = quine, contrasts=list(Sex="contr.sum",Age="contr.sum")) quine.nb2 <- glmmTMB(Days ~ Sex*Age, data = quine, family=nbinom2(), contrasts=list(Sex="contr.sum",Age="contr.sum")) expect_equal(coef(quine.nb1),fixef(quine.nb2)[["cond"]], tolerance=1e-4) }) test_that("zero disp setting", { skip_on_cran() set.seed(101) dd <- data.frame(y=rnorm(100),obs=1:100) m0 <- glmmTMB(y~1, data=dd) v0 <- sigma(m0)^2 m1 <- glmmTMB(y~1+(1|obs), data=dd) 
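## idea of the check below: with dispformula=~0 the residual variance is
## pinned near zerodisp_val, so the observation-level RE (1|obs) has to
## absorb it; the total sigma^2 + var(1|obs) returned by tmpf() should be
## (nearly) invariant to the zerodisp_val setting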
tmpf <- function(x) c(sigma(x)^2,c(VarCorr(x)[["cond"]]$obs)) m <- -log10(sqrt(.Machine$double.eps)) pvec <- c(1,5,m,2*m,20) res <- matrix(NA,ncol=2,nrow=length(pvec)) for (i in (seq_along(pvec))) { mz <- update(m1,dispformula=~0, control=glmmTMBControl(zerodisp_val=log(10^(-pvec[i])))) res[i,] <- tmpf(mz) } res <- rbind(res,tmpf(m1)) expect_true(var(res[,1]+res[,2])<1e-8) }) test_that("dollar/no data arg warning", { expect_warning(glmmTMB(Reaction ~ sleepstudy$Days, data = sleepstudy), "is not recommended") attach(sleepstudy) expect_warning(glmmTMB(Reaction ~ Days), "is recommended") op <- options(warn = 2) ## check that warning is suppressed expect_is(glmmTMB(Reaction ~ Days, data = NULL), "glmmTMB") detach(sleepstudy) options(op) }) test_that("double bar notation", { data("sleepstudy", package="lme4") m1 <- glmmTMB(Reaction ~ 1 + (Days || Subject), sleepstudy) expect_equal(c(VarCorr(m1)$cond$Subject), c(564.340387730194, 0, 0, 140.874101713108), tolerance = 1e-6) }) glmmTMB/tests/testthat/test-Anova.R0000644000176200001440000000675114070567426016743 0ustar liggesusersrequire(glmmTMB) require(testthat) data(sleepstudy,package="lme4") ## m <- load(system.file("test_data","models.rda",package="glmmTMB", mustWork=TRUE)) if (require(car) && getRversion()>="3.6.0") { ## only testing on recent R: see comments ## https://github.com/glmmTMB/glmmTMB/pull/547#issuecomment-580690208 ## https://github.com/glmmTMB/glmmTMB/issues/493#issuecomment-578569564 fm0 <- lme4::lmer(Reaction~Days+(1|Subject),sleepstudy,REML=FALSE) expect_equal(Anova(fm1),Anova(fm0),tolerance=3e-6) expect_equal(Anova(fm1,type="III"),Anova(fm0,type="III"),tolerance=3e-6) ## test Anova on various components fmd <- glmmTMB(Reaction~Days+(1|Subject), disp=~I(Days>5), sleepstudy, REML=FALSE) ad <- Anova(fmd,component="disp") expect_equal(ad[1,1],18.767,tolerance=1e-5) expect_equal(rownames(ad), "I(Days > 5)") ac <- Anova(fmd,component="cond") expect_equal(ac[1,1], 160.1628, tolerance=1e-5) expect_equal(rownames(ac), "Days") expect_error(Anova(fmd,component="zi"), "trivial fixed effect") ## test that zi and cond tests are different a1 <- Anova(fm3ZIP) a2 <- Anova(fm3ZIP, component="zi") a3 <- Anova(fm3ZIP, type="III") a4 <- Anova(fm3ZIP, type="III",component="zi") get_pval <- function(x) c(na.omit(x$`Pr(>Chisq)`)) expect_equal(get_pval(a1),1.82693150434104e-13) expect_equal(get_pval(a2),numeric(0)) expect_equal(get_pval(a3),c(0, 1.82693150434104e-13)) expect_equal(get_pval(a4),0.81337346580467) test_that("Anova matches zi attributes correctly", { ## zi and cond for cases with different models (GH 673) ## set up case where one of the 'term' indices matches up with an element ## in the 'assign' attribute for the conditional model > the number of terms for the zi model ## here conditional model ## ?? not sure why the data simulation/model fitting has to be within test_that() but apparently it does ?? set.seed(101) n <- 100 ## give data unique name to avoid interfering with dd below (only relevant in some contexts?) 
dd673 <<- data.frame(x=rnorm(n),y=rnorm(n),f=factor(sample(5,size=n,replace=TRUE)))
beta <- rep(1,6)
X <- model.matrix(~x+f,data=dd673)
dd673 <<- within(dd673, {
    eta_cond <- exp(X %*% beta)
    eta_zi <- plogis(x+y)
    z <- ifelse(runif(n) < eta_zi, 0, rpois(n, eta_cond))
})
m673 <- glmmTMB(z ~ x + f, ziformula = ~ x + y, family = poisson, data = dd673)
az <- Anova(m673, component = "zi")
## setup check: conditional term indices run past the zi terms
expect_true(max(attr(model.matrix(m673), "assign")) >= length(az) - 1)
expect_equal(Anova(m673, type=3),
             structure(list(Chisq = c(9.61229896219151, 43.6023716865556,
                                      10.880414540324),
                            Df = c(1, 1, 4),
                            `Pr(>Chisq)` = c(0.00193278528775503,
                                             4.02351135879168e-11,
                                             0.0279413796944278)),
                       class = c("anova", "data.frame"),
                       row.names = c("(Intercept)", "x", "f"),
                       heading = c("Analysis of Deviance Table (Type III Wald chisquare tests)\n",
                                   "Response: z")))
expect_equal(Anova(m673, type=3, component="zi"),
             structure(list(Chisq = c(1.66987565287872, 12.4629514668969,
                                      0.888720602608306),
                            Df = c(1, 1, 1),
                            `Pr(>Chisq)` = c(0.196275190560059,
                                             0.000415103515329875,
                                             0.345824248063703)),
                       class = c("anova", "data.frame"),
                       row.names = c("(Intercept)", "x", "y"),
                       heading = c("Analysis of Deviance Table (Type III Wald chisquare tests)\n",
                                   "Response: z")))
})
}
glmmTMB/tests/testthat/setup_makeex.R0000644000176200001440000000071714120405266017375 0ustar liggesusers## Retrieve from cache, or re-build, fitted objects that are used many times
## (rebuilt on every new installation *but* shared between e.g. different archs)
not_cran <- identical(Sys.getenv("NOT_CRAN"), "true")
file_ok <- gt_load("test_data/models.rda")
if (!file_ok) {
    make_ex <- system.file("test_data", "make_ex.R", package="glmmTMB",
                           mustWork = TRUE)
    cat("running fits to build test examples ...\n")
    source(make_ex, chdir = TRUE, echo = FALSE)
}
glmmTMB/tests/testthat/test-saveload.R0000644000176200001440000000070214070567426017463 0ustar liggesusersstopifnot(require("testthat"), require("glmmTMB"))
context("Saving and loading glmmTMB objects")
test_that("summary consistency", {
    data(sleepstudy, package="lme4")
    fm1 <- glmmTMB(Reaction ~ Days + (1|Subject), sleepstudy)
    s1 <- capture.output(print(summary(fm1)))
    save(fm1, file="fm1.Rdata")
    load("fm1.Rdata")
    file.remove("fm1.Rdata")
    s2 <- capture.output(print(summary(fm1)))
    expect_identical(s1, s2)
})
glmmTMB/tests/testthat/test-sparseX.R0000644000176200001440000000160014121123551017270 0ustar liggesusersstopifnot(require("testthat"), require("glmmTMB"))
## load(system.file("test_data", "models.rda", package="glmmTMB", mustWork=TRUE))
context("sparse X models")
test_that("basic fits", {
    fm2S <- update(fm2,sparseX=c(cond=TRUE))
    ## loosened from 1e-6 to 2e-6 for Solaris ...
expect_equal(fixef(fm2), fixef(fm2S), tolerance=2e-6) expect_equal(VarCorr(fm2), VarCorr(fm2S),tolerance=1e-3) expect_equal(dim(getME(fm2,"X")), dim(getME(fm2S,"X"))) expect_equal(predict(fm2), predict(fm2S), tolerance=1e-3) nd <- sleepstudy[1,] expect_equal(predict(fm2, newdata=nd),predict(fm2S, newdata=nd), tolerance=1e-3) }) test_that("back-compatibility", { x <- up2date(readRDS(system.file("test_data","oldfit.rds", package="glmmTMB"))) expect_is(VarCorr(x),"VarCorr.glmmTMB") expect_is(predict(x),"numeric") }) glmmTMB/tests/testthat/test-offset.R0000644000176200001440000000556714120405266017156 0ustar liggesusersstopifnot(require("testthat"), require("glmmTMB"), require("lme4")) context("offsets") set.seed(101) n <- 10000 mux <- 10 sdx <- 10 a <- .1 b <- 0 residsd <- .01 x <- rnorm(n, mux, sdx) o <<- 2*x+100 o2 <- rep(c(1,5),each=n/2) r1 <- rnorm(n, sd=residsd) r2 <- rnorm(n, sd=residsd*o2) y0 <- a*x + b + r1 y1 <- a*x+b+o+r1 y2 <- a*x+b+r2 y3 <- a*x + b + o + r2 ## global assignment for testthat dat <<- data.frame(y0, y1, y2, y3, x, o, o2, o3=o) m.lm <- lm(y1~x, offset=o, dat) m.lm0 <- lm(y1~x, dat) test_that("LM with offset as argument", { skip_on_cran() m1 <- glmmTMB(y1~x, offset=o, data = dat) expect_equal(fixef(m1)[[1]], coef(m.lm), tol=1e-6) m3 <- glmmTMB(y1~x, offset=o, data = NULL) expect_equal(fixef(m3)[[1]], coef(m.lm), tol=1e-6) }) test_that("LM with offset in formula", { skip_on_cran() m2 <- glmmTMB(y1~x+offset(o), data = dat) expect_equal(fixef(m2)[[1]], coef(m.lm), tol=1e-6) m4 <- glmmTMB(y1~x+offset(o), data = NULL) expect_equal(fixef(m4)[[1]], coef(m.lm), tol=1e-6) }) ## test_that("LM with offset in zero-inflation formula", { ## don't have anything sensible to try here yet ... ## glmmTMB(y~x,zi=~1+offset(o), dat) ## }) test_that("LM with offset in formula - variable not in environment", { skip_on_cran() m5 <- glmmTMB(y1~x,offset=o3, dat) expect_equal(fixef(m5)[[1]],coef(m.lm), tol=1e-6) nullvalue <- NULL m6 <- glmmTMB(y1~x,offset=nullvalue, dat) expect_equal(fixef(m6)[[1]],coef(m.lm0), tol=1e-6) }) test_that("LM with offset in dispersion formula", { skip_on_cran() expect_equal(sigma(glmmTMB(y1~x, dat)), sigma(glmmTMB(y2~x,disp=~1+offset(log(o2)*2), dat)), tolerance=1e-3) }) test_that("LM with multiple offsets (cond/dispersion)", { skip_on_cran() m1 <<- glmmTMB(y0~x, dat) m2 <<- glmmTMB(y3~x+offset(o),disp=~1+offset(log(o2)*2), dat) expect_equal(sigma(m1),sigma(m2),tolerance=1e-3) expect_equal(fixef(m1),fixef(m2),tolerance=1e-3) }) ## this was broken by an earlier multiple-offset formulation test_that("LM with random crap in the formula", { skip_on_cran() m1 <<- suppressWarnings(glmmTMB(y0~dat$x, data = dat)) m2 <<- glmmTMB(y0~x, data = dat) expect_equal(unname(fixef(m1)$cond), unname(fixef(m2)$cond)) }) test_that("offset in do.call", { skip_on_cran() ss <- lme4::sleepstudy off <- rnorm(nrow(ss),10,20) m1 <<- glmmTMB(Reaction ~ Days,ss,offset=off) m2 <<- do.call(glmmTMB,list(Reaction ~ Days,ss,offset=off)) expect_equal(fixef(m1),fixef(m2)) }) test_that("LONG offset in do.call", { skip_on_cran() ss <- lme4::sleepstudy ss <- do.call(rbind,replicate(5,ss,simplify=FALSE)) off <- rnorm(nrow(ss),10,20) m1 <- glmmTMB(Reaction ~ Days,ss,offset=off) #works m2 <- do.call(glmmTMB,list(Reaction ~ Days,ss,offset=off)) #breaks expect_equal(coef(m1),coef(m2)) }) glmmTMB/tests/testthat/test-control.R0000644000176200001440000000401514120405266017333 0ustar liggesusersstopifnot(require("testthat"), require("glmmTMB")) ## Some selected L1-distances between two fits distFits <- 
function(fit1, fit2) { s1 <- summary(fit1) s2 <- summary(fit2) glmmTMB:::namedList( max(abs((coef(s1)$cond - coef(s2)$cond)[,"Estimate"])), max(abs((coef(s1)$cond - coef(s2)$cond)[,"Std. Error"])), abs(logLik(fit1) - logLik(fit2)) ) } test_that("profile method", { skip_on_cran() myfit <- function(...) { glmmTMB(count ~ mined * spp + (1|site), family = poisson, data = Salamanders, control = glmmTMBControl(...)) } m1 <- myfit( profile=FALSE ) m2 <- myfit( profile=TRUE ) expect_true( all( distFits(m1, m2) < c(1e-4, 1e-2, 1e-4) ) ) ## ########################################################### myfit <- function(...) { glmmTMB(count ~ mined * spp + (1|site), zi = ~ (1 | spp), family = poisson, data = Salamanders, control = glmmTMBControl(...)) } m1 <- myfit( profile=FALSE ) m2 <- myfit( profile=TRUE ) expect_true( all( distFits(m1, m2) < c(1e-4, 1e-2, 1e-4) ) ) }) test_that("parallel regions", { skip_on_cran() myfit <- function(...) { glmmTMB(count ~ mined * spp + (1|site), family = poisson, data = Salamanders, verbose = FALSE, control = glmmTMBControl(...)) } # Record time and model capture_time_model <- function(...) { start_time <- Sys.time() model <- myfit(...) end_time <- Sys.time() return(list(model = model, elapsed_time = end_time - start_time )) } m1 <- capture_time_model( parallel = 1 ) ## DON'T grab all cores - bad on large machines ## FIXME: check if parallel setting is persistent ??? m2 <- capture_time_model( parallel = min(4, parallel::detectCores() )) expect_true( all( distFits(m1[[1]], m2[[1]]) < c(1e-4, 1e-2, 1e-4) ) ) # expect_true( m1[[2]] <= m2[[2]]) }) glmmTMB/tests/testthat/test-zi.R0000644000176200001440000000471114120405266016300 0ustar liggesusersstopifnot(require("testthat"), require("glmmTMB"), require("lme4")) ## simulate something smaller than the Owls data set? context("ZI models") data(Owls) test_that("zi", { skip_on_cran() ## Fit negative binomial model with "constant" Zero Inflation : owls_nb1 <<- glmmTMB(SiblingNegotiation ~ FoodTreatment*SexParent + (1|Nest)+offset(log(BroodSize)), family = nbinom1(), ziformula = ~1, data=Owls) owls_nb2 <<- update(owls_nb1, ziformula = ~ FoodTreatment*SexParent + (1|Nest)) owls_nb3 <<- update(owls_nb1,ziformula=~.) 
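## ziformula=~. copies the right-hand side of the conditional formula
## (including the random effects, minus any offset) into the zero-inflation
## model, so owls_nb3 should match the explicit owls_nb2 specification;
## the second expect_equal() below checks exactly that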
expect_equal(fixef(owls_nb2), structure(list(cond = structure(c(0.812028613585629, -0.342496105044418, -0.0751681324132088, 0.122484981295054), .Names = c("(Intercept)", "FoodTreatmentSatiated", "SexParentMale", "FoodTreatmentSatiated:SexParentMale")), zi = structure(c(-2.20863281353936, 1.86779027553285, -0.825200772653965, 0.451435813933449), .Names = c("(Intercept)", "FoodTreatmentSatiated", "SexParentMale", "FoodTreatmentSatiated:SexParentMale")), disp = structure(1.33089630005212, .Names = "(Intercept)")), .Names = c("cond", "zi", "disp"), class = "fixef.glmmTMB"), tolerance=1e-5) expect_equal(fixef(owls_nb2),fixef(owls_nb3)) }) test_that("zi beta and Gamma", { skip_on_cran() suppressWarnings(RNGversion("3.5.1")) set.seed(101) dd <- data.frame(yb=c(rbeta(100,shape1=2,shape2=1),rep(0,10)), yg=c(rgamma(100,shape=1.5,rate=1),rep(0,10))) expect_error(glmmTMB(yb~1, data=dd, family=beta_family), "y values must be") m1 <- glmmTMB(yb~1, data=dd, family=beta_family, zi=~1) expect_equal(unname(plogis(fixef(m1)[["zi"]])),1/11) expect_equal(unname(fixef(m1)[["cond"]]), 0.6211636, tolerance=1e-5) ## need *both* ziformula and family=ziGamma for gamma-hurdle expect_error(glmmTMB(yg~1, data=dd, family=Gamma), "non-positive values not allowed") expect_error(glmmTMB(yg~1, zi=~1, data=dd, family=Gamma), "non-positive values not allowed") expect_error(glmmTMB(yg~1, data=dd, family=ziGamma), "non-positive values not allowed") m2 <- glmmTMB(yg~1, data=dd, family=ziGamma(link="log"), zi=~1) expect_equal(unname(plogis(fixef(m2)[["zi"]])),1/11) expect_equal(unname(fixef(m2)[["cond"]]), 0.3995267, tolerance=1e-5) }) glmmTMB/tests/testthat/test-methods.R0000644000176200001440000005227414120405266017330 0ustar liggesusersstopifnot(require("testthat"), require("glmmTMB")) data(sleepstudy, cbpp, Pastes, package = "lme4") if (getRversion() < "3.3.0") { sigma.default <- function (object, use.fallback = TRUE, ...) sqrt(deviance(object, ...)/(nobs(object, use.fallback = use.fallback) - length(coef(object)))) } ## load(system.file("test_data", "models.rda", package="glmmTMB", mustWork=TRUE)) context("basic methods") test_that("Fitted and residuals", { expect_equal(length(fitted(fm2)),nrow(sleepstudy)) expect_equal(mean(fitted(fm2)),298.507891) expect_equal(mean(residuals(fm2)),0,tol=1e-5) ## Pearson and response are the same for a Gaussian model expect_equal(residuals(fm2,type="response"), residuals(fm2,type="pearson")) ## ... but not for Poisson or NB ... expect_false(mean(residuals(fm2P,type="response"))== mean(residuals(fm2P,type="pearson"))) expect_false(mean(residuals(fm2NB,type="response"))== mean(residuals(fm2NB,type="pearson"))) rr2 <- function(x) sum(residuals(x,type="pearson")^2) ## test Pearson resids for gaussian, Gamma vs. 
base-R versions ss <- as.data.frame(state.x77) expect_equal(rr2(glm(Murder~Population,ss,family=gaussian)), rr2(glmmTMB(Murder~Population,ss,family=gaussian))) expect_equal(rr2(glm(Murder~Population,ss,family=Gamma(link="log"))), rr2(glmmTMB(Murder~scale(Population),ss, family=Gamma(link="log"))),tol=1e-5) ## weights are incorporated in Pearson residuals ## GH 307 tmbm4 <- glm(incidence/size ~ period, data = cbpp, family = binomial, weights = size) tmbm5 <- glmmTMB(incidence/size ~ period, data = cbpp, family = binomial, weights = size) expect_equal(residuals(tmbm4,type="pearson"), residuals(tmbm5,type="pearson"),tolerance=1e-6) ## two-column responses give vector of residuals GH 307 tmbm6 <- glmmTMB(cbind(incidence,size-incidence) ~ period, data = cbpp, family = binomial) expect_equal(residuals(tmbm4,type="pearson"), residuals(tmbm6,type="pearson"),tolerance=1e-6) ## predict handles na.exclude correctly ## GH 568 b <- rnorm(332) mu <- exp(1.5 + .26*b) y <- sapply(mu, function(mu){rpois(1, lambda = mu)}) napos <- 51 b[napos] <- NA y.na <- y y.na[napos] <- NA mod.ex <- glmmTMB(y ~ b, family = "poisson", na.action = "na.exclude", data = NULL) ## Get predictions/resids pr.ex <- predict(mod.ex, type = "response") # SEEMS to work fine expect_equal(which(is.na(pr.ex)),napos) rs.ex <- residuals(mod.ex, type = "response") expect_equal(unname(which(is.na(rs.ex))),napos) pr.rs.ex <- pr.ex + rs.ex expect_equal(unname(pr.rs.ex), y.na) }) test_that("Predict", { expect_equal(predict(fm2),predict(fm2,newdata=sleepstudy)) pr2se <- predict(fm2, se.fit=TRUE) i <- sample(nrow(sleepstudy), 20) newdata <- sleepstudy[i, ] pr2sub <- predict(fm2, newdata=newdata, se.fit=TRUE) expect_equivalent(pr2se$fit, predict(fm2)) expect_equivalent(pr2se$fit[i], pr2sub$fit) expect_equivalent(pr2se$se.fit[i], pr2sub$se.fit) expect_equal(unname( pr2se$ fit[1] ), 254.2208, tol=1e-4) expect_equal(unname( pr2se$se.fit[1] ), 12.94514, tol=1e-4) expect_equal(unname( pr2se$ fit[100] ), 457.9684, tol=1e-4) expect_equal(unname( pr2se$se.fit[100] ), 14.13943, tol=1e-4) ## predict without response in newdata expect_equal(predict(fm2), predict(fm2,newdata=sleepstudy[,c("Days","Subject")])) }) test_that("VarCorr", { vv <- VarCorr(fm2) vv2 <- vv$cond$Subject expect_equal(dim(vv2),c(2,2)) expect_equal(outer(attr(vv2,"stddev"), attr(vv2,"stddev"))*attr(vv2,"correlation"), vv2,check.attributes=FALSE) vvd <- VarCorr(fm2diag) expect_equal(vvd$cond$Subject[1,2],0) ## off-diagonal==0 }) test_that("drop1", { dd <- drop1(fm2,test="Chisq") expect_equal(dd$AIC,c(1763.94,1785.48),tol=1e-4) }) test_that("anova", { aa <- anova(fm0,fm2) expect_equal(aa$AIC,c(1785.48,1763.94),tol=1e-4) }) test_that("anova ML/REML checks", { skip_on_cran() ## FIXME: too slow? 
## speed up/save so we don't need to skip on CRAN fmA1 <- glmmTMB(Reaction ~ Days + (Days | Subject), sleepstudy, REML = TRUE) fmA2 <- glmmTMB(Reaction ~ Days + diag(Days | Subject), sleepstudy, REML = TRUE) fmA3 <- glmmTMB(Reaction ~ 1 + (1 | Subject), sleepstudy, REML = TRUE) fmA4 <- glmmTMB(Reaction ~ Days + (1 | Subject), sleepstudy, REML = FALSE) fmA5 <- glmmTMB(Reaction ~ 1 + (1 | Subject), sleepstudy, REML = FALSE) dd <- data.frame(y=rnorm(100),a=rnorm(100), b=rnorm(100)) fmA6 <- glmmTMB(y~a*b, data=dd, REML=TRUE) fmA7 <- glmmTMB(y~b*a, data=dd, REML=TRUE) ## ML, differing fixed effects expect_equal(class(anova(fmA4,fmA5)), c("anova", "data.frame")) ## REML, differing RE expect_equal(class(anova(fmA1,fmA2)), c("anova", "data.frame")) ## REML, FE in different order expect_equal(class(anova(fmA6,fmA7)), c("anova", "data.frame")) expect_false(identical(attr(terms(fmA6),"term.labels"), attr(terms(fmA7),"term.labels"))) ## REML, differing fixed expect_error(anova(fmA1,fmA3), "Can't compare REML fits with different") ## REML vs ML expect_error(anova(fmA1,fmA4), "Can't compare REML and ML") }) test_that("terms", { ## test whether terms() are returned with predvars for doing ## model prediction etc. with complex bases dd <<- data.frame(x=1:10,y=1:10) require("splines") ## suppress convergence warnings(we know this is a trivial example) suppressWarnings(m <- glmmTMB(y~ns(x,3),dd)) ## if predvars is not properly attached to term, this will ## fail as it tries to construct a 3-knot spline from a single point expect_equal(model.matrix(delete.response(terms(m)),data=data.frame(x=1)), structure(c(1, 0, 0, 0), .Dim = c(1L, 4L), .Dimnames = list("1", c("(Intercept)", "ns(x, 3)1", "ns(x, 3)2", "ns(x, 3)3")), assign = c(0L, 1L, 1L, 1L))) }) test_that("terms back-compatibility", { f0 <- up2date(readRDS(system.file("test_data", "oldfit.rds", package="glmmTMB", mustWork=TRUE))) expect_true(!is.null(terms(f0))) }) test_that("summary_print", { getVal <- function(x,tag="Dispersion") { cc <- capture.output(print(summary(x))) if (length(gg <- grep(tag,cc,value=TRUE))==0) return(NULL) cval <- sub("^.*: ","",gg) ## get value after colon ... 
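## (the printed summary is the only place the dispersion parameter shows
## up, so the test scrapes it; families that print no matching line,
## e.g. Poisson, return NULL above)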
return(as.numeric(cval)) } ## no dispersion printed for Gaussian or disp==1 families expect_equal(getVal(fm2),654.9,tolerance=1e-2) expect_equal(getVal(fm2P),NULL) expect_equal(getVal(fm2G),0.00654,tolerance=1e-2) expect_equal(getVal(fm2NB,"Dispersion"),286,tolerance=1e-2) }) test_that("sigma", { s1 <<- sigma(lm(Reaction~Days,sleepstudy)) s2 <<- sigma(glm(Reaction~Days,sleepstudy,family=Gamma(link="log"))) s3 <<- MASS::glm.nb(round(Reaction)~Days,sleepstudy) ## remove bias-correction expect_equal(sigma(fm3),s1*(1-1/nobs(fm3)),tolerance=1e-3) expect_equal(sigma(fm3G),s2,tolerance=5e-3) expect_equal(s3$theta,sigma(fm3NB),tolerance=1e-4) }) test_that("confint", { ci <- confint(fm2, 1:2, estimate=FALSE) expect_equal(ci, structure(c(238.406083254105, 7.52295734348693, 264.404107485727, 13.4116167530013), .Dim = c(2L, 2L), .Dimnames = list(c("(Intercept)", "Days"), c("2.5 %", "97.5 %"))), tolerance=1e-6) ciw <- confint(fm2, 1:2, method="Wald", estimate=FALSE) expect_warning(confint(fm2,type="junk"), "extra arguments ignored") ## Gamma test Std.Dev and sigma ci.2G <- confint(fm2G, full=TRUE, estimate=FALSE) ci.2G.expect <- structure(c(5.48101734463434, 0.0247781469519971, 0.0720456818285145, 0.0676097041325336, 0.0115949839239226, -0.518916569224983, 5.58401849103742, 0.0429217639958554, 0.0907365112607892, 0.150456372082291, 0.026437653590095, 0.481694558589466), .Dim = c(6L, 2L), .Dimnames = list(c("cond.(Intercept)", "cond.Days", "sigma", "cond.Std.Dev.(Intercept)", "cond.Std.Dev.Days", "cond.Cor.Days.(Intercept)"), c("2.5 %", "97.5 %"))) expect_equal(ci.2G, ci.2G.expect, tolerance=1e-6) ## nbinom2 test Std.Dev and sigma ci.2NB <- confint(fm2NB, full=TRUE, estimate=FALSE) ci.2NB.expect <- structure(c(5.48098712803496, 0.0248163866132581, 183.810585063238, 0.0661772559176498, 0.0113436359250623, -0.520883925243851, 5.58422550729504, 0.0428993237779538, 444.73566599561, 0.150917871951769, 0.0263549890118426, 0.502211628076133), .Dim = c(6L, 2L), .Dimnames = list(c("cond.(Intercept)", "cond.Days", "sigma", "cond.Std.Dev.(Intercept)", "cond.Std.Dev.Days", "cond.Cor.Days.(Intercept)"), c("2.5 %", "97.5 %"))) expect_equal(ci.2NB, ci.2NB.expect, tolerance=1e-6) ## profile CI ## ... 
no RE ci.prof0 <- confint(fm_noRE, full=TRUE, method="profile", npts=3) expect_equal(ci.prof0, structure(c(238.216039176535, 7.99674863649355, 7.51779308310198, 264.368471102549, 12.8955469713508, 7.93347860201449), .Dim = 3:2, .Dimnames = list(c("(Intercept)", "Days", "d~(Intercept)"), c("2.5 %", "97.5 %"))), tolerance=1e-5) ci.prof <- confint(fm2,parm=1,method="profile", npts=3) expect_equal(ci.prof, structure(c(237.27249, 265.13383), .Dim = 1:2, .Dimnames = list( "(Intercept)", c("2.5 %", "97.5 %"))), tolerance=1e-6) ## uniroot CI ci.uni <- confint(fm2,parm=1,method="uniroot") expect_equal(ci.uni, structure(c(237.68071,265.12949,251.4050979), .Dim = c(1L, 3L), .Dimnames = list("(Intercept)", c("2.5 %", "97.5 %", "Estimate"))), tolerance=1e-6) ## check against 'raw' tmbroot tmbr <- TMB::tmbroot(fm2$obj,name=1) expect_equal(ci.uni[1:2],unname(c(tmbr))) ## GH #438 cc <- confint(fm4) expect_equal(dim(cc),c(5,3)) expect_equal(rownames(cc), c("(Intercept)", "Illiteracy", "Population", "Area", "`HS Grad`")) }) test_that("confint with theta/beta", { set.seed(101) n <- 1e2 bd <- data.frame( year=factor(sample(2002:2018, size=n, replace=TRUE)), class=factor(sample(1:20, size=n, replace=TRUE)), x1 = rnorm(n), x2 = rnorm(n), x3 = factor(sample(c("low","reg","high"), size=n, replace=TRUE), levels=c("low","reg","high")), count = rnbinom(n, mu = 3, size=1)) m1 <- glmmTMB(count~x1+x2+x3+(1|year/class), data = bd, zi = ~x2+x3+(1|year/class), family = truncated_nbinom2, ) expect_equal(rownames(confint(m1, "beta_")), c("cond.(Intercept)", "cond.x1", "cond.x2", "cond.x3reg", "cond.x3high", "zi.(Intercept)", "zi.x2", "zi.x3reg", "zi.x3high")) expect_equal(rownames(confint(m1, "theta_")), c("class:year.cond.Std.Dev.(Intercept)", "year.cond.Std.Dev.(Intercept)", "class:year.zi.Std.Dev.(Intercept)", "year.zi.Std.Dev.(Intercept)")) }) test_that("profile", { p1_th <- profile(fm1,parm="theta_",npts=4) expect_true(all(p1_th$.par=="theta_1|Subject.1")) p1_b <- profile(fm1,parm="beta_",npts=4) expect_equal(unique(as.character(p1_b$.par)), c("(Intercept)","Days")) }) test_that("profile (no RE)", { p0_th <- profile(fm_noRE,npts=4) expect_equal(dim(p0_th),c(43,3)) }) test_that("vcov", { expect_equal(dim(vcov(fm2)[[1]]),c(2,2)) expect_equal(dim(vcov(fm2,full=TRUE)),c(6,6)) expect_equal(rownames(vcov(fm2,full=TRUE)), structure(c("(Intercept)", "Days", "d~(Intercept)", "theta_Days|Subject.1", "theta_Days|Subject.2", "theta_Days|Subject.3"), .Names = c("cond1", "cond2", "disp", "theta1", "theta2", "theta3"))) ## vcov doesn't include dispersion for non-dispersion families ... expect_equal(dim(vcov(fm2P,full=TRUE)),c(5,5)) ## oops, dot_check() disabled in vcov.glmmTMB ... 
## expect_error(vcov(fm2,x="junk"),"unknown arguments") }) set.seed(101) test_that("simulate", { sm2 <<- rowMeans(do.call(cbind, simulate(fm2, 10))) sm2P <<- rowMeans(do.call(cbind, simulate(fm2P, 10))) sm2G <<- rowMeans(do.call(cbind, simulate(fm2G, 10))) sm2NB <<- rowMeans(do.call(cbind, simulate(fm2NB, 10))) expect_equal(sm2, sleepstudy$Reaction, tol=20) expect_equal(sm2P, sleepstudy$Reaction, tol=20) expect_equal(sm2G, sleepstudy$Reaction, tol=20) expect_equal(sm2NB, sleepstudy$Reaction, tol=20) }) test_that("formula", { expect_equal(formula(fm2),Reaction ~ Days + (Days | Subject)) expect_equal(formula(fm2, fixed.only=TRUE),Reaction ~ Days) expect_equal(formula(fm2, component="disp"), ~1) expect_equal(formula(fm2, component="disp", fixed.only=TRUE), ~1) expect_equal(formula(fm2, component="zi"), ~0) expect_equal(formula(fm2, component="zi", fixed.only=TRUE), ~0) }) context("simulate consistency with glm/lm") test_that("binomial", { s1 <- simulate(f1b, 5, seed=1) s2 <- simulate(f2b, 5, seed=1) s3 <- simulate(f3b, 5, seed=1) expect_equal(max(abs(as.matrix(s1) - as.matrix(s2))), 0) expect_equal(max(abs(as.matrix(s1) - as.matrix(s3))), 0) }) test_that("residuals from binomial factor responses", { expect_equal(residuals(fm2Bf),residuals(fm2Bn)) }) mkstr <- function(dd) { ff <- which(vapply(dd,is.factor,logical(1))) for (i in ff) { dd[[i]] <- as.character(dd[[i]]) } return(dd) } rr <- function(txt) { read.table(header=TRUE,stringsAsFactors=FALSE,text=txt, colClasses=rep(c("character","numeric"),c(5,2))) } context("Ranef etc.") test_that("as.data.frame(ranef(.)) works", { expect_equal( mkstr(as.data.frame(ranef(fm3ZIP))[c("cond.1","cond.19","zi.1"),]), rr( " component grpvar term grp condval condsd cond.1 cond Subject (Intercept) 308 1.066599e-02 0.040430751 cond.19 cond Subject Days 308 2.752424e-02 0.007036958 zi.1 zi Subject (Intercept) 308 -2.850238e-07 0.127106817 "), tolerance=1e-5) expect_equal( mkstr(as.data.frame(ranef(fm2diag2))[c("cond.1","cond.19"),]), rr( " component grpvar term grp condval condsd cond.1 cond Subject (Intercept) 308 1.854597 13.294388 cond.19 cond Subject Days 308 9.236420 2.699692 "), tolerance=1e-5) }) test_that("ranef(.) works with more than one grouping factor", { expect_equal(sort(names(ranef(fmP)[["cond"]])), c("batch","sample")) expect_equal(dim(as.data.frame(ranef(fmP))), c(40,6)) }) test_that("coef(.) works", { cc <- coef(fm3ZIP) expect_equal(cc[["cond"]][[1]][1,], structure(list(`(Intercept)` = 5.54291514202372, Days = 0.0613847280572168), row.names = "308", class = "data.frame"), tolerance=1e-5) expect_equal(cc[["zi"]][[1]][1,,drop=FALSE], structure(list(`(Intercept)` = -13.2478200379555), row.names = "308", class = "data.frame"), tolerance=1e-5) }) test_that("simplified coef(.) printing", { op <- options(digits=2) cc <- capture.output(print(coef(fm0))) expect_equal(cc[1:3],c("$Subject", " Days (Intercept)", "308 20.6 249")) options(op) }) ## weird stuff here with environments, testing ... test_that("various binomial response types work", { skip_on_cran() ## FIXME: test for factors, explicit cbind(.,.) ## do we need to define this within this scope? 
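## binomial responses can be encoded as a two-column cbind(success, failure)
## matrix, as a proportion plus 'weights', or as a factor/logical; the
## refit() round-trips below check that a simulation from one encoding can
## be refitted under another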
ddb <- data.frame(y=I(yb)) ddb <- within(ddb, { w <- rowSums(yb) prop <- y[,1]/w }) s1 <- simulate(f1b, 1, seed=1) f1 <- fixef(refit(f1b,s1[[1]])) s3 <- simulate(f3b, 1, seed=1) f3 <- fixef(refit(f3b,s3[[1]])) expect_equal(f1,f3) expect_error(refit(f4b,s3[[1]]), "can't find response in data") }) test_that("binomial response types work with data in external scope", { s1 <- simulate(f1b, 1, seed=1) f1 <- fixef(refit(f1b,s1[[1]])) s3 <- simulate(f3b, 1, seed=1) f3 <- fixef(refit(f3b,s3[[1]])) expect_equal(f1,f3) }) test_that("confint works for models with dispformula", { ## FIXME: should make this an example sim1 <- function(nfac=40, nt=100, facsd=0.1, tsd=0.15, mu=0, residsd=1) { dat <- expand.grid(fac=factor(letters[1:nfac]), t=1:nt) n <- nrow(dat) dat$REfac <- rnorm(nfac, sd=facsd)[dat$fac] dat$REt <- rnorm(nt, sd=tsd)[dat$t] dat$x <- rnorm(n, mean=mu, sd=residsd) + dat$REfac + dat$REt dat } set.seed(101) d1 <- sim1(mu=100, residsd=10) d2 <- sim1(mu=200, residsd=5) d1$sd <- "ten" d2$sd <- "five" dat <- rbind(d1, d2) m1 <- glmmTMB(x ~ sd + (1|t), dispformula=~sd, data=dat) ref_val <- structure(c(3.14851028784965, 1.30959944530366, 3.25722952319077, 1.46335165911997, 3.20286990552021, 1.38647555221182), .Dim = 2:3, .Dimnames = list(c("disp.(Intercept)", "disp.sdten"), c("2.5 %", "97.5 %", "Estimate"))) cc <- confint(m1) expect_equal(cc[grep("^disp",rownames(cc)),], ref_val, tolerance = 1e-6) }) simfun <- function(formula, family, data, beta=c(0,1)) { ss <- list(beta=beta) if (grepl("nbinom",family)) ss$betad <- 0 suppressWarnings(m1 <- glmmTMB(formula, family=family, data=data, start=ss, control=glmmTMBControl(optCtrl=list(eval.max=0,iter.max=0)))) return(m1) } ntab <- function(formula=y~x, family, data, seed=101) { set.seed(seed) m1 <- simfun(formula, family, data) return(table(exp(data$x),unlist(simulate(m1)))) } pfun <- function(i,tab, dist="nbinom2", data, plot=TRUE) { n <- as.numeric(names(tab[i,])) s_tab <- tab[i,]/sum(tab[i,]) if (plot) plot(n,s_tab) m <- exp(data$x)[i] argList <- switch(dist, nbinom1=list(n, phi=1, mu=m), nbinom2=list(n, size=1, mu=m), poisson=list(n, lambda=m)) expected <- do.call(paste0("dtruncated_",dist), argList) if (plot) lines(n,expected) return(list(n = n, obs = s_tab, exp = expected)) } test_that("trunc nbinom simulation", { ## GH 572 dd <- data.frame(f=factor(1:2), y=rep(1,2)) ## results for second element of sim, depending on family: simres <- c(truncated_nbinom2=1,truncated_nbinom1=2) for (f in paste0("truncated_nbinom",1:2)) { ## generate a model with two groups, one with a ridiculously low (log mean). 
## don't allow the optimizer to actually do anything, so coefs will remain ## at their starting values m1 <- simfun(y~f, family=f, data=dd, beta=c(-40,39)) expect_equal(fixef(m1)$cond, c(`(Intercept)` = -40, f2 = 39)) expect_equal(fitted(m1),c(4.24835425529159e-18, 0.367879441171442)) ## should NOT get NaN (or zero) for the first group if hack/fix is working expect_equal(unname(unlist(simulate(m1,seed=101))),c(1,1)) } }) test_that("trunc nbinom sim 2", { set.seed(101) dd <- expand.grid(x=log(1:5), rep=1:10000, y=1) t1 <- ntab(family="truncated_nbinom1", data=dd) t2 <- ntab(family="truncated_nbinom2", data=dd) p1 <- pfun(1,tab=t1,dist="nbinom1",data=dd, plot=FALSE) p2 <- pfun(1,tab=t2,dist="nbinom2",data=dd, plot=FALSE) expect_equal(unname(p1$obs), p1$exp, tolerance = 0.01) expect_equal(unname(p2$obs), p2$exp, tolerance = 0.01) if (FALSE) { op <- par(ask=TRUE) for (i in 1:nrow(t1)) pfun(i,tab=t1,dist="nbinom1",data=dd) for (i in 1:nrow(t2)) pfun(i,tab=t2,dist="nbinom2",data=dd) par(op) } }) test_that("trunc poisson simulation", { dd <- expand.grid(x=log(1:5), rep=1:10000, y=1) t3 <- ntab(family="truncated_poisson", data=dd) expect_equal(unname(t3[1,1:6]), c(5829L, 2905L, 963L, 242L, 56L, 5L)) ## explore if (FALSE) { op <- par(ask=TRUE) for (i in 1:nrow(t3)) pfun(i,tab=t3,dist="poisson",data=dd) par(op) } }) glmmTMB/tests/testthat/test-utils.R0000644000176200001440000000201614070567426017025 0ustar liggesusersstopifnot(require("testthat"), require("glmmTMB")) test_that("addForm", { expect_equal(addForm(y~x,~1,~z),y~x+1+z) expect_warning(addForm(y~x,z~1), "discarding LHS") }) test_that("noSpecials", { expect_equal(noSpecials(y~1+us(1|f)), y~1) expect_equal(noSpecials(y~1+us(1|f),delete=FALSE), y~1+(1|f)) expect_equal(noSpecials(y~us(1|f)), y ~ 1) expect_equal(noSpecials(y~us(1|f), delete=FALSE), y~ (1|f)) expect_equal(noSpecials(y~us+1), y ~ us + 1) expect_equal(noSpecials(~us+1), ~ us + 1) expect_equal(noSpecials(~1+x+us(1|f), delete=FALSE), ~ 1 + x + (1|f)) }) test_that("extractForm", { expect_equal(extractForm(~a+offset(b),quote(offset)), list(quote(offset(b)))) expect_equal(extractForm(~c,quote(offset)), NULL) expect_equal(extractForm(~a+offset(b)+offset(c),quote(offset)), list(quote(offset(b)),quote(offset(c)))) expect_equal(extractForm(~offset(x),quote(offset)), list(quote(offset(x)))) }) glmmTMB/tests/testthat/test-downstream.R0000644000176200001440000000616414070567426020060 0ustar liggesusersrequire(glmmTMB) require(testthat) data(sleepstudy,package="lme4") ## m <- load(system.file("test_data","models.rda",package="glmmTMB", mustWork=TRUE)) if (require(emmeans)) { context("emmeans") m1 <- glmmTMB(SiblingNegotiation ~ FoodTreatment*SexParent + (1|Nest)+offset(log(BroodSize)), family = nbinom1(), zi = ~1, data=Owls) em1 <- emmeans(m1, poly ~ FoodTreatment | SexParent) em2 <- emmeans(m1, poly ~ FoodTreatment | SexParent, type = "response") expect_is(em1,"emm_list") expect_true(any(grepl("given on the log (not the response) scale", capture.output(print(em1)),fixed=TRUE))) expect_true(any(grepl("back-transformed from the log scale", capture.output(print(em2))))) expect_equal(summary(em1[[2]])$estimate[1], -0.8586306, tolerance=1e-4) expect_equal(summary(em2[[2]])$ratio[1], 0.42374, tolerance=1e-4) m2 <- glmmTMB(count ~ spp + mined + (1|site), zi=~spp + mined, family=nbinom2, data=Salamanders) rgc <- ref_grid(m2, component = "cond") expect_is(rgc, "emmGrid") expect_equal(predict(rgc)[2], -1.574079, tolerance=1e-4) expect_equal(predict(rgc, type="response")[2], 0.207198, tolerance=1e-4) rgz 
<- ref_grid(m2, component = "zi") expect_is(rgz, "emmGrid") expect_equal(predict(rgz)[2], 2.071444, tolerance=1e-4) expect_equal(predict(rgz, type="response")[2], 0.88809654, tolerance=1e-4) ## test zeroing out non-focal variance components V <- vcov(m2)[["cond"]] v <- V["minedno","minedno"] V[] <- 0 V["minedno","minedno"] <- v expect_equal(as.data.frame(emmeans(m2, ~mined, component="cond"))[["SE"]], c(0.38902257366905, 0.177884950308125)) expect_equal(as.data.frame(emmeans(m2, ~mined, component="cond", vcov.=V))[["SE"]], c(0, 0.366598230362198)) } if (require(effects)) { context("effects") ## pass dd: some kind of scoping issue in testthat context f <- function(x,dd) { sapply(allEffects(x), function(y) { y$transformation$inverse(y$fit) }) } fm2_tmb <- glmmTMB(round(Reaction)~Days+(1|Subject),family=poisson,data=sleepstudy) fm2_lmer <- lme4::glmer(round(Reaction)~Days+(1|Subject),family=poisson,data=sleepstudy) if (getRversion() >= "3.6.0") { ## only testing on recent R: see comments ## https://github.com/glmmTMB/glmmTMB/pull/547#issuecomment-580690208 ## https://github.com/glmmTMB/glmmTMB/issues/493#issuecomment-578569564 expect_equal(f(fm2_tmb),f(fm2_lmer),tolerance=2e-5) ## set.seed(101) dd <<- data.frame(y=rnbinom(1000,mu=4,size=1), x = rnorm(1000), f=factor(rep(LETTERS[1:20],each=50))) fm3_tmb <- glmmTMB(y~x,family=nbinom2,data=dd) fm3_MASS <- MASS::glm.nb(y~x,data=dd) ## suppressing "overriding variance function for effects: computed variances may be incorrect" warning here expect_equal(suppressWarnings(f(fm3_tmb,dd)),f(fm3_MASS,dd),tolerance=2e-5) } ## recent R } ## effects glmmTMB/tests/testthat/test-altopt.R0000644000176200001440000000076314120405266017164 0ustar liggesusersstopifnot(require("testthat"), require("glmmTMB")) data(sleepstudy, cbpp, package = "lme4") context("alternative optimizers") test_that("downstream methods work with optim()", { skip_on_cran() m1 <- glmmTMB(count~ mined, family = poisson, data = Salamanders, control = glmmTMBControl(optimizer = optim, optArgs = list(method="BFGS"))) expect_is(summary(m1),"summary.glmmTMB") expect_is(confint(m1),"matrix") }) glmmTMB/tests/testthat/test-families.R0000644000176200001440000004153514120405266017454 0ustar liggesusers## test more exotic familes/model types stopifnot(require("testthat"), require("glmmTMB")) simfun0 <- function(beta=c(2,1), sd.re=5, ngrp=10,nobs=200, invlink=exp) { x <- rnorm(nobs) f <- factor(rep(1:ngrp,nobs/ngrp)) u <- rnorm(ngrp,sd=sd.re) eta <- beta[1]+beta[2]*x+u[f] mu <- invlink(eta) return(data.frame(x,f,mu)) } context("alternative binomial specifications") test_that("binomial", { load(system.file("testdata","radinger_dat.RData",package="lme4")) radinger_dat <<- radinger_dat ## global assignment for testthat mod1 <<- glmmTMB(presabs~predictor+(1|species),family=binomial, radinger_dat) mod2 <<- update(mod1,as.logical(presabs)~.) 
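## a numeric 0/1 response and its as.logical() counterpart define the same
## Bernoulli model, so the two fits should give identical predictions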
expect_equal(predict(mod1),predict(mod2)) ## Compare 2-column and prop/size specification dd <- data.frame(success=1:10, failure=11:20) dd$size <- rowSums(dd) dd$prop <- local( success / size, dd) mod4 <- glmmTMB(cbind(success,failure)~1,family=binomial,data=dd) mod5 <- glmmTMB(prop~1,weights=size,family=binomial,data=dd) expect_equal( logLik(mod4) , logLik(mod5) ) expect_equal( fixef(mod4)$cond , fixef(mod5)$cond ) ## Now with extra weights dd$w <- 2 mod6 <- glmmTMB(cbind(success,failure)~1,family=binomial,data=dd,weights=w) mod7 <- glmmTMB(prop~1,weights=size*w,family=binomial,data=dd) mod6.glm <- glm(cbind(success,failure)~1,family=binomial,data=dd,weights=w) mod7.glm <- glm(prop~1,weights=size*w,family=binomial,data=dd) expect_equal( logLik(mod6)[[1]] , logLik(mod6.glm)[[1]] ) expect_equal( logLik(mod7)[[1]] , logLik(mod7.glm)[[1]] ) expect_equal( fixef(mod6)$cond , fixef(mod7)$cond ) ## Test TRUE/FALSE specification x <- c(TRUE, TRUE, FALSE) dx <- data.frame(x) m1 <- glmmTMB(x~1, family=binomial(), data=dx) m2 <- glm (x~1, family=binomial(), data=dx) expect_equal( as.numeric(logLik(m1)), as.numeric(logLik(m2)) ) expect_equal( as.numeric(unlist(fixef(m1))), as.numeric(coef(m2)) ) ## Mis-specifications prop <- c(.1, .2, .3) ## weights=1 => prop * weights non integers expect_warning( glmmTMB(prop~1, family=binomial()) ) ## Warning as glm x <- c(1, 2, 3) ## weights=1 => x > weights ! expect_error ( glmmTMB(x~1, family=binomial(), data = data.frame(x))) ## Error as glm }) context("non-integer count warnings") test_that("count distributions", { dd <- data.frame(y=c(0.5,1,1,1)) for (f in c("binomial","betabinomial","poisson", "genpois", ## "compois", ## fails anyway ... "truncated_genpois", # "truncated_compois", "nbinom1", "nbinom2" # why do these truncated cases fail? ##, "truncated_nbinom1", ##"truncated_nbinom2" )) { expect_warning(m <- glmmTMB(y~1,data=dd,family=f), "non-integer") } }) context("fitting exotic families") test_that("beta", { skip_on_cran() set.seed(101) nobs <- 200; eps <- 0.001; phi <- 0.1 dd0 <- simfun0(nobs=nobs,sd.re=1,invlink=plogis) y <- with(dd0,rbeta(nobs,shape1=mu/phi,shape2=(1-mu)/phi)) dd <<- data.frame(dd0,y=pmin(1-eps,pmax(eps,y))) m1 <- glmmTMB(y~x+(1|f),family=beta_family(), data=dd) expect_equal(fixef(m1)[[1]], structure(c(1.98250567574413, 0.843382531038295), .Names = c("(Intercept)", "x")), tol=1e-5) expect_equal(c(VarCorr(m1)[[1]][[1]]), 0.433230926800709, tol=1e-5) ## allow family="beta", but with warning expect_warning(m2 <- glmmTMB(y~x+(1|f),family="beta", data=dd),"please use") expect_equal(coef(summary(m1)),coef(summary(m2))) }) test_that("nbinom", { skip_on_cran() nobs <- 200; phi <- 0.1 set.seed(101) dd0 <- simfun0(nobs=nobs) ## global assignment for testthat (??) dd <- data.frame(dd0,y=rnbinom(nobs,size=phi,mu=dd0$mu)) m1 <- glmmTMB(y~x+(1|f),family=nbinom2(), data=dd) expect_equal(fixef(m1)[[1]], structure(c(2.09866748794435, 1.12703589660625), .Names = c("(Intercept)", "x")), tol=1e-5) expect_equal(c(VarCorr(m1)[[1]][[1]]), 9.54680210862774, tol=1e-5) expect_equal(sigma(m1),0.09922738,tol=1e-5) ## nbinom1 ## to simulate, back-calculate shape parameters for NB2 ... nbphi <- 2 nbvar <- nbphi*dd0$mu ## n.b. actual model is (1+phi)*var, ## so estimate of phi is approx. 
1 ## V = mu*(1+mu/k) -> mu/k = V/mu-1 -> k = mu/(V/mu-1) k <- with(dd0,mu/(nbvar/mu - 1)) y <- rnbinom(nobs,size=k,mu=dd$mu) dd <- data.frame(dd0,y=y) ## global assignment for testthat m1 <- glmmTMB(y~x+(1|f),family=nbinom1(), data=dd) expect_equal(c(unname(c(fixef(m1)[[1]])), c(VarCorr(m1)[[1]][[1]]), sigma(m1)), c(1.93154240357181, 0.992776302432081, 16.447888398429, 1.00770603513152), tol=1e-5) ## identity link: GH #20 x <- 1:100; m <- 2; b <- 100 y <- m*x+b set.seed(101) dat <<- data.frame(obs=rnbinom(length(y), mu=y, size=5), x=x) ## with(dat, plot(x, obs)) ## coef(mod1 <- MASS::glm.nb(obs~x,link="identity",dat)) expect_equal(fixef(glmmTMB(obs~x, family=nbinom2(link="identity"), dat)), structure(list(cond = structure(c(115.092240041138, 1.74390840106971), .Names = c("(Intercept)", "x")), zi = numeric(0), disp = structure(1.71242627201796, .Names = "(Intercept)")), .Names = c("cond", "zi", "disp"), class = "fixef.glmmTMB")) ## segfault (GH #248) dd <- data.frame(success=1:10,failure=10) expect_error(glmmTMB(cbind(success,failure)~1,family=nbinom2,data=dd), "matrix-valued responses are not allowed") }) test_that("dbetabinom", { skip_on_cran() set.seed(101) nobs <- 200; eps <- 0.001; phi <- 0.1 dd0 <- simfun0(nobs=nobs,sd.re=1,invlink=plogis) p <- with(dd0,rbeta(nobs,shape1=mu/phi,shape2=(1-mu)/phi)) p <- pmin(1-eps,pmax(p,eps)) b <- rbinom(nobs,size=5,prob=p) dd <<- data.frame(dd0,y=b,N=5) m1 <- glmmTMB(y/N~x+(1|f), weights=N, family=betabinomial(), data=dd) expect_equal(c(unname(c(fixef(m1)[[1]])), c(VarCorr(m1)[[1]][[1]]), sigma(m1)), c(2.1482114,1.0574946,0.7016553,8.3768711), tolerance=1e-5) ## Two-column specification m2 <- glmmTMB(cbind(y, N-y) ~ x + (1|f), family=betabinomial(), data=dd) expect_identical(m1$fit, m2$fit) ## Rolf Turner example: X <- readRDS(system.file("test_data","turner_bb.rds",package="glmmTMB")) fmla <- cbind(Dead, Alive) ~ (Trt + 0)/Dose + (Dose | Rep) ## baseline (binomial, not betabinomial) fit0 <- glmmTMB(fmla, data = X, family = binomial(link = "cloglog"), dispformula = ~1) skip_on_cran() ## fails ATLAS tests with failure in inner optimization ## loop ("gradient function must return a numeric vector of length 16") fit1 <- suppressWarnings( ## NaN function evaluation; ## non-pos-def Hessian; ## false convergence warning from nlminb glmmTMB(fmla, data = X, family = betabinomial(link = "cloglog"), dispformula = ~1) ) fit1_glmmA <- readRDS(system.file("test_data","turner_bb_GLMMadaptive.rds", package="glmmTMB")) suppressWarnings( fit2 <- glmmTMB(fmla, data = X, family = betabinomial(link = "cloglog"), dispformula = ~1, start=list(beta=fixef(fit0)$cond)) ## non-pos-def Hessian warning ## diagnose() suggests a singular fit ## but fixed effects actually look OK ) ff1 <- fixef(fit1)$cond ff2 <- fixef(fit2)$cond ## conclusions: ## (1) glmmTMB fit from initial starting vals is bad ## (2) glmmTMB fit from restart is OK (for fixed effects) ## (3) GLMMadaptive matches OK **but not** for nAGQ=1 (which _should_ ## fit) -- np <- length(ff1) ff_GA <- fit1_glmmA[1:np,ncol(fit1_glmmA)] expect_equal(ff_GA, ff2, tolerance=0.05) if (FALSE) { ## graphical exploration ... 
cc <- cbind(ff1,ff2,fit1_glmmA[1:np,]) matplot(cc,type="b") ## plot diffs between glmmTMB fit and GLMMadaptive for nAGQ>1 adiff <- sweep(fit1_glmmA[1:np,-1],1,ff2,"-") matplot(adiff, type="b", ylab="diff from glmmTMB") } }) test_that("truncated", { skip_on_cran() ## Poisson set.seed(101) z_tp <<- rpois(1000,lambda=exp(1)) z_tp <<- z_tp[z_tp>0] if (FALSE) { ## n.b.: keep library() calls commented out, they may ## trigger CRAN complaints ## library(glmmADMB) g0_tp <- glmmadmb(z_tp~1,family="truncpoiss",link="log") fixef(g0) ## 0.9778591 } g1_tp <- glmmTMB(z_tp~1,family=truncated_poisson(), data=data.frame(z_tp)) expect_equal(unname(fixef(g1_tp)[[1]]),0.9778593,tol=1e-5) ## Truncated poisson with zeros => invalid: num_zeros <- 10 z_tp0 <<- c(rep(0, num_zeros), z_tp) expect_error(g1_tp0 <- glmmTMB(z_tp0~1,family=truncated_poisson(), data=data.frame(z_tp0))) ## Truncated poisson with zeros and zero-inflation: g1_tp0 <- glmmTMB(z_tp0~1,family=truncated_poisson(), ziformula=~1, data=data.frame(z_tp0)) expect_equal( plogis(as.numeric(fixef(g1_tp0)$zi)), num_zeros/length(z_tp0), tol=1e-7 ) ## Test zero-prob expect_equal(fixef(g1_tp0)$cond, fixef(g1_tp)$cond, tol=1e-6) ## Test conditional model ## nbinom2 set.seed(101) z_nb <<- rnbinom(1000,size=2,mu=exp(2)) z_nb <<- z_nb[z_nb>0] if (FALSE) { ## library(glmmADMB) g0_nb2 <- glmmadmb(z_nb~1,family="truncnbinom",link="log") fixef(g0_nb2) ## 1.980207 g0_nb2$alpha ## 1.893 } g1_nb2 <- glmmTMB(z_nb~1,family=truncated_nbinom2(), data=data.frame(z_nb)) expect_equal(c(unname(fixef(g1_nb2)[[1]]),sigma(g1_nb2)), c(1.980207,1.892970),tol=1e-5) ## Truncated nbinom2 with zeros => invalid: num_zeros <- 10 z_nb0 <<- c(rep(0, num_zeros), z_nb) expect_error(g1_nb0 <- glmmTMB(z_nb0~1,family=truncated_nbinom2(), data=data.frame(z_nb0))) ## Truncated nbinom2 with zeros and zero-inflation: g1_nb0 <- glmmTMB(z_nb0~1,family=truncated_nbinom2(), ziformula=~1, data=data.frame(z_nb0)) expect_equal( plogis(as.numeric(fixef(g1_nb0)$zi)), num_zeros/length(z_nb0), tol=1e-7 ) ## Test zero-prob expect_equal(fixef(g1_nb0)$cond, fixef(g1_nb2)$cond, tol=1e-6) ## Test conditional model ## nbinom1: constant mean, so just a reparameterization of ## nbinom2 (should have the same likelihood) ## phi=(1+mu/k)=1+exp(2)/2 = 4.69 if (FALSE) { ## library(glmmADMB) g0_nb1 <- glmmadmb(z_nb~1,family="truncnbinom1",link="log") fixef(g0_nb1) ## 2.00112 g0_nb1$alpha ## 3.784 } g1_nb1 <- glmmTMB(z_nb~1,family=truncated_nbinom1(), data=data.frame(z_nb)) expect_equal(c(unname(fixef(g1_nb1)[[1]]),sigma(g1_nb1)), c(1.980207,3.826909),tol=1e-5) ## Truncated nbinom1 with zeros => invalid: expect_error(g1_nb0 <- glmmTMB(z_nb0~1,family=truncated_nbinom1(), data=data.frame(z_nb0))) ## Truncated nbinom2 with zeros and zero-inflation: g1_nb0 <- glmmTMB(z_nb0~1,family=truncated_nbinom1(), ziformula=~1, data=data.frame(z_nb0)) expect_equal( plogis(as.numeric(fixef(g1_nb0)$zi)), num_zeros/length(z_nb0), tol=1e-7 ) ## Test zero-prob expect_equal(fixef(g1_nb0)$cond, fixef(g1_nb1)$cond, tol=1e-6) ## Test conditional model }) ##Genpois test_that("truncated_genpois",{ skip_on_cran() tgp1 <<- glmmTMB(z_nb ~1, data=data.frame(z_nb), family=truncated_genpois()) tgpdat <<- data.frame(y=simulate(tgp1)[,1]) tgp2 <<- glmmTMB(y ~1, tgpdat, family=truncated_genpois()) expect_equal(sigma(tgp1), sigma(tgp2), tol=1e-1) expect_equal(fixef(tgp1)$cond[1], fixef(tgp2)$cond[1], tol=1e-2) cc <- confint(tgp2, full=TRUE) expect_lt(cc["sigma", "2.5 %"], sigma(tgp1)) expect_lt(sigma(tgp1), cc["sigma", "97.5 %"]) 
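## same coverage check for the intercept: the interval computed from the
## refit (tgp2) should bracket the estimate from the generating fit (tgp1)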
expect_lt(cc["cond.(Intercept)", "2.5 %"], unname(fixef(tgp1)$cond[1])) expect_lt(unname(fixef(tgp1)$cond[1]), cc["cond.(Intercept)", "97.5 %"]) }) context("trunc compois") ##Compois test_that("truncated_compois",{ skip_on_cran() cmpdat <<- data.frame(f=factor(rep(c('a','b'), 10)), y=c(15,5,20,7,19,7,19,7,19,6,19,10,20,8,21,8,22,7,20,8)) tcmp1 <<- glmmTMB(y~f, cmpdat, family= truncated_compois()) expect_equal(unname(fixef(tcmp1)$cond), c(2.9652730653, -0.9773987194), tol=1e-6) expect_equal(sigma(tcmp1), 0.1833339, tol=1e-6) expect_equal(predict(tcmp1,type="response")[1:2], c(19.4, 7.3), tol=1e-6) }) context("compois") test_that("compois", { skip_on_cran() # cmpdat <<- data.frame(f=factor(rep(c('a','b'), 10)), # y=c(15,5,20,7,19,7,19,7,19,6,19,10,20,8,21,8,22,7,20,8)) cmp1 <<- glmmTMB(y~f, cmpdat, family=compois()) expect_equal(unname(fixef(cmp1)$cond), c(2.9652730653, -0.9773987194), tol=1e-6) expect_equal(sigma(cmp1), 0.1833339, tol=1e-6) expect_equal(predict(cmp1,type="response")[1:2], c(19.4, 7.3), tol=1e-6) }) context("genpois") test_that("genpois", { skip_on_cran() gendat <<- data.frame(y=c(11,10,9,10,9,8,11,7,9,9,9,8,11,10,11,9,10,7,13,9)) gen1 <<- glmmTMB(y~1, family=genpois(), gendat) expect_equal(unname(fixef(gen1)$cond), 2.251292, tol=1e-6) expect_equal(sigma(gen1), 0.235309, tol=1e-6) }) context("tweedie") test_that("tweedie", { skip_on_cran() ## Boiled down tweedie:::rtweedie : rtweedie <- function (n, xi = power, mu, phi, power = NULL) { mu <- array(dim = n, mu) if ((power > 1) & (power < 2)) { rt <- array(dim = n, NA) lambda <- mu^(2 - power)/(phi * (2 - power)) alpha <- (2 - power)/(1 - power) gam <- phi * (power - 1) * mu^(power - 1) N <- rpois(n, lambda = lambda) for (i in (1:n)) { rt[i] <- sum(rgamma(N[i], shape = -alpha, scale = gam[i])) } } else stop() as.vector(rt) } ## Simulation experiment nobs <- 2000; mu <- 4; phi <- 2; p <- 1.7 set.seed(101) y <- rtweedie(nobs, mu=mu, phi=phi, power=p) twm <- glmmTMB(y ~ 1, family=tweedie(), data = NULL) ## Check mu expect_equal(unname( exp(fixef(twm)$cond) ), mu, tolerance = .1) ## Check phi expect_equal(unname( exp(fixef(twm)$disp) ), phi, tolerance = .1) ## Check power expect_equal(unname( plogis(twm$fit$par["thetaf"]) + 1 ), p, tolerance = .01) ## Check internal rtweedie used by simulate y2 <- c(simulate(twm)[,1],simulate(twm)[,1]) twm2 <- glmmTMB(y2 ~ 1, family=tweedie(), data = NULL) expect_equal(fixef(twm)$cond, fixef(twm2)$cond, tol=1e-1) expect_equal(sigma(twm), sigma(twm2), tol=1e-1) }) test_that("gaussian_sqrt", { set.seed(101) nobs <- 200 dd0_sqrt <- simfun0(nobs=nobs,sd.re=1,invlink=function(x) x^2) dd0_sqrt$y <- rnorm(nobs,mean=dd0_sqrt$mu,sd=0.1) g1 <- glmmTMB(y~x+(1|f), family=gaussian(link="sqrt"), data=dd0_sqrt) expect_equal(fixef(g1), structure(list(cond = c(`(Intercept)` = 2.03810165917618, x = 1.00241002916226 ), zi = numeric(0), disp = c(`(Intercept)` = -4.68350239019746)), class = "fixef.glmmTMB"), tol=1e-6) }) context("link function info available") fam1 <- c("poisson","nbinom1","nbinom2","compois") fam2 <- c("binomial","beta_family","betabinomial","tweedie") for (f in c(fam1,paste0("truncated_",fam1),fam2)) { ## print(f) expect_true("linkinv" %in% names(get(f)())) } context("link info added to family") d.AD <- data.frame(counts=c(18,17,15,20,10,20,25,13,12), outcome=gl(3,1,9), treatment=gl(3,3)) glm.D93 <- glmmTMB(counts ~ outcome + treatment, family = poisson(), d.AD) expect_warning(glm.D93B <- glmmTMB(counts ~ outcome + treatment, family = list(family="poisson", link="log"), d.AD)) ## note update(..., 
family= ...) is only equal up to tolerance=5e-5 ... glm.D93C <- glmmTMB(counts ~ outcome + treatment, family = "poisson", d.AD) expect_equal(predict(glm.D93),predict(glm.D93B)) expect_equal(predict(glm.D93),predict(glm.D93C)) glmmTMB/tests/testthat/test-weight.R0000644000176200001440000000407114070567426017157 0ustar liggesusersstopifnot(require("testthat"), require("glmmTMB"), require("MASS")) context("weight") set.seed(1) nrep <- 20 nsim <- 5 sdi <- .1 sdii <- .2 rho <- -.1 slope <- .8 ni<-100 dat <- expand.grid(i=1:ni, rep=1:nrep , x=c(0 ,.2, .4)) RE <- MASS::mvrnorm(n = ni, mu =c(0, 0), Sigma = matrix(c(sdi*sdi, rho*sdi*sdii, rho*sdi*sdii ,sdii*sdii),2,2)) inddat <- transform(dat, y=rpois(n=nrow(dat), lambda = exp(RE[i,1] + x*(slope + RE[i,2])))) ## aggdat = ddply(inddat, ~i+x+y, summarize, freq=length(rep)) aggdat <- with(inddat,as.data.frame(table(i,x,y), stringsAsFactors=FALSE)) aggdat <- aggdat[with(aggdat,order(i,x,y)),] ## cosmetic/match previous aggdat <- subset(aggdat,Freq>0) ## drop zero categories aggdat <- transform(aggdat, i=as.integer(i), x=as.numeric(x), y=as.numeric(y)) ## only difference from previous is name of weights arg (Freq vs freq) test_that("Weights can be an argument", { wei_glmmtmb <<- glmmTMB(y ~ x+(x|i), data=aggdat, weight=Freq, family="poisson") expect_equal(unname(fixef(wei_glmmtmb)$cond), c(-0.00907013282660578, 0.944062427131668), tolerance=1e-6) }) test_that("Return weights", { expect_equal(weights(wei_glmmtmb), aggdat$Freq) expect_equal(weights(wei_glmmtmb, type="prior"), aggdat$Freq) ## partial matching expect_equal(weights(wei_glmmtmb, type="prio"), aggdat$Freq) expect_error(weights(wei_glmmtmb, type = "working"),"should be one of") expect_warning(weights(wei_glmmtmb, junk = "abc"), "unused arguments ignored") }) ind_glmmtmb <<- glmmTMB(y ~ x+(x|i), data=inddat, family="poisson") test_that("Estimates are the same", { expect_equal(summary(wei_glmmtmb)$coefficients$cond, summary(ind_glmmtmb)$coefficients$cond, tolerance=1e-6) expect_equal(ranef(wei_glmmtmb), ranef(ind_glmmtmb), tolerance=1e-5) expect_equal(AIC(wei_glmmtmb), AIC(ind_glmmtmb), tolerance=1e-5) }) glmmTMB/tests/testthat/test-start.R0000644000176200001440000000100414070567426017016 0ustar liggesusersstopifnot(require("testthat"), require("glmmTMB")) data(sleepstudy, cbpp, package = "lme4") test_that("error messages for user-spec start", { expect_error( glmmTMB(Reaction~Days+(Days|Subject), sleepstudy, start=list(beta=c(2))), "parameter vector length mismatch.*length\\(beta\\)==1, should be 2") expect_error(glmmTMB(Reaction~Days+(Days|Subject), sleepstudy, start=list(junk=5)), "unrecognized vector 'junk'") }) glmmTMB/tests/testthat/test-formulas.R0000644000176200001440000000267414120405266017514 0ustar liggesusersstopifnot(require("testthat"), require("glmmTMB")) context("formula parsing") nrt <- function(x) length(x$reTrmFormulas) test_that("basic splitForm", { expect_equal(nrt(splitForm(y~(x+q))),0) ## reTrms part should be empty sf1 <- splitForm(y~(x+q)+(1|f)) sf2 <- splitForm(y~(x+q)+us(1|f)) sf3 <- splitForm(y~(x+q)+diag(1|f)) sf4 <- splitForm(~x+y+(f|g)+cs(1|g)) expect_equal(nrt(sf1),1) expect_equal(sf1$reTrmFormulas,list(quote(1|f))) expect_equal(sf1,sf2) expect_equal(sf3$reTrmClasses,"diag") expect_equal(sf4$reTrmClasses,c("us","cs")) }) test_that("slash terms", { sf5 <- splitForm(~x+y+(1|f/g)) sf6 <- splitForm(~x+y+(1|f/g/h)) sf7 <- splitForm(~x+y+(1|(f/g)/h)) expect_equal(sf5$reTrmClasses, rep("us",2)) expect_equal(sf6$reTrmClasses, rep("us",3)) expect_equal(sf6,sf7) }) 
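## for reference, a sketch of the expansion behind the checks above (not
## run, and not part of the original file): nested 'slash' grouping factors
## expand as f/g -> f + f:g before being turned into RE terms
if (FALSE) {
    splitForm(~ x + (1 | f/g))$reTrmFormulas  ## list(1 | f, 1 | f:g)
}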
test_that("grpvar terms", { sf8 <- splitForm(~x+y+(1|f*g)) sf9 <- splitForm(~x+y+(1|f+g+h)) expect_equal(sf8$reTrmClasses,rep("us",3)) expect_equal(sf8$reTrmFormula,list(quote(1|f),quote(1|g),quote(1|f:g))) expect_equal(sf9$reTrmClasses,rep("us",3)) expect_equal(sf9$reTrmFormula,list(quote(1|f),quote(1|g),quote(1|h))) }) test_that("noSpecial", { ## handle parentheses in formulas: GH #174 ff <- y~1+(((us(1|f)))) expect_equal(noSpecials(ff,delete=FALSE),y~1+(1|f)) expect_equal(noSpecials(ff),y~1) ## 'naked' special - left alone: GH #261 ff2 <- y ~ us expect_equal(noSpecials(ff2),ff2) }) glmmTMB/tests/testthat/test-bootMer.R0000644000176200001440000000165014120405266017264 0ustar liggesusersstopifnot(require("testthat"), require("glmmTMB"), require("lme4")) ## cat("ON CRAN:", testthat:::on_cran(), "\n") context("bootMer") fun <- function(x) predict(x)[1] test_that("Bernoulli responses", { skip_on_cran() Salamanders$pres <- as.numeric(Salamanders$count>0) m <- glmmTMB(pres ~ mined +(1|site), family=binomial, data=Salamanders) b <- lme4::bootMer(m, fun, nsim=2, seed=101) expect_true(var(c(b$t))>0) expect_equal(suppressWarnings(c(confint(b))), c(-1.579923,-1.250725),tolerance=1e-5) }) test_that("binomial responses", { skip_on_cran() m <- glmmTMB(count ~ mined + (1|site), family=poisson, data=Salamanders) ss1 <- simulate(m,nsim=2,seed=101) b <- bootMer(m, fun, nsim=2, seed=101) expect_true(var(c(b$t))>0) expect_equal(suppressWarnings(c(confint(b))), c(-0.7261239,-0.6921794), tolerance=1e-5) }) glmmTMB/tests/AAAtest-all.R0000644000176200001440000000052114070567426015077 0ustar liggesusersif (require("testthat")) { pkg <- "glmmTMB" require(pkg, character.only=TRUE) print(sessionInfo()) test_check(pkg, reporter="summary") print(warnings()) # TODO? catch most of these by expect_warning(..) 
} else { pkg <- "glmmTMB"; warning("Package 'testthat' not available, cannot run unit tests for package ", sQuote(pkg)) } glmmTMB/src/0000755000176200001440000000000014122065444012342 5ustar liggesusersglmmTMB/src/glmmTMB.cpp0000644000176200001440000006020514070567426014361 0ustar liggesusers#include <TMB.hpp> #include "init.h" #include "distrib.h" // don't need to include omp.h; we get it via TMB.hpp namespace glmmtmb{ template<class Type> bool isNA(Type x){ return R_IsNA(asDouble(x)); } template<class Type> bool notFinite(Type x) { return (!R_FINITE(asDouble(x))); } } enum valid_family { gaussian_family = 0, binomial_family = 100, betabinomial_family =101, beta_family =200, Gamma_family =300, poisson_family =400, truncated_poisson_family =401, genpois_family =402, compois_family =403, truncated_genpois_family =404, truncated_compois_family =405, nbinom1_family =500, nbinom2_family =501, truncated_nbinom1_family =502, truncated_nbinom2_family =503, t_family =600, tweedie_family = 700 }; enum valid_link { log_link = 0, logit_link = 1, probit_link = 2, inverse_link = 3, cloglog_link = 4, identity_link = 5, sqrt_link = 6 }; enum valid_covStruct { diag_covstruct = 0, us_covstruct = 1, cs_covstruct = 2, ar1_covstruct = 3, ou_covstruct = 4, exp_covstruct = 5, gau_covstruct = 6, mat_covstruct = 7, toep_covstruct = 8, rr_covstruct = 9 }; enum valid_ziPredictCode { corrected_zipredictcode = 0, uncorrected_zipredictcode = 1, prob_zipredictcode = 2, disp_zipredictcode = 3 }; template<class Type> Type inverse_linkfun(Type eta, int link) { Type ans; switch (link) { case log_link: ans = exp(eta); break; case identity_link: ans = eta; break; case logit_link: ans = invlogit(eta); break; case probit_link: ans = pnorm(eta); break; case cloglog_link: ans = Type(1) - exp(-exp(eta)); break; case inverse_link: ans = Type(1) / eta; break; case sqrt_link: ans = eta*eta; // pow(eta, Type(2)) doesn't work ... ?
break; // TODO: Implement remaining links default: error("Link not implemented!"); } // End switch return ans; } /* logit transformed inverse_linkfun without losing too much accuracy */ template Type logit_inverse_linkfun(Type eta, int link) { Type ans; switch (link) { case logit_link: ans = eta; break; case probit_link: ans = glmmtmb::logit_pnorm(eta); break; case cloglog_link: ans = glmmtmb::logit_invcloglog(eta); break; default: ans = logit( inverse_linkfun(eta, link) ); } // End switch return ans; } /* log transformed inverse_linkfun without losing too much accuracy */ template Type log_inverse_linkfun(Type eta, int link) { Type ans; switch (link) { case log_link: ans = eta; break; case logit_link: ans = -logspace_add(Type(0), -eta); break; default: ans = log( inverse_linkfun(eta, link) ); } // End switch return ans; } template struct per_term_info { // Input from R int blockCode; // Code that defines structure int blockSize; // Size of one block int blockReps; // Repeat block number of times int blockNumTheta; // Parameter count per block matrix dist; vector times;// For ar1 case // Report output matrix corr; vector sd; matrix fact_load; // For rr case }; template struct terms_t : vector > { terms_t(SEXP x){ (*this).resize(LENGTH(x)); for(int i=0; i(t); } // Optionally, pass distance matrix: SEXP d = getListElement(y, "dist"); if(!isNull(d)){ RObjectTestExpectedType(d, &isMatrix, "dist"); (*this)(i).dist = asMatrix(d); } } } }; template Type termwise_nll(array &U, vector theta, per_term_info& term, bool do_simulate = false) { Type ans = 0; if (term.blockCode == diag_covstruct){ // case: diag_covstruct vector sd = exp(theta); for(int i = 0; i < term.blockReps; i++){ ans -= dnorm(vector(U.col(i)), Type(0), sd, true).sum(); if (do_simulate) { U.col(i) = rnorm(Type(0), sd); } } term.sd = sd; // For report } else if (term.blockCode == us_covstruct){ // case: us_covstruct int n = term.blockSize; vector logsd = theta.head(n); vector corr_transf = theta.tail(theta.size() - n); vector sd = exp(logsd); density::UNSTRUCTURED_CORR_t nldens(corr_transf); density::VECSCALE_t > scnldens = density::VECSCALE(nldens, sd); for(int i = 0; i < term.blockReps; i++){ ans += scnldens(U.col(i)); if (do_simulate) { U.col(i) = sd * nldens.simulate(); } } term.corr = nldens.cov(); // For report term.sd = sd; // For report } else if (term.blockCode == cs_covstruct){ // case: cs_covstruct int n = term.blockSize; vector logsd = theta.head(n); Type corr_transf = theta(n); vector sd = exp(logsd); Type a = Type(1) / (Type(n) - Type(1)); Type rho = invlogit(corr_transf) * (Type(1) + a) - a; matrix corr(n,n); for(int i=0; i nldens(corr); density::VECSCALE_t > scnldens = density::VECSCALE(nldens, sd); for(int i = 0; i < term.blockReps; i++){ ans += scnldens(U.col(i)); if (do_simulate) { U.col(i) = sd * nldens.simulate(); } } term.corr = nldens.cov(); // For report term.sd = sd; // For report } else if (term.blockCode == toep_covstruct){ // case: toep_covstruct int n = term.blockSize; vector logsd = theta.head(n); vector sd = exp(logsd); vector parms = theta.tail(n-1); // Corr parms parms = parms / sqrt(Type(1.0) + parms * parms ); // Now in (-1,1) matrix corr(n,n); for(int i=0; i j ? 
i-j : j-i) - 1 ) ); density::MVNORM_t nldens(corr); density::VECSCALE_t > scnldens = density::VECSCALE(nldens, sd); for(int i = 0; i < term.blockReps; i++){ ans += scnldens(U.col(i)); if (do_simulate) { U.col(i) = sd * nldens.simulate(); } } term.corr = nldens.cov(); // For report term.sd = sd; // For report } else if (term.blockCode == ar1_covstruct){ // case: ar1_covstruct // * NOTE: Valid parameter space is phi in [-1, 1] // * NOTE: 'times' not used as we assume unit distance between consecutive time points. int n = term.blockSize; Type logsd = theta(0); Type corr_transf = theta(1); Type phi = corr_transf / sqrt(1.0 + pow(corr_transf, 2)); Type sd = exp(logsd); for(int j = 0; j < term.blockReps; j++){ ans -= dnorm(U(0, j), Type(0), sd, true); // Initialize if (do_simulate) { U(0, j) = rnorm(Type(0), sd); } for(int i=1; i::value) { // Disable AD for this part term.corr.resize(n,n); term.sd.resize(n); for(int i=0; i::value) { // Disable AD for this part term.corr.resize(n,n); term.sd.resize(n); for(int i=0; i dist = term.dist; if(! ( dist.cols() == n && dist.rows() == n ) ) error ("Dimension of distance matrix must equal blocksize."); // First parameter is sd Type sd = exp( theta(0) ); // Setup correlation matrix matrix corr(n,n); for(int i=0; i nldens(corr); density::SCALE_t > scnldens = density::SCALE(nldens, sd); for(int i = 0; i < term.blockReps; i++){ ans += scnldens(U.col(i)); if (do_simulate) { U.col(i) = sd * nldens.simulate(); } } term.corr = corr; // For report term.sd.resize(n); // For report term.sd.fill(sd); } else if (term.blockCode == rr_covstruct){ // case: reduced rank for(int i = 0; i < term.blockReps; i++){ ans -= dnorm(vector(U.col(i)), Type(0), 1, true).sum(); if (do_simulate) { U.col(i) = rnorm(U.rows(), Type(0), Type(1)); } } int p = term.blockSize; int nt = theta.size(); int rank = (2*p + 1 - (int)sqrt(pow(2.0*p + 1, 2) - 8*nt) ) / 2 ; matrix Lambda(p, rank); vector lam_diag = theta.head(rank); vector lam_lower = theta.tail(nt - rank); for (int j = 0; j < rank; j++){ for (int i = 0; i < p; i++){ if (j > i) Lambda(i, j) = 0; else if(i == j) Lambda(i, j) = lam_diag(j); else Lambda(i, j) = lam_lower(j*p - (j + 1)*j/2 + i - 1 - j); //Fills by column } } for(int i = 0; i < term.blockReps; i++){ vector usub = U.col(i).segment(0, rank); U.col(i) = Lambda * usub; } term.fact_load = Lambda; if(isDouble::value) { term.corr = Lambda * Lambda.transpose(); term.sd = term.corr.diagonal().array().sqrt(); term.corr.array() /= term.sd * term.sd.transpose(); } } else error("covStruct not implemented!"); return ans; } template Type allterms_nll(vector &u, vector theta, vector >& terms, bool do_simulate = false) { Type ans = 0; int upointer = 0; int tpointer = 0; int nr, np = 0, offset; for(int i=0; i < terms.size(); i++){ nr = terms(i).blockSize * terms(i).blockReps; // Note: 'blockNumTheta=0' ==> Same parameters as previous term. bool emptyTheta = ( terms(i).blockNumTheta == 0 ); offset = ( emptyTheta ? -np : 0 ); np = ( emptyTheta ? 
np : terms(i).blockNumTheta ); vector dim(2); dim << terms(i).blockSize, terms(i).blockReps; array useg( &u(upointer), dim); vector tseg = theta.segment(tpointer + offset, np); ans += termwise_nll(useg, tseg, terms(i), do_simulate); upointer += nr; tpointer += terms(i).blockNumTheta; } return ans; } template Type objective_function::operator() () { #ifdef _OPENMP this -> max_parallel_regions = omp_get_max_threads(); // std::cout << "OpenMP max_parallel_regions=" << this -> max_parallel_regions << "\n"; #else this -> max_parallel_regions = 1; // std::cout << "no OpenMP (max_parallel_regions=1)\n"; #endif DATA_MATRIX(X); bool sparseX = X.rows()==0 && X.cols()==0; DATA_SPARSE_MATRIX(Z); DATA_MATRIX(Xzi); bool sparseXzi = Xzi.rows()==0 && Xzi.cols()==0; DATA_SPARSE_MATRIX(Zzi); DATA_MATRIX(Xd); bool sparseXd = Xd.rows()==0 && Xd.cols()==0; DATA_VECTOR(yobs); DATA_VECTOR(size); //only used in binomial DATA_VECTOR(weights); DATA_VECTOR(offset); DATA_VECTOR(zioffset); DATA_VECTOR(doffset); // Define covariance structure for the conditional model DATA_STRUCT(terms, terms_t); // Define covariance structure for the zero inflation DATA_STRUCT(termszi, terms_t); // Parameters related to design matrices PARAMETER_VECTOR(beta); PARAMETER_VECTOR(betazi); PARAMETER_VECTOR(b); PARAMETER_VECTOR(bzi); PARAMETER_VECTOR(betad); // Joint vector of covariance parameters PARAMETER_VECTOR(theta); PARAMETER_VECTOR(thetazi); // Extra family specific parameters (e.g. tweedie) PARAMETER_VECTOR(thetaf); DATA_INTEGER(family); DATA_INTEGER(link); // Flags DATA_INTEGER(ziPredictCode); bool zi_flag = (betazi.size() > 0); DATA_INTEGER(doPredict); DATA_IVECTOR(whichPredict); // One-Step-Ahead (OSA) residuals DATA_VECTOR_INDICATOR(keep, yobs); // Joint negative log-likelihood Type jnll=0; // Random effects PARALLEL_REGION jnll += allterms_nll(b, theta, terms, this->do_simulate); PARALLEL_REGION jnll += allterms_nll(bzi, thetazi, termszi, this->do_simulate); // Linear predictor vector eta = Z * b + offset; if (!sparseX) { eta += X*beta; } else { DATA_SPARSE_MATRIX(XS); eta += XS*beta; } vector etazi = Zzi * bzi + zioffset; if (!sparseXzi) { etazi += Xzi*betazi; } else { DATA_SPARSE_MATRIX(XziS); etazi += XziS*betazi; } vector etad = doffset; if (!sparseXd) { etad += Xd*betad; } else { DATA_SPARSE_MATRIX(XdS); etad += XdS*betad; } // Apply link vector mu(eta.size()); for (int i = 0; i < mu.size(); i++) mu(i) = inverse_linkfun(eta(i), link); vector pz = invlogit(etazi); vector phi = exp(etad); // "zero-truncated" likelihood: ignore zeros in positive distributions // exact zero: use for positive distributions (Gamma, beta) #define zt_lik_zero(x,loglik_exp) (zi_flag && (x == Type(0)) ? -INFINITY : loglik_exp) // close to zero: use for count data (cf binomial()$initialize) #define zt_lik_nearzero(x,loglik_exp) (zi_flag && (x < Type(0.001)) ? 
-INFINITY : loglik_exp) // Observation likelihood Type s1, s2, s3, log_nzprob; Type tmp_loglik; for (int i=0; i < yobs.size(); i++) PARALLEL_REGION { if ( !glmmtmb::isNA(yobs(i)) ) { switch (family) { case gaussian_family: tmp_loglik = dnorm(yobs(i), mu(i), sqrt(phi(i)), true); SIMULATE{yobs(i) = rnorm(mu(i), sqrt(phi(i)));} break; case poisson_family: tmp_loglik = dpois(yobs(i), mu(i), true); SIMULATE{yobs(i) = rpois(mu(i));} break; case binomial_family: s1 = logit_inverse_linkfun(eta(i), link); // logit(p) tmp_loglik = dbinom_robust(yobs(i), size(i), s1, true); SIMULATE{yobs(i) = rbinom(size(i), mu(i));} break; case Gamma_family: s1 = phi(i); // shape s2 = mu(i) / phi(i); // scale tmp_loglik = zt_lik_zero(yobs(i),dgamma(yobs(i), s1, s2, true)); SIMULATE{yobs(i) = rgamma(s1, s2);} break; case beta_family: // parameterization after Ferrari and Cribari-Neto 2004, betareg package s1 = mu(i)*phi(i); s2 = (Type(1)-mu(i))*phi(i); tmp_loglik = zt_lik_zero(yobs(i),dbeta(yobs(i), s1, s2, true)); SIMULATE{yobs(i) = rbeta(s1, s2);} break; case betabinomial_family: // Transform to logit scale independent of link s3 = logit_inverse_linkfun(eta(i), link); // logit(p) // Was: s1 = mu(i) * phi(i); s1 = log_inverse_linkfun( s3, logit_link) + log(phi(i)); // s1 = log(mu*phi) // Was: s2 = (Type(1) - mu(i)) * phi(i); s2 = log_inverse_linkfun(-s3, logit_link) + log(phi(i)); // s2 = log((1-mu)*phi) tmp_loglik = glmmtmb::dbetabinom_robust(yobs(i), s1, s2, size(i), true); SIMULATE { yobs(i) = rbinom(size(i), rbeta(exp(s1), exp(s2)) ); } break; case nbinom1_family: case truncated_nbinom1_family: // Was: // s1 = mu(i); // s2 = mu(i) * (Type(1)+phi(i)); // (1+phi) guarantees that var >= mu // tmp_loglik = dnbinom2(yobs(i), s1, s2, true); s1 = log_inverse_linkfun(eta(i), link); // log(mu) s2 = s1 + etad(i) ; // log(var - mu) tmp_loglik = dnbinom_robust(yobs(i), s1, s2, true); if (family != truncated_nbinom1_family) { SIMULATE { s1 = mu(i); s2 = mu(i) * (Type(1)+phi(i)); // (1+phi) guarantees that var >= mu yobs(i) = rnbinom2(s1, s2); } } else { s3 = logspace_add( Type(0), etad(i) ); // log(1. + phi(i) log_nzprob = logspace_sub( Type(0), -mu(i) / phi(i) * s3 ); // 1-prob(0) tmp_loglik -= log_nzprob; tmp_loglik = zt_lik_nearzero(yobs(i), tmp_loglik); SIMULATE{ s1 = mu(i)/phi(i); //sz yobs(i) = glmmtmb::rtruncated_nbinom(asDouble(s1), 0, asDouble(mu(i))); } } break; case nbinom2_family: case truncated_nbinom2_family: s1 = log_inverse_linkfun(eta(i), link); // log(mu) s2 = 2. * s1 - etad(i) ; // log(var - mu) tmp_loglik = dnbinom_robust(yobs(i), s1, s2, true); SIMULATE { s1 = mu(i); s2 = mu(i) * (Type(1) + mu(i) / phi(i)); yobs(i) = rnbinom2(s1, s2); } if (family == truncated_nbinom2_family) { // s3 := log( 1. 
+ mu(i) / phi(i) ) s3 = logspace_add( Type(0), s1 - etad(i) ); log_nzprob = logspace_sub( Type(0), -phi(i) * s3 ); tmp_loglik -= log_nzprob; tmp_loglik = zt_lik_nearzero( yobs(i), tmp_loglik); SIMULATE{ yobs(i) = glmmtmb::rtruncated_nbinom(asDouble(phi(i)), 0, asDouble(mu(i))); } } break; case truncated_poisson_family: log_nzprob = logspace_sub(Type(0), -mu(i)); // log(1-exp(-mu(i))) = P(x>0) tmp_loglik = dpois(yobs(i), mu(i), true) - log_nzprob; tmp_loglik = zt_lik_nearzero(yobs(i), tmp_loglik); SIMULATE{ yobs(i) = glmmtmb::rtruncated_poisson(0, asDouble(mu(i))); } break; case genpois_family: s1 = mu(i) / sqrt(phi(i)); //theta s2 = Type(1) - Type(1)/sqrt(phi(i)); //lambda tmp_loglik = glmmtmb::dgenpois(yobs(i), s1, s2, true); SIMULATE{yobs(i)=glmmtmb::rgenpois(mu(i) / sqrt(phi(i)), Type(1) - Type(1)/sqrt(phi(i)));} break; case truncated_genpois_family: s1 = mu(i) / sqrt(phi(i)); //theta s2 = Type(1) - Type(1)/sqrt(phi(i)); //lambda log_nzprob = logspace_sub(Type(0), -s1); tmp_loglik = zt_lik_nearzero(yobs(i), glmmtmb::dgenpois(yobs(i), s1, s2, true) - log_nzprob); SIMULATE{yobs(i)=glmmtmb::rtruncated_genpois(mu(i) / sqrt(phi(i)), Type(1) - Type(1)/sqrt(phi(i)));} break; case compois_family: s1 = mu(i); //mean s2 = 1/phi(i); //nu tmp_loglik = dcompois2(yobs(i), s1, s2, true); SIMULATE{yobs(i)=rcompois2(mu(i), 1/phi(i));} break; case truncated_compois_family: s1 = mu(i); //mean s2 = 1/phi(i); //nu log_nzprob = logspace_sub(Type(0), dcompois2(Type(0), s1, s2, true)); tmp_loglik = zt_lik_nearzero(yobs(i), dcompois2(yobs(i), s1, s2, true) - log_nzprob); SIMULATE{yobs(i)=glmmtmb::rtruncated_compois2(mu(i), 1/phi(i));} break; case tweedie_family: s1 = mu(i); // mean s2 = phi(i); // phi s3 = invlogit(thetaf(0)) + Type(1); // p, 1 > corr(terms.size()); vector > sd(terms.size()); for(int i=0; i 0){ corr(i) = terms(i).corr; sd(i) = terms(i).sd; } } vector > corrzi(termszi.size()); vector > sdzi(termszi.size()); for(int i=0; i 0){ corrzi(i) = termszi(i).corr; sdzi(i) = termszi(i).sd; } } vector > fact_load(terms.size()); for(int i=0; i 0){ fact_load(i) = terms(i).fact_load; } } REPORT(corr); REPORT(sd); REPORT(corrzi); REPORT(sdzi); REPORT(fact_load); SIMULATE { REPORT(yobs); REPORT(b); REPORT(bzi); } // For predict if(ziPredictCode==disp_zipredictcode){ //zi irrelevant; just reusing variable switch(family){ case gaussian_family: mu = sqrt(phi); break; case Gamma_family: mu = 1/sqrt(phi); break; default: mu = phi; } } else { if(zi_flag) { switch(ziPredictCode){ case corrected_zipredictcode: mu *= (Type(1) - pz); // Account for zi in prediction break; case uncorrected_zipredictcode: //mu = mu; // Predict mean of 'family' //comented out for clang 7.0.0. 
with no effect break; case prob_zipredictcode: mu = pz; // Predict zi probability eta = etazi; // want to return linear pred for zi break; default: error("Invalid 'ziPredictCode'"); } }} whichPredict -= 1; // R-index -> C-index vector mu_predict = mu(whichPredict); vector eta_predict = eta(whichPredict); REPORT(mu_predict); REPORT(eta_predict); // ADREPORT expensive for long vectors - only needed by predict() method if (doPredict==1) { ADREPORT(mu_predict); } else if (doPredict == 2) { ADREPORT(eta_predict); } return jnll; } glmmTMB/src/Makevars0000644000176200001440000000055314120405266014037 0ustar liggesusersPKG_LIBS = $(SHLIB_OPENMP_CXXFLAGS) PKG_CXXFLAGS=$(SHLIB_OPENMP_CXXFLAGS) all: $(SHLIB) pkgver pkgver: "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "cat(as.character(packageVersion('TMB')), '\n', sep="", file='../inst/TMB-version')" "$(R_HOME)/bin$(R_ARCH_BIN)/Rscript" -e "cat(as.character(packageVersion('Matrix')), '\n', sep="", file='../inst/Matrix-version')" glmmTMB/src/init.h0000644000176200001440000000257514070567426013500 0ustar liggesusers#include #include /* FIXME: Won't be needed in upcoming TMB versions */ #ifndef TMB_CALLDEFS #define TMB_CALLDEFS \ {"MakeADFunObject", (DL_FUNC) &MakeADFunObject, 4}, \ {"InfoADFunObject", (DL_FUNC) &InfoADFunObject, 1}, \ {"EvalADFunObject", (DL_FUNC) &EvalADFunObject, 3}, \ {"MakeDoubleFunObject", (DL_FUNC) &MakeDoubleFunObject, 3}, \ {"EvalDoubleFunObject", (DL_FUNC) &EvalDoubleFunObject, 3}, \ {"getParameterOrder", (DL_FUNC) &getParameterOrder, 3}, \ {"MakeADGradObject", (DL_FUNC) &MakeADGradObject, 3}, \ {"MakeADHessObject2", (DL_FUNC) &MakeADHessObject2, 4}, \ {"usingAtomics", (DL_FUNC) &usingAtomics, 0}, \ {"TMBconfig", (DL_FUNC) &TMBconfig, 2} #endif #define CALLDEF(name, n) {#name, (DL_FUNC) &name, n} extern "C" { SEXP compois_calc_var(SEXP mean, SEXP nu); SEXP omp_check(); SEXP omp_num_threads(SEXP); const static R_CallMethodDef R_CallDef[] = { TMB_CALLDEFS, CALLDEF(compois_calc_var, 2), CALLDEF(omp_check, 0), CALLDEF(omp_num_threads, 1), {NULL, NULL, 0} }; void R_init_glmmTMB(DllInfo *dll) { R_registerRoutines(dll, NULL, R_CallDef, NULL, NULL); R_useDynamicSymbols(dll, FALSE); #ifdef TMB_CCALLABLES TMB_CCALLABLES("glmmTMB"); #endif } } glmmTMB/src/utils.cpp0000644000176200001440000000112314120405266014201 0ustar liggesusers/* as per BDR/CRAN, system headers must come before R headers */ # ifdef _OPENMP #include # endif # include # include /* check if openmp is enabled */ extern "C" SEXP omp_check(void) { #ifdef _OPENMP return ScalarLogical(1); #else return ScalarLogical(0); #endif } /* openmp controller */ extern "C" SEXP omp_num_threads(SEXP x) { #ifdef _OPENMP if( !isNull(x) ){ int n = INTEGER(x)[0]; omp_set_num_threads( n ); } return ScalarInteger( omp_get_max_threads() ); #else warning("OpenMP not supported."); return ScalarInteger( 0 ); #endif } glmmTMB/src/distrib.h0000644000176200001440000002314114070567426014165 0ustar liggesusers// additional distributions etc. 
for glmmTMB // FIXME: check proper syntax for including namespace glmmtmb{ /* Not used anymore: */ template Type dbetabinom(Type y, Type a, Type b, Type n, int give_log=0) { /* Wikipedia: f(k|n,\alpha,\beta) = \frac{\Gamma(n+1)}{\Gamma(k+1)\Gamma(n-k+1)} \frac{\Gamma(k+\alpha)\Gamma(n-k+\beta)}{\Gamma(n+\alpha+\beta)} \frac{\Gamma(\alpha+\beta)}{\Gamma(\alpha)\Gamma(\beta)} */ Type logres = lgamma(n + 1) - lgamma(y + 1) - lgamma(n - y + 1) + lgamma(y + a) + lgamma(n - y + b) - lgamma(n + a + b) + lgamma(a + b) - lgamma(a) - lgamma(b) ; if(!give_log) return exp(logres); else return logres; } /* R: > identical(lgamma(exp(-150)), 150) [1] TRUE FIXME: Move 'logspace_gamma' to TMB. */ namespace adaptive { template T logspace_gamma(const T &x) { /* Tradeoff: The smaller x the better approximation *but* the higher risk of psigamma() overflow */ if (x < -150) return -x; else return lgamma(exp(x)); } } TMB_BIND_ATOMIC(logspace_gamma, 1, adaptive::logspace_gamma(x[0])) template Type logspace_gamma(Type x) { CppAD::vector args(2); // Last index reserved for derivative order args[0] = x; args[1] = 0; return logspace_gamma(args)[0]; } template Type dbetabinom_robust(Type y, Type loga, Type logb, Type n, int give_log=0) { Type a = exp(loga), b = exp(logb); Type logy = log(y), lognmy = log(n - y); // May be -Inf Type logres = lgamma(n + 1) - lgamma(y + 1) - lgamma(n - y + 1) + logspace_gamma(logspace_add(logy, loga)) + logspace_gamma(logspace_add(lognmy, logb)) - lgamma(n + a + b) + lgamma(a + b) - logspace_gamma(loga) - logspace_gamma(logb); if(!give_log) return exp(logres); else return logres; } template Type dgenpois(Type y, Type theta, Type lambda, int give_log=0) { /* f(y|\theta,\lambda) = \frac{\theta(theta+\lambda y)^{y-1}e^{-\theta-\lambda y}}{y \!} */ Type logres = log(theta) + (y - 1) * log(theta + lambda * y) - theta - lambda * y - lgamma(y + Type(1)); if(!give_log) return exp(logres); else return logres; } // from C. Geyer aster package, src/raster.c l. 175 // Simulate from truncated poisson // see https://cran.r-project.org/web/packages/aster/vignettes/trunc.pdf for technical/mathematical details // k is the truncation point (e.g. 
k=0 -> 0-truncated) // MODIFICATIONS: change die() to throw std::range_error() double rtruncated_poisson(int k, double mu) { int m; double mdoub; if (mu <= 0.0) throw std::range_error("non-positive mu in k-truncated-poisson simulator\n"); if (k < 0) throw std::range_error("negative k in k-truncated-poisson simulator\n"); mdoub = k + 1 - mu; if (mdoub < 0.0) mdoub = 0.0; m = mdoub; if (m < mdoub) m = m + 1; /* since mu > 0.0 we have 0.0 <= mdoub < k + 1 hence 0 <= m <= k + 1 */ for (;;) { double x = rpois(mu) + m; if (m > 0) { double a = 1.0; int j; double u = unif_rand(); for (j = 0; j < m; ++j) a *= (k + 1 - j) / (x - j); if (u < a && x > k) return x; } else { if (x > k) return x; } } } // rtruncpois // alpha = size (dispersion param), k = truncation point, mu = mean double rtruncated_nbinom(double alpha, int k, double mu) { int m; double mdoub; double p = alpha / (mu + alpha); double q = mu / (mu + alpha); if (alpha <= 0.0) throw std::range_error("non-positive size in k-truncated-neg-bin simulator\n"); if (mu <= 0.0) throw std::range_error("non-positive mu in k-truncated-neg-bin simulator\n"); if (k < 0) throw std::range_error("negative k in k-truncated-neg-bin simulator\n"); mdoub = (k + 1.0) * p - alpha * q; if (mdoub < 0.0) mdoub = 0.0; m = mdoub; if (m < mdoub) m = m + 1; /* since p < 1.0 and q > 0.0 we have 0.0 <= mdoub < k + 1 hence 0 <= m <= k + 1 */ for (;;) { double x = rnbinom(alpha + m, p) + m; if (m > 0) { double a = 1.0; int j; double u = unif_rand(); for (j = 0; j < m; ++j) a *= (k + 1 - j) / (x - j); if (u < a && x > k) return x; } else { if (x > k) return x; } } } // rtruncated_nbinom /* Simulate from generalized poisson distribution */ template Type rgenpois(Type theta, Type lambda) { // Copied from R function HMMpa::rgenpois Type ans = Type(0); Type random_number = runif(Type(0), Type(1)); Type kum = dgenpois(Type(0), theta, lambda); while (random_number > kum) { ans = ans + Type(1); kum += dgenpois(ans, theta, lambda); } return ans; } /* Simulate from zero-truncated generalized poisson distribution */ template Type rtruncated_genpois(Type theta, Type lambda) { int nloop = 10000; int counter = 0; Type ans = rgenpois(theta, lambda); while(ans < Type(1) && counter < nloop) { ans = rgenpois(theta, lambda); counter++; } if(ans < 1.) warning("Zeros in simulation of zero-truncated data. 
Possibly due to low estimated mean."); return ans; } extern "C" { /* See 'R-API: entry points to C-code' (Writing R-extensions) */ double Rf_logspace_sub (double logx, double logy); void Rf_pnorm_both(double x, double *cum, double *ccum, int i_tail, int log_p); } /* y(x) = logit_invcloglog(x) := log( exp(exp(x)) - 1 ) = logspace_sub( exp(x), 0 ) y'(x) = exp(x) + exp(x-y) = exp( logspace_add(x, x-y) ) */ TMB_ATOMIC_VECTOR_FUNCTION( // ATOMIC_NAME logit_invcloglog , // OUTPUT_DIM 1, // ATOMIC_DOUBLE ty[0] = Rf_logspace_sub(exp(tx[0]), 0.); , // ATOMIC_REVERSE px[0] = exp( logspace_add(tx[0], tx[0]-ty[0]) ) * py[0]; ) template Type logit_invcloglog(Type x) { CppAD::vector tx(1); tx[0] = x; return logit_invcloglog(tx)[0]; } /* y(x) = logit_pnorm(x) := logit( pnorm(x) ) = pnorm(x, lower.tail=TRUE, log.p=TRUE) - pnorm(x, lower.tail=FALSE, log.p=TRUE) y'(x) = dnorm(x) * ( (1+exp(y)) + (1+exp(-y)) ) */ double logit_pnorm(double x) { double log_p_lower, log_p_upper; Rf_pnorm_both(x, &log_p_lower, &log_p_upper, 2 /* both tails */, 1 /* log_p */); return log_p_lower - log_p_upper; } TMB_ATOMIC_VECTOR_FUNCTION( // ATOMIC_NAME logit_pnorm , // OUTPUT_DIM 1, // ATOMIC_DOUBLE ty[0] = logit_pnorm(tx[0]) , // ATOMIC_REVERSE Type zero = 0; Type tmp1 = logspace_add(zero, ty[0]); Type tmp2 = logspace_add(zero, -ty[0]); Type tmp3 = logspace_add(tmp1, tmp2); Type tmp4 = dnorm(tx[0], Type(0), Type(1), true) + tmp3; px[0] = exp( tmp4 ) * py[0]; ) template Type logit_pnorm(Type x) { CppAD::vector tx(1); tx[0] = x; return logit_pnorm(tx)[0]; } /* Calculate variance in compois family using V(X) = (logZ)''(loglambda) */ double compois_calc_var(double mean, double nu){ using atomic::compois_utils::calc_loglambda; using atomic::compois_utils::calc_logZ; double loglambda = calc_loglambda(log(mean), nu); typedef atomic::tiny_ad::variable<2, 1, double> ADdouble; ADdouble loglambda_ (loglambda, 0); ADdouble ans = calc_logZ(loglambda_, nu); return ans.getDeriv()[0]; } /* Simulate from zero-truncated Conway-Maxwell-Poisson distribution */ template Type rtruncated_compois2(Type mean, Type nu) { int nloop = 10000; int counter = 0; Type ans = rcompois2(mean, nu); while(ans < 1. && counter < nloop) { ans = rcompois2(mean, nu); counter++; } if(ans < 1.) warning("Zeros in simulation of zero-truncated data. Possibly due to low estimated mean."); return ans; } /* Simulate from tweedie distribution */ template Type rtweedie(Type mu, Type phi, Type p) { // Copied from R function tweedie::rtweedie Type lambda = pow(mu, 2. - p) / (phi * (2. - p)); Type alpha = (2. - p) / (1. - p); Type gam = phi * (p - 1.) 
* pow(mu, p - 1.); int N = (int) asDouble(rpois(lambda)); Type ans = rgamma(N, -alpha /* shape */, gam /* scale */).sum(); return ans; } } // namespace glmmtmb /* Interface to compois variance */ extern "C" { SEXP compois_calc_var(SEXP mean, SEXP nu) { if (LENGTH(mean) != LENGTH(nu)) error("'mean' and 'nu' must be vectors of same length."); SEXP ans = PROTECT(allocVector(REALSXP, LENGTH(mean))); for(int i=0; i<LENGTH(mean); i++) REAL(ans)[i] = glmmtmb::compois_calc_var(REAL(mean)[i], REAL(nu)[i]); /* fill result element-wise */ UNPROTECT(1); return ans; } } glmmTMB/vignettes/parallel.Rmd0000644000176200001440000000720014120405266016020 0ustar liggesusers--- title: "Parallel optimization using glmmTMB" author: "Nafis Sadat" date: "`r Sys.Date()`" output: rmarkdown::html_vignette vignette: > %\VignetteIndexEntry{Parallel optimization using glmmTMB} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} --- A new, experimental feature of `glmmTMB` is the ability to parallelize the optimization process. This vignette shows an example and timing of a simple model fit with and without parallelizing across threads. If your OS supports OpenMP parallelization and R was installed using OpenMP, `glmmTMB` will automatically pick up the OpenMP flags from R's `Makevars` and compile the C++ model with OpenMP support. If the flag is not available, then the model will be compiled with serial optimization only. ```{r setup, include=FALSE, message=FALSE} library(knitr) opts_chunk$set(eval = identical(Sys.getenv("NOT_CRAN"), "true")) ``` Load packages: ```{r libs,message=FALSE, eval=TRUE} library(glmmTMB) set.seed(1) nt <- min(parallel::detectCores(),5) ``` Simulate a dataset with large `N`: ```{r simulate1} N <- 3e5 xdata <- rnorm(N, 1, 2) ydata <- 0.3 + 0.4*xdata + rnorm(N, 0, 0.25) ``` First, we fit the model serially. We can pass the number of parallel processes we want using the `parallel` parameter in `glmmTMBControl`: ```{r fit1} system.time( model1 <- glmmTMB(formula = ydata ~ 1 + xdata, control = glmmTMBControl(parallel = 1)) ) ``` Now, we fit the same model using five threads (or as many as possible - `r nt` in this case): ```{r fit2} system.time( model2 <- glmmTMB(formula = ydata ~ 1 + xdata, control = glmmTMBControl(parallel = nt)) ) ``` The speed-up is definitely more visible on models with a much larger number of observations, or in models with random effects. Here's an example where we have an IID Gaussian random effect. We first simulate the data with 200 groups (our random effect): ```{r simulate2} xdata <- rnorm(N, 1, 2) groups <- 200 data_use <- data.frame(obs = 1:N) data_use <- within(data_use, { group_var <- rep(seq(groups), times = nrow(data_use) / groups) group_intercept <- rnorm(groups, 0, 0.1)[group_var] xdata <- xdata ydata <- 0.3 + group_intercept + 0.5*xdata + rnorm(N, 0, 0.25) }) ``` We fit the random effect model, first with a single thread: ```{r fit3} (t_serial <- system.time( model3 <- glmmTMB(formula = ydata ~ 1 + xdata + (1 | group_var), data = data_use, control = glmmTMBControl(parallel = 1)) ) ) ``` Now we fit the same model, but using `r nt` threads. The speed-up is more noticeable with this model. ```{r fit4} (t_parallel <- system.time( update(model3, control = glmmTMBControl(parallel = nt)) ) ) ``` ## Notes on OpenMP support From [Writing R Extensions](https://cran.r-project.org/doc/manuals/r-devel/R-exts.html#OpenMP-support): > Apple builds of clang on macOS currently have no OpenMP support, but CRAN binary packages are built with a clang-based toolchain which supports OpenMP.
https://www.openmp.org/resources/openmp-compilers-tools/ gives some idea of what compilers support what versions. > The performance of OpenMP varies substantially between platforms. The Windows implementation has substantial overheads, so is only beneficial if quite substantial tasks are run in parallel. Also, on Windows new threads are started with the default FPU control word, so computations done on OpenMP threads will not make use of extended-precision arithmetic which is the default for the main process. ## System information This report was built using `r nt` parallel threads (on a machine with a total of `r parallel::detectCores()` cores) ```{r SI} print(sessionInfo(), locale=FALSE) ``` glmmTMB/vignettes/covstruct.rmd0000644000176200001440000005743014070567426016345 0ustar liggesusers--- title: "Covariance structures with glmmTMB" author: "Kasper Kristensen and Maeve McGillycuddy" date: "`r Sys.Date()`" output: rmarkdown::html_vignette bibliography: glmmTMB.bib vignette: > %\VignetteIndexEntry{Covariance structures with glmmTMB} %\VignettePackage{glmmTMB} %\VignetteEngine{knitr::rmarkdown} \usepackage[utf8]{inputenc} params: EVAL: !r identical(Sys.getenv("NOT_CRAN"), "true") --- ```{r setup, include=FALSE, message=FALSE} library(knitr) library(glmmTMB) library(MASS) ## for mvrnorm() library(TMB) ## for tmbprofile() library(mvabund) ## for spider data ## devtools::install_github("kaskr/adcomp/TMB") ## get development version knitr::opts_chunk$set(echo = TRUE, eval=if (exists("params")) params$EVAL else FALSE) do_image <- exists("params") && params$EVAL ## want to *store* images within package save_vig_dir <- file.path("inst","vignette_data") pkg_dir <- "glmmTMB" ## guess where we are ... if (grepl("/vignettes$",getwd())) { ## in vignettes dir save_vig_dir <- file.path("../",save_vig_dir) } else if (grepl(paste0("/",pkg_dir,"$"),getwd())) { ## in repo head save_vig_dir <- file.path(pkg_dir,save_vig_dir) } ## want to *retrieve* images from system files use_vig_dir <- system.file("vignette_data",package="glmmTMB") mkfig <- function(expr,fn) { png(normalizePath(file.path(save_vig_dir,fn))) eval(substitute(expr)) invisible(dev.off()) } usefig <- function(fn) { knitr::include_graphics(file.path(use_vig_dir,fn)) } ## turned off caching for now: got error in chunk 'fit.us.2' ## Error in retape() : ## Error when reading the variable: 'thetaf'. Please check data and parameters. ## In addition: Warning message: ## In retape() : Expected object. Got NULL. set.seed(1) ## run this in interactive session if you actually want to evaluate chunks ... ## Sys.setenv(NOT_CRAN="true") ``` This vignette demonstrates some of the covariance structures available in the `glmmTMB` package. Currently the available covariance structures are: | Covariance | Notation | Parameter count | Requirement | |----------------------------------|---------------|-----------------|-------------| | Heterogeneous unstructured | `us` | $n(n+1)/2$ | | | Heterogeneous Toeplitz | `toep` | $2n-1$ | | | Heterogeneous compound symmetry | `cs` | $n+1$ | | | Heterogeneous diagonal | `diag` | $n$ | | | AR(1) | `ar1` | $2$ | Unit spaced levels | | Ornstein-Uhlenbeck | `ou` | $2$ | Coordinates | | Spatial exponential | `exp` | $2$ | Coordinates | | Spatial Gaussian | `gau` | $2$ | Coordinates | | Spatial Matern | `mat` | $3$ | Coordinates | | Reduced rank | `rr` | $nd-d(d-1)/2$ | rank (d) | The word 'heterogeneous' refers to the marginal variances of the model. 
Beyond correlation parameters, a heterogeneous structure uses $n$ additional variance parameters where $n$ is the dimension. Some of the structures require temporal or spatial coordinates. We will show examples of this in a later section. ## The AR(1) covariance structure ### Demonstration on simulated data First, let's consider a simple time series model. Assume that our measurements $Y(t)$ are given at discrete times $t \in \{1,...,n\}$ by $$Y(t) = \mu + X(t) + \varepsilon(t)$$ where - $\mu$ is the mean value parameter. - $X(t)$ is a stationary AR(1) process, i.e. has covariance $cov(X(s), X(t)) = \sigma^2\exp(-\theta |t-s|)$. - $\varepsilon(t)$ is iid. $N(0,\sigma_0^2)$ measurement error. A simulation experiment is set up using the parameters | Description | Parameter | Value | |------------------------|---------------|-------| | Mean | $\mu$ | 0 | | Process variance | $\sigma^2$ | 1 | | Measurement variance | $\sigma_0^2$ | 1 | | One-step correlation | $e^{-\theta}$ | 0.7 | The following R-code draws a simulation based on these parameter values. For illustration purposes we consider a very short time series. ```{r sim1, eval=TRUE} n <- 6 ## Number of time points x <- mvrnorm(mu = rep(0,n), Sigma = .7 ^ as.matrix(dist(1:n)) ) ## Simulate the process using the MASS package y <- x + rnorm(n) ## Add measurement noise ``` In order to fit the model with `glmmTMB` we must first specify a time variable as a *factor*. The factor *levels* correspond to unit spaced time points. It is a common mistake to forget some factor levels due to missing data or to order the levels incorrectly. We therefore recommend to construct factors with explicit levels, using the `levels` argument to the `factor` function: ```{r simtimes} times <- factor(1:n, levels=1:n) levels(times) ``` We also need a grouping variable. In the current case there is only one time-series so the grouping is: ```{r simgroup} group <- factor(rep(1,n)) ``` We combine the data into a single data frame (not absolutely required, but good practice): ```{r simcomb} dat0 <- data.frame(y,times,group) ``` Now fit the model using ```{r fitar1, eval=FALSE} glmmTMB(y ~ ar1(times + 0 | group), data=dat0) ``` This formula notation follows that of the `lme4` package. - The left hand side of the bar `times + 0` corresponds to a design matrix $Z$ linking observation vector $y$ (rows) with a random effects vector $u$ (columns). - The distribution of $u$ is `ar1` (this is the only `glmmTMB` specific part of the formula). - The right hand side of the bar splits the above specification independently among groups. Each group has its own separate $u$ vector but shares the same parameters for the covariance structure. After running the model, we find the parameter estimates $\mu$ (intercept), $\sigma_0^2$ (dispersion), $\sigma$ (Std. Dev.) and $e^{-\theta}$ (First off-diagonal of "Corr") in the output: > FIXME: Try a longer time series when the print.VarCorr is fixed. ```{r ar0fit,echo=FALSE} glmmTMB(y ~ ar1(times + 0 | group), data=dat0) ``` ### Increasing the sample size A single time series of 6 time points is not sufficient to identify the parameters. We could either increase the length of the time series or increase the number of groups. 
We'll try the latter: ```{r simGroup} simGroup <- function(g, n=6, rho=0.7) { x <- mvrnorm(mu = rep(0,n), Sigma = rho ^ as.matrix(dist(1:n)) ) ## Simulate the process y <- x + rnorm(n) ## Add measurement noise times <- factor(1:n) group <- factor(rep(g,n)) data.frame(y, times, group) } simGroup(1) ``` Generate a dataset with 1000 groups: ```{r simGroup2} dat1 <- do.call("rbind", lapply(1:1000, simGroup) ) ``` And fitting the model on this larger dataset gives estimates close to the true values (AR standard deviation=1, residual (measurement) standard deviation=1, autocorrelation=0.7): ```{r fit.ar1} (fit.ar1 <- glmmTMB(y ~ ar1(times + 0 | group), data=dat1)) ``` ## The unstructured covariance We can try to fit an unstructured covariance to the previous dataset `dat1`. For this case an unstructured covariance has `r (n*n-n)/2` correlation parameters and `r n` variance parameters. Adding $\sigma_0^2 I$ on top would cause a strict overparameterization, as these would be redundant with the diagonal elements in the covariance matrix. Hence, when fitting the model with `glmmTMB`, we have to disable the $\varepsilon$ term (the dispersion) by setting `dispformula=~0`: ```{r fit.us} fit.us <- glmmTMB(y ~ us(times + 0 | group), data=dat1, dispformula=~0) fit.us$sdr$pdHess ## Converged ? ``` The estimated variance and correlation parameters are: ```{r fit.us.vc} VarCorr(fit.us) ``` \newcommand{\textsub}[2]{#1_{{\text {#2}}}} The estimated correlation is approximately constant along diagonals (apparent Toeplitz structure) and we note that the first off-diagonal is now ca. half the true value (0.7) because the dispersion is effectively included in the estimated covariance matrix (i.e. $\rho' = \rho \textsub{\sigma^2}{AR}/(\textsub{\sigma^2}{AR} + \textsub{\sigma^2}{meas})$). ## The Toeplitz structure The next natural step would be to reduce the number of parameters by collecting correlation parameters within the same off-diagonal. This amounts to `r (n-1)` correlation parameters and `r n` variance parameters. > FIXME: Explain why dispformula=~1 causes over-parameterization ```{r fit.toep} fit.toep <- glmmTMB(y ~ toep(times + 0 | group), data=dat1, dispformula=~0) fit.toep$sdr$pdHess ## Converged ? ``` The estimated variance and correlation parameters are: ```{r fit.toep.vc} (vc.toep <- VarCorr(fit.toep)) ``` The diagonal elements are all approximately equal to the true total variance ($\textsub{\sigma^2}{AR} + \textsub{\sigma^2}{meas}$=2), and the off-diagonal elements are approximately equal to the expected value of 0.7/2=0.35. ```{r fit.toep.vc.diag} vc1 <- vc.toep$cond[[1]] ## first term of var-cov for RE of conditional model summary(diag(vc1)) summary(vc1[row(vc1)!=col(vc1)]) ``` We can get a *slightly* better estimate of the variance by using REML estimation (however, the estimate of the correlations seems to have gotten slightly worse): ```{r fit.toep.reml} fit.toep.reml <- update(fit.toep, REML=TRUE) vc1R <- VarCorr(fit.toep.reml)$cond[[1]] summary(diag(vc1R)) summary(vc1R[row(vc1R)!=col(vc1R)]) ``` ## Compound symmetry The compound symmetry structure collects all off-diagonal elements of the correlation matrix to one common value. > FIXME: Explain why dispformula=~1 causes over-parameterization ```{r fit.cs} fit.cs <- glmmTMB(y ~ cs(times + 0 | group), data=dat1, dispformula=~0) fit.cs$sdr$pdHess ## Converged ?
``` The estimated variance and correlation parameters are: ```{r fit.cs.vc} VarCorr(fit.cs) ``` ## Anova tables The models `ar1`, `toep`, and `us` are nested so we can use: ```{r anova1} anova(fit.ar1, fit.toep, fit.us) ``` `ar1` has the lowest AIC (it's the simplest model, and fits the data adequately); we can't reject the (true in this case!) null model that an AR1 structure is adequate to describe the data. The model `cs` is a sub-model of `toep`: ```{r anova2} anova(fit.cs, fit.toep) ``` Here we *can* reject the null hypothesis of compound symmetry (i.e., that all the pairwise correlations are the same). ## Adding coordinate information Coordinate information can be added to a variable using the `glmmTMB` function `numFactor`. This is necessary in order to use those covariance structures that require coordinates. For example, if we have the numeric coordinates ```{r sample2} x <- sample(1:2, 10, replace=TRUE) y <- sample(1:2, 10, replace=TRUE) ``` we can generate a factor representing $(x,y)$ coordinates by ```{r numFactor} (pos <- numFactor(x,y)) ``` Numeric coordinates can be recovered from the factor levels: ```{r parseNumLevels} parseNumLevels(levels(pos)) ``` In order to try the remaining structures on our test data we re-interpret the time factor using `numFactor`: ```{r numFactor2} dat1$times <- numFactor(dat1$times) levels(dat1$times) ``` ## Ornstein–Uhlenbeck Having the numeric times encoded in the factor levels we can now try the Ornstein–Uhlenbeck covariance structure. ```{r fit.ou} fit.ou <- glmmTMB(y ~ ou(times + 0 | group), data=dat1) fit.ou$sdr$pdHess ## Converged ? ``` It should give the exact same results as `ar1` in this case since the times are equidistant: ```{r fit.ou.vc} VarCorr(fit.ou) ``` However, note the differences between `ou` and `ar1`: - `ou` can handle irregular time points. - `ou` only allows positive correlation between neighboring time points. ## Spatial correlations The structures `exp`, `gau` and `mat` are meant to used for spatial data. They all require a Euclidean distance matrix which is calculated internally based on the coordinates. Here, we will try these models on the simulated time series data. An example with spatial data is presented in a later section. ### Matern ```{r fit.mat} fit.mat <- glmmTMB(y ~ mat(times + 0 | group), data=dat1, dispformula=~0) fit.mat$sdr$pdHess ## Converged ? ``` ```{r fit.mat.vc} VarCorr(fit.mat) ``` ### Gaussian "Gaussian" refers here to a Gaussian decay in correlation with distance, i.e. $\rho = \exp(-d x^2)$, not to the conditional distribution ("family"). ```{r fit.gau} fit.gau <- glmmTMB(y ~ gau(times + 0 | group), data=dat1, dispformula=~0) fit.gau$sdr$pdHess ## Converged ? ``` ```{r fit.gau.vc} VarCorr(fit.gau) ``` ### Exponential ```{r fit.exp} fit.exp <- glmmTMB(y ~ exp(times + 0 | group), data=dat1) fit.exp$sdr$pdHess ## Converged ? ``` ```{r fit.exp.vc} VarCorr(fit.exp) ``` ### A spatial covariance example Starting out with the built in `volcano` dataset we reshape it to a `data.frame` with pixel intensity `z` and pixel position `x` and `y`: ```{r spatial_data} d <- data.frame(z = as.vector(volcano), x = as.vector(row(volcano)), y = as.vector(col(volcano))) ``` Next, add random normal noise to the pixel intensities and extract a small subset of 100 pixels. 
This is our spatial dataset: ```{r spatial_sub_sample} set.seed(1) d$z <- d$z + rnorm(length(volcano), sd=15) d <- d[sample(nrow(d), 100), ] ``` Display sampled noisy volcano data: ```{r volcano_data_image_fake,eval=FALSE} volcano.data <- array(NA, dim(volcano)) volcano.data[cbind(d$x, d$y)] <- d$z image(volcano.data, main="Spatial data", useRaster=TRUE) ``` ```{r volcano_data_image_real,echo=FALSE} if (do_image) { volcano.data <- array(NA, dim(volcano)) volcano.data[cbind(d$x, d$y)] <- d$z mkfig(image(volcano.data, main="Spatial data"),"volcano_data.png") } ``` ```{r volcano_image,eval=TRUE,echo=FALSE} usefig("volcano_data.png") ``` Based on this data, we'll attempt to re-construct the original image. As a model, it is assumed that the original image `image(volcano)` is a realization of a random field with correlation decaying exponentially with distance between pixels. Denoting by $u(x,y)$ this random field the model for the observations is \[ z_{i} = \mu + u(x_i,y_i) + \varepsilon_i \] To fit the model, a `numFactor` and a dummy grouping variable must be added to the dataset: ```{r spatial_add_pos_and_group} d$pos <- numFactor(d$x, d$y) d$group <- factor(rep(1, nrow(d))) ``` The model is fit by ```{r fit_spatial_model, cache=TRUE} f <- glmmTMB(z ~ 1 + exp(pos + 0 | group), data=d) ``` Recall that a standard deviation `sd=15` was used to distort the image. A confidence interval for this parameter is ```{r confint_sigma} confint(f, "sigma") ``` The glmmTMB `predict` method can predict unseen levels of the random effects. For instance to predict a 3-by-3 corner of the image one could construct the new data: ```{r newdata_corner} newdata <- data.frame( pos=numFactor(expand.grid(x=1:3,y=1:3)) ) newdata$group <- factor(rep(1, nrow(newdata))) newdata ``` and predict using ```{r predict_corner} predict(f, newdata, type="response", allow.new.levels=TRUE) ``` A specific image column can thus be predicted using the function ```{r predict_column} predict_col <- function(i) { newdata <- data.frame( pos = numFactor(expand.grid(1:87,i))) newdata$group <- factor(rep(1,nrow(newdata))) predict(f, newdata=newdata, type="response", allow.new.levels=TRUE) } ``` Prediction of the entire image is carried out by (this takes a while...): ```{r predict_all} pred <- sapply(1:61, predict_col) ``` Finally plot the re-constructed image by ```{r image_results_fake,eval=FALSE} image(pred, main="Reconstruction") ``` ```{r image_results_real,echo=FALSE} if (do_image) { mkfig(image(pred, main="Reconstruction", useRaster=TRUE), "volcano_results.png") } ``` ```{r results_image,eval=TRUE,echo=FALSE} usefig("volcano_results.png") ``` ## Mappings For various advanced purposes, such as computing likelihood profiles, it is useful to know the details of the parameterization of the models - the scale on which the parameters are defined (e.g. standard deviation, variance, or log-standard deviation for variance parameters) and their order. ### Unstructured For an unstructured matrix of size `n`, parameters `1:n` represent the log-standard deviations while the remaining `n(n-1)/2` (i.e. `(n+1):(n*(n+1)/2)`) are the elements of the *scaled* Cholesky factor of the correlation matrix, filled in row-wise order (see [TMB documentation](http://kaskr.github.io/adcomp/classUNSTRUCTURED__CORR__t.html)).
In particular, if $L$ is the lower-triangular matrix with 1 on the diagonal and the correlation parameters in the lower triangle, then the correlation matrix is defined as $\Sigma = D^{-1/2} L L^\top D^{-1/2}$, where $D = \textrm{diag}(L L^\top)$. For a single correlation parameter $\theta_0$, this works out to $\rho = \theta_0/\sqrt{1+\theta_0^2}$. ```{r fit.us.2} vv0 <- VarCorr(fit.us) vv1 <- vv0$cond$group ## extract 'naked' V-C matrix n <- nrow(vv1) rpars <- getME(fit.us,"theta") ## extract V-C parameters ## first n parameters are log-std devs: all.equal(unname(diag(vv1)),exp(rpars[1:n])^2) ## now try correlation parameters: cpars <- rpars[-(1:n)] length(cpars)==n*(n-1)/2 ## the expected number cc <- diag(n) cc[upper.tri(cc)] <- cpars L <- crossprod(cc) D <- diag(1/sqrt(diag(L))) round(D %*% L %*% D,3) round(unname(attr(vv1,"correlation")),3) ``` ```{r other_check} all.equal(c(cov2cor(vv1)),c(fit.us$obj$env$report(fit.us$fit$parfull)$corr[[1]])) ``` Profiling (experimental/exploratory): ```{r fit.us.profile,cache=TRUE} ## want $par, not $parfull: do NOT include conditional modes/'b' parameters ppar <- fit.us$fit$par length(ppar) range(which(names(ppar)=="theta")) ## the last n*(n+1)/2 parameters ## only 1 fixed effect parameter tt <- tmbprofile(fit.us$obj,2,trace=FALSE) ``` ```{r fit.us.profile.plot_fake,eval=FALSE} confint(tt) plot(tt) ``` ```{r fit.us.profile.plot_real,echo=FALSE} mkfig(plot(tt),"us_profile_plot.png") ``` ```{r us_profile_image,eval=TRUE,echo=FALSE} usefig("us_profile_plot.png") ``` ```{r fit.cs.profile,cache=TRUE} ppar <- fit.cs$fit$par length(ppar) range(which(names(ppar)=="theta")) ## the last n*(n+1)/2 parameters ## only 1 fixed effect parameter, 1 dispersion parameter tt2 <- tmbprofile(fit.cs$obj,3,trace=FALSE) ``` ```{r fit.cs.profile.plot_fake,eval=FALSE} plot(tt2) ``` ```{r fit.cs.profile.plot_real,echo=FALSE} mkfig(plot(tt2),"cs_profile_plot.png") ``` ```{r fit.cs.profile_image,echo=FALSE,eval=TRUE} usefig("cs_profile_plot.png") ``` ## General latent variable model Consider a generalized linear mixed model \begin{equation} g(\boldsymbol{\mu}) = \boldsymbol{X\beta} + \boldsymbol{Zb} \end{equation} where $g(.)$ is the link function; $\boldsymbol{\beta}$ is a p-dimensional vector of regression coefficients related to the covariates; $\boldsymbol{X}$ is an $n \times p$ model matrix; and $\boldsymbol{Z}$ is the $n\times q$ model matrix for the $q$-dimensional vector-valued random effects variable $\boldsymbol{U}$ which is multivariate normal with mean zero and a parameterized $q \times q$ variance-covariance matrix, $\boldsymbol{\Sigma}$, i.e., $\boldsymbol{U} \sim N(\boldsymbol{0}, \boldsymbol{\Sigma})$. A general latent variable model (GLVM) requires many fewer parameters for the variance-covariance matrix, $\boldsymbol{\Sigma}$. To fit a GLVM we add a *reduced-rank* (rr) covariance structure, so the model becomes \begin{align} g(\boldsymbol{\mu}) &= \boldsymbol{X\beta} + \boldsymbol{Z(I_n \otimes \Lambda)b} \\ &= \boldsymbol{X\beta} + \boldsymbol{Zb_{new}} \end{align} where $\otimes$ is the Kronecker product and $\boldsymbol{\Lambda} = (\boldsymbol{\lambda_1}, \ldots, \boldsymbol{\lambda_d})'$ is the $q \times d$ matrix of factor loadings (with $d \ll q$). The upper triangular elements of $\boldsymbol{\Lambda}$ are set to be zero to ensure parameter identifiability. Here we assume that the latent variables follow a multivariate standard normal distribution, $\boldsymbol{b} \sim N(\boldsymbol{0}, \boldsymbol{I})$.
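The following toy sketch (plain R, independent of `glmmTMB`; the dimensions `q`, `d` and the `theta` values are invented purely for illustration) shows how such a loadings matrix with zero upper triangle generates a rank-$d$ covariance matrix $\boldsymbol{\Lambda}\boldsymbol{\Lambda}'$ with far fewer than $q(q+1)/2$ free parameters:

```{r rr_loadings_sketch, eval=FALSE}
## illustrative only: build a q x d loadings matrix with zero upper triangle
q <- 5; d <- 2
theta <- seq_len(q*d - d*(d-1)/2)                # one value per free loading
Lambda <- matrix(0, q, d)
Lambda[lower.tri(Lambda, diag = TRUE)] <- theta  # fill column-wise; upper triangle stays 0
Sigma <- Lambda %*% t(Lambda)                    # implied q x q covariance
qr(Sigma)$rank                                   # rank d
```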
For GLVMs it is important to select initial starting values for the parameters because the observed likelihood may be multimodal, and maximization algorithms can end up in local maxima. @niku2019gllvm describe methods to enable faster and more reliable fits of latent variable models by carefully choosing starting values of the parameters. A similar method has been implemented in `glmmTMB`. A generalized linear model is fitted to the data to obtain initial starting values for the fixed parameters in the model. Residuals from the fitted GLM are calculated; Dunn-Smyth residuals are calculated for common families while residuals from the `dev.resids()` function are used otherwise. Initial starting values for the latent variables and their loadings are obtained by fitting a reduced rank model to the residuals. ### Reduced rank One of our main motivations for adding this variance-covariance structure is to enable the analysis of multivariate abundance data, for example to model the abundance of different taxa across multiple sites. Typically an unstructured random effect is assumed to account for correlation between taxa; however the number of parameters required quickly becomes large with increasing numbers of taxa. A GLVM is a flexible and more parsimonious way to account for correlation so that one can fit a joint model across many taxa. A GLVM can be fit by specifying a reduced rank (`rr`) covariance structure. For example, the code for modeling the mean abundance against taxa and to account for the correlation between taxa using two latent variables is as follows: ```{r rr_ex, eval = FALSE} if (require(mvabund)) { data(spider) ## organize data into long format sppTot <- sort(colSums(spider$abund), decreasing = TRUE) tmp <- cbind(spider$abund, spider$x) tmp$id <- 1:nrow(tmp) spiderDat <- reshape(tmp, idvar = "id", timevar = "Species", times = colnames(spider$abund), varying = list(colnames(spider$abund)), v.names = "abund", direction = "long") ## fit rank-reduced models with varying dimension fit_list <- lapply(2:10, function(d) { fit.rr <- glmmTMB(abund ~ Species + rr(Species + 0|id, d = d), data = spiderDat) }) ## compare fits via AIC aic_vec <- sapply(fit_list, AIC) aic_vec - min(aic_vec, na.rm = TRUE) } ``` The left hand side of the bar `Species + 0` corresponds to a factor loading matrix that accounts for the correlations among taxa. The right hand side of the bar splits the above specification independently among sites. Here `d` is a positive integer (which defaults to 2). An option in `glmmTMBControl()` has been included to initialize the starting values for the parameters based on the approach mentioned above with the default set at `glmmTMBControl(start_method = list(method = NULL, jitter.sd = 0))`: - `method = "res"` initializes starting values from the results of fitting a GLM, and fitting a reduced rank model to the residuals to obtain starting values for the fixed coefficients, the latent variables and the factor loadings. - `jitter.sd` adds variation to the starting values of latent variables when `method = "res"` (default 0). For a reduced rank matrix of rank `d`, parameters `1:d` represent the diagonal factor loadings while the remaining $nd-d(d+1)/2$ (i.e. `(d+1):(nd-d(d-1)/2)`) are the lower diagonal factor loadings filled in column-wise order. The factor loadings from a model can be obtained by `fit.rr$obj$env$report(fit.rr$fit$parfull)$fact_load[[1]]`.
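As a quick check on a fitted object, the implied among-taxa covariance can be rebuilt from these loadings. This is a hedged sketch: it assumes the `fit_list` from the example above is available (here we take its first element, the rank-2 fit) and that the fit converged:

```{r rr_factload_sketch, eval=FALSE}
fit.rr <- fit_list[[1]]   ## e.g. the d = 2 fit from the chunk above
Lambda <- fit.rr$obj$env$report(fit.rr$fit$parfull)$fact_load[[1]]
V <- Lambda %*% t(Lambda)  ## implied reduced-rank covariance among species
round(cov2cor(V), 2)       ## correlations induced by the latent variables
```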
An appropriate rank for the model can be determined by standard model selection approaches such as information criteria (e.g. AIC or BIC) [@hui2015model]. ## References glmmTMB/vignettes/lineno.sty0000755000176200001440000045027214071363113015622 0ustar liggesusers \iffalse; awk '/S[H]ELL1/' lineno.sty|sh;exit; ... see bottom for .tex documentation ... Macro file lineno.sty for LaTeX: attach line numbers, refer to them. \fi \def\fileversion{v4.41} \def\filedate{2005/11/02} %VERSION %%% Copyright 1995--2003 Stephan I. B"ottcher ; %%% Copyright 2002--2005 Uwe L"uck, http://www.contact-ednotes.sty.de.vu %%% for version 4 and code from former Ednotes bundle %%% --author-maintained. %%% %%% This file can be redistributed and/or modified under %%% the terms of the LaTeX Project Public License; either %%% version 1.3a of the License, or any later version. %%% The latest version of this license is in %%% http://www.latex-project.org/lppl.txt %%% We did our best to help you, but there is NO WARRANTY. % %%% $Id: lineno.sty,v 3.14.2.2 2004/09/13 19:30:39 stephan Exp $ %% was v4.00. % \title{\texttt{\itshape %% %% (UL 2004/10/09:) Italic TT is evil %% %% ... or nice front page layout!? %% % lineno.sty \ \fileversion\ \filedate % \unskip}\\\ \\ % A \LaTeX\ package to attach % \\ line numbers to paragraphs % \unskip}\author{% % Stephan I. B\"ottcher % \\ Uwe L\"uck % \unskip}\date{% % boettcher@physik.uni-kiel.de % \\ http://contact-ednotes.sty.de.vu %% \\ stephan@nevis.columbia.edu %% \\ Stephan.Boettcher@cern.ch % \\} % % \documentclass[a4paper,12pt]{article}%D % \usepackage{lineno}%D %% %% (New v4.00) % \catcode`\_\active\let_~ %% %% Beware math!? (/New v4.00) % \def~{\verb~} % \let\lessthan< % \catcode`\<\active % \def<#1>{$\langle${\itshape#1}\/$\rangle$} % \catcode`\|\active %% (New v4.1: \tt star; in box anyway.) % \def|#1{\ttfamily\string#1} %% \def|#1{{\ttfamily\string#1}} %% (/New v4.1) % \newenvironment{code} % {\par\runninglinenumbers % \modulolinenumbers[1]% % \linenumbersep.3em % \footnotesize % \def\linenumberfont % {\normalfont\tiny\itshape}} % {} %% %% (New v4.00) % {\makeatletter \gdef\scs#1{\texttt % {\protect\@backslashchar#1}}} % \def\old{\par\footnotesize} %% %% (/New v4.00) %% %% (New v4.1) % {\catcode`\/\active % \gdef\path{\begingroup\catcode`\/\active % \let/\slash\dopath} % \gdef\dopath#1{\slash\unpenalty#1\endgroup}} %% %% (/New v4.1) % % \begin{document}%D %% \DocInput{lineno}%D % \pagewiselinenumbers % \maketitle % \pagestyle{headings} % \tableofcontents % \sloppy % %% %% New v4.00: `...section{%' + \unskip % \section{% % Introductions %% %% New v4.00: `s' % \unskip} % % (New v4.00) Parts of former first section % have been rendered separate subsections for package % version_v4.00. (/New v4.00) % % \subsection{% % Introduction to versions $\textrm{v}\lessthan4$ % \unskip} % % This package provides line numbers on paragraphs. % After \TeX\ has broken a paragraph into lines there will % be line numbers attached to them, with the possibility to % make references through the \LaTeX\ ~\ref~, ~\pageref~ % cross reference mechanism. This includes four issues: % \begin{itemize} % \item attach a line number on each line, % \item create references to a line number, % \item control line numbering mode, % \item count the lines and print the numbers. % \end{itemize} % The first two points are implemented through patches to % the output routine. The third by redefining ~\par~, ~\@par~ % and ~\@@par~. The counting is easy, as long as you want % the line numbers run through the text. 
If they shall % start over at the top of each page, the aux-file as well % as \TeX s memory have to carry a load for each counted line. % % I wrote this package for my wife Petra, who needs it for % transcriptions of interviews. This allows her to % precisely refer to passages in the text. It works well % together with ~\marginpar~s, but not too well with displaymath. % ~\footnote~s are a problem, especially when they % are split, but we may get there. % (New v4.00 UL) Version v4.00 overcomes the problem, I believe. % (/UL /New v4.00) % % lineno.sty works % surprisingly well with other packages, for % example, ~wrapfig.sty~. So please try if it % works with whatever you need, and if it does, % please tell me, and if it does not, tell me as % well, so I can try to fix it. % % \subsection{% % Introduction to versions v4.00ff. (UL) % \unskip} % % ~lineno.sty~ has been maintained by Stephan until version_v3.14. % From version_v4.00 onwards, maintenance is shifting towards % Uwe L\"uck (UL), who is the author of v4\dots code and of v4\dots % changes in documentation. This came about as follows. % % Since late 2002, Christian Tapp and Uwe L\"uck have employed % ~lineno.sty~ for their ~ednotes.sty~, a package supporting % critical editions---cf. % \[\mbox{\tt % http://ednotes.sty.de.vu % \unskip}\] % ---while you find ~ednotes.sty~ and surrounding files in % CTAN folder \path{macros/latex/contrib/ednotes}. % % Soon, some weaknesses of ~lineno.sty~ showed up, mainly since % Christian's critical editions (using ~ednotes.sty~) needed lots % of ~\linelabel~s and footnotes. (These weaknesses are due to % weaknesses of \LaTeX's ~\marginpar~ mechanism that Stephan % used for ~\linelabel~.) So we changed some ~lineno.sty~ % definitions in some extra files, which moreover offered new % features. We sent these files to Stephan, hoping he would take % the changes into ~lineno.sty~. However, he was too short of time. % % Writing a TUGboat article on Ednotes in 2004, we hoped to % reduce the number of files in the Ednotes bundle and so asked % Stephan again. Now he generously offered maintenance to me, so % I could execute the changes on my own. % % The improvements are as follows: % \begin{itemize}\item % [(i)] Footnotes placement approaches intentions better % (footnotes formerly liked to pile up at late pages). % \item % [(ii)] The number of ~\linelabel~s in one paragraph is no longer % limited to 18. % \item % [(iii)] ~\pagebreak~, ~\nopagebreak~, ~\vspace~, and the star % and optional versions of ~\\~ work as one would expect % (section_\ref{s:MVadj}). %% Added for v4.1 % \item % [(iv)] A command is offered which chooses the first line number % to be printed in the margin % (subsection_\ref{ss:Mod}). %% Added for v4.1 % \item % [(v)] (New v4.1) \LaTeX\ tabular environments (optionally) % get line numbers as well, and you can refer to them in the % usual automatic way. (It may be considered a shortcoming that, % precisely, \emph{rows} are numbered, not lines.---See % subsection_\ref{ss:Tab}.) % \item % [(vi)] We are moving towards referring to math items % (subsection_\ref{ss:MathRef} and the hooks in % subsection_\ref{ss:LL}). % (/New v4.1) % \end{itemize} % (Thanks to Stephan for making this possible!) % %% Unpublish: %% You may trace the earlier developments of these changes by %% requesting our files ~linenox0.sty~, ~linenox1.sty~, and %% ~lnopatch.sty~. Most of our changes have been in ~linenox0.sty~. 
%% Our ~linenox1.sty~ has extended ~linenox0.sty~ for one single %% purpose in a not very stable way. %%% (See ~\linenumberpar~ below). %% ~lnopatch.sty~ has done the first line number thing referred %% to in case_(iv) up to now. %% (New v4.1) %% Case_(v) earlier was provided by our ~edtab02.sty~---now %% called ~edtable.sty~. %% (/New v4.1) % % Ednotes moreover profits from Stephan's offer with regard % to the documentation of our code which yielded these % improvements formerly. This documentation now becomes % printable, being part of the ~lineno.sty~ documentation. % % Of course, Stephan's previous ~lineno.sty~ versions were a great % and ingenious work and exhibit greatest \TeX pertise. I never % could have done this. I learnt a lot in studying the code when % Christian pointed out strange output results and error % messages, and there are still large portions of ~lineno.sty~ % which I don't understand (consider only pagewise numbering of % lines). Fortunately, Stephan has offered future help if % needed.---My code for attaching line numbers to \emph{tabular % environments} (as mentioned above, now still in % ~edtable.sty~) %% %% TODO % developed from macros which Stephan and Christian experimented % with in December 2002. Stephan built the basics. % (However, I then became too proud to follow his advice only to % use and modify ~longtable.sty~.) % % There are some issues concerning use of counters on which I % don't agree with Stephan and where I would like to change the % code if ~lineno.sty~ is ``mine'' as Stephan offered. However, % Stephan is afraid of compatibility problems from which, in % particular, his wife could suffer in the near future. So he % demanded that I change as little as possible for my first % version. Instead of executing changes that I plan I just offer % my opinions at the single occasions. I hope to get in touch % this way with users who consider subtle features vital which I % consider strange. % % On the other hand, the sections on improvements of the % implementation have been blown up very much and may be tiring % and litte understandable for mere \emph{users}. These users % may profit from the present presentation just by jumping to % sections_\ref{s:Opts} and_\ref{s:UserCmds}. There is a user's % guide ulineno.tex which may be even more helpful, but it has % not been updated for a while. %% TODO % % \subsection{% % Availability % \unskip} % % In case you have found the present file otherwise than from % CTAN: A recent version and documentation of this package % should be available from CTAN folder % \path{macros/latex/contrib/lineno}. % Or mail to one of the addresses at top of file. % % \subsection{% % Introductory code % \unskip} % % This style option is written for \LaTeXe, November 1994 or later, % since we need the ~\protected@write~ macro. % % (New v4.00) And we use ~\newcommand*~ for % controlling length of user macro arguments, which has been % available since December 1994. %% \NeedsTeXFormat{LaTeX2e}[1994/12/01] %% [1994/11/04] \ProvidesPackage{lineno} [\filedate\space line numbers on paragraphs \fileversion] % (/New v4.00) %% %% History of versions: %% v1.00 1995/03/31 SIB: first release for Petra's interview transcriptions %% v1.01 1995/10/28 SIB: added ~pagewise~ mode %% v1.02 1995/11/15 SIB: added ~modulo~ option %% v1.03 1995/12/05 SIB: pagewise: try to reduce the hash-size requirements %% v2.00 1995/12/06 SIB: .. 
it works, new user interface %% v2.01 1996/09/17 SIB: put into CVS %% v2.02 1997/03/17 SIB: add: \@reinserts, for footnotes %% v2.04 1998/03/09 SIB: add: linenomath environment %% v2.05 1998/04/26 SIB: add: prevgraf test %% v2.06 1999/03/02 SIB: LPPL added %% v3.00 1999/06/11 SiB: include the extension in the main file %% v3.01 1999/08/28 SiB: \@reinserts -> \holdinginserts %% v3.02 2000/03/10 SiB: \@LN@output %% v3.03 2000/07/01 SiB: \@LN@ExtraLabelItems, hyperref %% v3.04 2000/12/17 SiB: longtable compatibility. %% v3.05 2001/01/02 SiB: [fleqn] detection. %% v3.05a 2001/01/04 SiB: [fleqn] detection reverted for eqnarray. %% v3.06 2001/01/17 SiB: [twocolumn] mode support. %% v3.07 2001/07/30 SiB: [hyperref] option obsoleted. %% v3.08 2001/08/02 SiB: linenomath wrapping for \[ \] %% v3.08a 2001/08/04 SiB: linenomath wrapping for \[ \] fixed %% v3.08b 2002/01/27 SiB: enquotation typo fix %% v3.09 2003/01/14 SIB: hyperref detection fix %% v3.10 2003/04/15 FMi: \MakeLineNo fix for deep boxes %% v3.10a 2003/11/12 Uwe Lck: \lineref typo fix %% v4.00 2004/09/02 UL: included linenox0, linenox1, lnopatch code with %% documentation, usually indicated by `New v4.00'; %% discussions of old code, indicated by `UL'; %% LPPL v.1 -> LPPL v1.3, `program' -> `file'; %% first lines with \filedate and \fileversion, %% according nawk lines; `November 1994 or later', %% some earlier documentation typos (including a few %% bad minus signs), { -> {% and } -> \unskip} at %% line ends (so, e.g., alignment in TOC works); \scs. %% 2004/09/03 UL: removed everything which indicated that the %% present file were named `lineno4.sty'. %% v4.1 2004/09/19 UL: Inserted Stephan's identification line, removed %% some TODOs and remarks from v4.00. %% 2004/10/04 UL: Added acknowledgement for Daniel Doherty; %% `(New v4.00)' with [|\firstlinenumber]; changed %% TODOs; Refining -> Redefining (\vadjust). %% 2004/10/05 UL: ednmath0 -> mathrefs; \catcode`\~ -> \active; %% \path; refined section on options `mathrefs'; %% changes in introduction. %% 2004/10/06 UL: Changed/removed TODOs, e.g., for edtable.sty. %% 2004/10/11 UL: Reminders: linenox0/1/lnopatch.sty obsolete; %% \tt star in list of commands. %% 2004/10/12 UL: Corrected blank lines in lineno.tex. %% 2004/10/19 UL: Fixed minor typos; remark on \if@LN@edtable. %% v4.1a 2004/11/07 UL: LPPL v1.3a. %% v4.1b 2004/11/13 UL: Comment on \outputpenalty values. %% v4.1c 2005/01/10 UL: Contact via http. %% v4.11 2005/02/20 UL: Error message with \linelabel when not numbering. %% 2005/03/07 UL: Removed \linelabel from ss:Tab heading, consider %% marginal line numbers as well, revised ss:Tab. %% Added a few lines on missing explanations to %% s:UserCmds. Corrected some code alignments. %% 2005/03/08 UL: Require recent edtable.sty. %% %% v4.2 2005/03/21 UL: "Physical page" counter works with \include. %% 2005/04/17 UL: Raised options section above extensions section %% (v4.00 disabled `displaymath' option); %% third arg for \@ifundefined{mathindent}; %% "bunch of options"; %% 2005/04/24 UL: compatibility with tamefloats; vplref.sty. %% 2005/04/25 UL: \number -> \the; wondered -> $$; subsec. appbas; %% CrtlLN sec -> subsec.; \newcommand* wherever ...; %% doc. on `other output routines' and `addpageno' %% (this changed from `varioref'). %% 2005/04/27 UL: =1\relax -> =\@ne, 0\relax ..., \hb@xt@, %% \ifx\@@par\@@@par -> \ifLineNumbers, typos, %% \pagestyle{headings}, LaTeX -> \LaTeX. 
%% v4.21 2005/04/28 UL: linenomath section: removed wrong \else's, %% \holding...: \thr@@, \@LN@outer@holdins, \global. %% v4.22 2005/05/01 UL: \unvbox\@outputbox; \@LN@col without #1, %% 2005/05/08 UL: global/local \internall..., \resetl... global, %% shortened discussions of this and of \newcounter. %% 2005/05/09 UL: corr.: doc. typo, version history, bad lines; %% percent; \chardef for modulo, %% \value{firstlinenumber}. %% v4.3 2005/05/10 UL: \@backslashchar -> \char`\\ in \scs. %% 2005/05/11 UL: \linenumbers sets ...outer@holdins; tidied up %% documentation regarding earlier versions. %% 2005/05/12 UL: `linenomath' without spurious number above; %% `displaymath' default; edmac homepage -> %% ednotes.sty.de.vu, \endlinenomath without %% numbers: no change of \holdinginserts; %% \linelabel doesn't go to .aux or mark, %% hyperref detected; undone 2005/05/10 (bad mark). %% 2005/05/13 UL: Reworked hyperref detection (new subsec.). %% 2005/05/15 UL: More typo fixes, corrected terrible confusions in %% the discussion (v4.22/v4.3) of \new/\stepcounter; %% new subsec. in `Line number ...'; another %% implementation of `hyperref' detection. %% 2005/05/16 UL: Final minor changes. %% v4.31b /06/14 UL: Extended explanation of \firstlinenumbers %% and package options; \@LN@ifgreat@critical; %% \modulolinenumbers*. Sent to Ednotes.news only. %% v4.31 2005/06/15 UL: \modulolinenumbers* with \firstlinenumber{1}; %% " -> ``/''; more doc. on \firstlinenumber . %% 2005/06/20 UL: Typo fix. %% 2005/10/01 UL: Warning about \mod...* with pagewise mode. %% v4.31a /10/02 UL: Minor changes of appearance of doc., e.g., %% \[ for $$. %% v4.32b /10/15 UL: Support for \addvspace; removed comments that %% had been invisible already for some time; %% made clear with which environments the %% linenomath environment is not needed. %% v4.32ab /10/15 UL: Observe \if@nobreak with support for \addvspace. %% v4.32 2005/10/17 UL: Just made it official and sent it to CTAN. %% v4.33b /10/23 UL: \if@nobreak\nobreak\fi -> \nobreak . %% v4.33ab /10/24 UL: \LineNoLaTeXOutput without \@tempswafalse; %% undid v4.22: \[unv]box\@outputbox (space is OK, %% \unvbox pushes short columns down); \@LN@kern@z@ . %% v4.4b 2005/10/24 UL: Another tidying-up of the discussion of %% \stepcounter{linenumber}; \@LN@screenoff@pen %% replaces \@LN@kern@z@, \@LN@depthbox . %% v4.4 2005/10/27 UL: Just made official for CTAN. %% v4.4a 2005/10/29 UL: Undid change of discussion of %% \stepcounter{linenumber} (confusion again). %% v4.41 2005/11/02 UL: Raised \CheckCommand*. %% %% Acknowledgements: %% v3.06: Donald Arseneau, pointed to mparhack.sty. %% v3.07+: Frank Mittelbach, points out inconsistencies in the %% user interface. %% v3.10: Frank Mittelbach \MakeLineNo fix for deep boxes %% v4.00: Daniel Doherty points out clash of \pagewise... with resetting %% page number. %% v4.21: Much testing work by Erik Luijten. %% v4.3: `displaymath' default by Erik Luijten's suggestion. %% v4.31: \modulolinenumbers* is an idea of Hillel Chayim Yisraeli's. %% v4.32: Support for \addvspace due to Saravanan M.'s observation. %% v4.33: Different support for \addvspace due to bug reports by %% Saravanan M.'s and David Josef Dev. %% v4.4: David Josef Dev points out that \kern\z@ after a paragraph %% tends to place its final baseline wrongly. % % % \section{% % Put the line numbers to the lines % \unskip} % % (New v4.00) This section contained the most % basic package code previously. For various purposes of % version_4\dots, much of these basics have been to be modified. 
% Much of my (UL's) reasoning on these modifications has been to % be reported. Sorry, the present section has been blown up % awfully thus and contains ramifications that may be difficult % to trace. We add some ~\subsection~ commands in order to cope % with the new situation. (/New v4.00) % % \subsection{% % Basic code of \texttt{lineno.sty} \scs{output} % \unskip}\label{ss:output} % % The line numbers have to be attached by the output % routine. We simply set the ~\interlinepenalty~ to $-100000$. % The output routine will be called after each line in the % paragraph, except the last, where we trigger by ~\par~. % The ~\linenopenalty~ is small enough to compensate a bunch of % penalties (e.g., with ~\samepage~). % % (New v3.04) Longtable uses % ~\penalty~$-30000$. The lineno penalty range was % shrunk to $-188000 \dots -32000$. (/New v3.04) % (New v4.00) New values are listed below (11111f.). (/New v4.00) \newcount\linenopenalty\linenopenalty=-100000 %% TODO v4.4+: % (UL) Hm. It is never needed below % that this is a counter. ~\def\linenopenalty{-100000\relax}~ % would do. (I guess this consumes more memory, but it % is more important to save counters than to save memory.) % I was frightened by ~-\linenopenalty~ below, but indeed % \TeX\ interprets the string ~--100000~ as 100000. % Has any user or extension package writer ever called % ~\linenopenalty=xxx~, or could I really change this?---The % counter is somewhat faster than the macro. Together with the % compatibility question this seems to support keeping the % counter. (???) %% Note that Stephan chose ~\mathchardef~ below, %% so his choice above seems to have been deliberate. %% <- no point, \mathchardef token is fast. % (/UL) \mathchardef\linenopenaltypar=32000 % So let's make a hook to ~\output~, the direct way. The \LaTeX\ % macro ~\@reinserts~ puts the footnotes back on the page. % % (New v3.01) ~\@reinserts~ badly % screws up split footnotes. The bottom part is % still on the recent contributions list, and the % top part will be put back there after the bottom % part. Thus, since lineno.sty does not play well % with ~\inserts~ anyway, we can safely experiment % with ~\holdinginserts~, without making things % much worse. % % Or that's what I thought, but: Just activating % ~\holdinginserts~ while doing the ~\par~ will % not do the trick: The ~\output~ routine may be % called for a real page break before all line % numbers are done, and how can we get control % over ~\holdinginserts~ at that point? % % Let's try this: When the ~\output~ routine is % run with ~\holdinginserts=3~ for a real page % break, then we reset ~\holdinginserts~ and % restart ~\output~. % % Then, again, how do we keep the remaining % ~\inserts~ while doing further line numbers? % % If we find ~\holdinginserts~=$-3$ we activate it again % after doing ~\output~. (/New v3.01) % % (New v3.02) To work with % multicol.sty, the original output routine is now % called indirectly, instead of being replaced. % When multicol.sty changes ~\output~, it is a % toks register, not the real thing. (/New v3.02) % % (New v4.00) Two further complications are added. %% %% TODO v4.3+: Or three, ~\@nobreakfalse~ after ~\MakeLineNo~ %% for getting rid of ~\@LN@nopagebreak~. % \begin{itemize}\item % [(i)] Problems with footnotes formerly resulted from % \LaTeX's ~\@reinserts~ in ~\@specialoutput~ which Stephan's % ~\linelabel~ called via the ~\marginpar~ mechanism. % \item % [(ii)] \LaTeX\ commands using ~\vadjust~ formerly didn't work % as one would have hoped. 
The problem is as follows: % Printing the line number results from % a box that the output routine inserts at the place of the % ~\interlinepenalty~. ~\vadjust~ items appear \emph{above} the % ~\interlinepenalty~ (\TeX book p._105). So ~\pagebreak~, e.g., % formerly sent the line number to the next page, while the % penalty from ~\nopagebreak~ could not tie the following line, % since it was screened off by the line number box.---Our trick % is putting the ~\vadjust~ items into a list macro from which % the output routine transfers them into the vertical list, % below the line number box. % \end{itemize} % In this case_(ii), like in case_(i), footnotes would suffer % if ~\holdinginserts~ were non-positive. Indeed, in both % cases_(i) and_(ii) we tackle the footnote problem by extending % that part of Stephan's output routine that is active when % ~\holdinginserts~ is positive. This extension writes the line % number ~\newlabel~ to the .aux file (which was formerly done % under $~\holdinginserts~=-3$) and handles the ~\vadjust~ % items.---To trigger ~\output~ and its ~\linelabel~ or, resp., % ~\vadjust~ part, the list of signal penalties started % immediately before is increased here (first for ~\linelabel~, % second for postponed ~\vadjust~ items): \mathchardef\@Mllbcodepen=11111 \mathchardef\@Mppvacodepen=11112 % (/New v4.00) (New v4.2) David Kastrup urges to use a private % name instead of ~\the\output~ (LaTeX-L-list). Otherwise an % ~\output~ routine loaded later and using ~\newtoks\output~ % again may get lost entirely. So we change use of ~\@LN@output~, % using it for the former purpose. Reference to what appeared % with the name of ~\output~ here lasts for a few lines and then % is given away. \let\@tempa\output \newtoks\output \let\@LN@output\output \output=\expandafter{\the\@tempa} % Now we add two cases to Stephan's output routine. (New v4.00) \@tempa={% % (/New 4.2) \LineNoTest \if@tempswa %% %% (UL) Learnt that even in def.s blank line means ~\par~. %% to leave visual space in present file with having a %% blank line neither in present nor in .tex file, %% use double comment mark (`%%'). (/UL) %% % (New v4.00) % We insert recognition of waiting ~\linelabel~ items--- %% \ifnum\outputpenalty=-\@Mllbcodepen \WriteLineNo %% % ---and of waiting ~\vadjust~ items: %% \else \ifnum\outputpenalty=-\@Mppvacodepen \PassVadjustList \else %% %% Now we give control back to Stephan. % (/New v4.00) (New v4.2) Outsource ``Standard'' output % ---which occurs so rarely---to subsection_\ref{ss:LLO}: %% \LineNoLaTeXOutput % (/New v4.2) (New v4.00) % Two new ~\fi~s for the ~\linelabel~ and ~\vadjust~ tests--- %% \fi \fi %% % ---and the remaining is %%%next three lines are % Stephan's code again: % (/New v4.00) %% \else \MakeLineNo \fi } % (New v4.00) Our new macros % ~\WriteLineNo~ and ~\PassVadjustList~ will be dealt with in % sections_\ref{s:LNref} and_\ref{ss:PVadj}. (/New v4.00) % % \subsection{% % \scs{LineNoTest} % \unskip} % % The float mechanism inserts ~\interlinepenalty~s during % ~\output~. So carefully reset it before going on. Else % we get doubled line numbers on every float placed in % horizontal mode, e.g, from ~\linelabel~. % % Sorry, neither a ~\linelabel~ nor a ~\marginpar~ should % insert a penalty, else the following linenumber % could go to the next page. Nor should any other % float. So let us suppress the ~\interlinepenalty~ % altogether with the ~\@nobreak~ switch. % % Since (ltspace.dtx, v1.2p)[1996/07/26], the ~\@nobreaktrue~ does % it's job globally. 
We need to do it locally here. \def\LineNoTest{% \let\@@par\@@@par \ifnum\interlinepenalty<-\linenopenaltypar \advance\interlinepenalty-\linenopenalty \@LN@nobreaktrue \fi \@tempswatrue \ifnum\outputpenalty>-\linenopenaltypar\else \ifnum\outputpenalty>-188000\relax \@tempswafalse \fi \fi } \def\@LN@nobreaktrue{\let\if@nobreak\iftrue} % renamed v4.33 % (UL) I thought here were % another case of the save stack problem explained in \TeX book, % p._301, namely through both local and global changing % ~\if@nobreak~. However, ~\@LN@nobreak~ is called during % ~\@LN@output~ only, while ~\@nobreaktrue~ is called by \LaTeX's % ~\@startsection~ only. The latter never happens during % ~\@LN@output~. So there is no local value of ~\if@nobreak~ on % save stack when ~\@nobreaktrue~ acts, since ~\the\@LN@output~ % (where ~\@LN@output~ is a new name for the original ~\output~) % is executed within a group (\TeX book p._21). %% %% 2004/09/19 Removed nonsense here according to Stephan 2004/09/04. %% % (/UL) % % \subsection{% % Other output routines (v4.2) % \unskip}\label{ss:LLO} % % I had thought of dealing with bad interference of footnotes % (and ~\enlargethispage~) with (real) ~\marginpar~s and floats % \emph{here}. Yet this is done in % \[ % ~http://~\mbox{[CTAN]} % ~/macros/latex/contrib/tamefloats/tameflts.sty~ % \] % now, and I prefer striving for compatibility with the latter. % (See there for expanding on the problem.) % This requires returning the special absolute value of % ~\holdinginserts~ that ~lineno.sty~ finds at the end of a newly % typeset paragraph---now done in subsection_\ref{ss:calls} % (~\linenumberpar~). % The former ~\LineNoHoldInsertsTest~ has been filled into here. %% ---`3' is replaced by ~\thr@@~ for a while. ~\thr@@~ is %% useful practice since plain \TeX, but Stephan may have been %% wise in suspecting that \LaTeX\ once could forsake ~\thr@@~. %% The same holds for ~\@M=10000~. % Note: when the following code is invoked, we have % ~\if@tempswa~_ =_~\iftrue~. % WARNING: I am still not sure whether the present code is good % for cooperating with other packages that use ~\holdinginserts~. \def\LineNoLaTeXOutput{% \ifnum \holdinginserts=\thr@@ % v4.33 without \@tempswafalse \global\holdinginserts-\thr@@ \unvbox\@cclv \ifnum \outputpenalty=\@M \else \penalty\outputpenalty \fi \else \if@twocolumn \let\@makecol\@LN@makecol \fi \the\@LN@output % finally following David Kastrup's advice. \ifnum \holdinginserts=-\thr@@ \global\holdinginserts\thr@@ \fi \fi } % \textit{More on dealing with output routines from other % packages:} % Since ~lineno.sty~'s output routine is called at least once % for each output line, I think it should be in \TeX's % original ~\output~, while output routines dealing with % building pages and with floats etc.\ should be filled into % registers addressed by ~\output~ after ~\newtoks\output~. % Therefore \begin{enumerate} % \item % ~tameflts.sty~ should be loaded \emph{after} ~lineno.sty~; % \item % if a class changes ~\output~ (APS journal class revtex4, % e.g.), ~lineno.sty~ should be loaded by ~\RequirePackage~ % [here presumably following some options in % brackets]~{lineno}~ \emph{preceding} ~\documentclass~. % \item % If you actually maintain such a class, please consider % loading ~lineno.sty~ on some draft option. The bunch of % lineno's package options may be a problem, but perhaps the % purpose of your class is offering only very few of lineno's % options anyway, maybe just one. 
% \end{enumerate} % The latter may also be needed with classes that don't follow % David Kastrup's rule on changing ~\output~. % % \subsection{% % \scs{MakeLineNo}: Actually attach line number % \unskip}\label{ss:MLN} % % We have to return all the page to the current page, and % add a box with the line number, without adding % breakpoints, glue or space. The depth of our line number % should be equal to the previous depth of the page, in % case the page breaks here, and the box has to be moved up % by that depth. % % The ~\interlinepenalty~ comes after the ~\vadjust~ from a % ~\linelabel~, so we increment the line number \emph{after} % printing it. The macro ~\makeLineNumber~ produces the % text of the line number, see section \ref{appearance}. % % (UL) I needed a while to understand % the sentence on incrementing. Correctly: writing the % ~\newlabel~ to the .aux file is triggered by the signal % penalty that ~\end@float~ inserts via ~\vadjust~. % However, this could be changed by our new ~\PostponeVadjust~. % After ~\c@linenumber~ has been introduced as a \LaTeX\ % counter, it might be preferable that it behaved like standard % \LaTeX\ counters which are incremented shortly before printing. % But this may be of little practical relevance in this case, % as ~\c@linenumber~ is driven in a very non-standard % way.---However still, this behaviour of ~\c@linenumber~ % generates a problem with our ~edtable.sty~. %% \unskip---Before, %% I thought that Stephan had reported his reasoning incorrectly %% and rather did this because of his ~\resetlinenumber~ which %% initializes ~\c@linenumber~ to 1 instead of 0---the latter is %% usual with \LaTeX\ counters. Cf._additional comment at %% ~\resetlinenumber~. % (/UL). % % Finally we put in the natural ~\interlinepenalty~, except % after the last line. % % (New v3.10) Frank Mittelbach points out that box255 may be % less deep than the last box inside, so he proposes to % measure the page depth with ~\boxmaxdepth=\maxdimen~. % (/New v3.10) % % (UL, New v4.00) We also resume the matter of % ~\vadjust~ items that was started in section_\ref{ss:output}. % % \TeX\ puts only nonzero interline % penalties into the vertical list (\TeX book p._105), while % ~lineno.sty~ formerly replaced the signal interline penalty by % something closing with an explicit penalty of the value that % the interline penalty would have without ~lineno.sty~. % This is usually 0. Now, explicit vertical penalties can be % very nasty with respect to ~\nopagebreak~, e.g., a low (even % positive) ~\widowpenalty~ may force a widow where you % explicitly tried to forbid it by ~\nopagebreak~ % (see explanation soon below). % The ~\nopagebreak~ we create here would never work if all % those zero penalties were present.---On % the other hand, we cannot just omit Stephan's zero penalties, % because \TeX\ puts a penalty of 10000 after what ~lineno.sty~ % inserts (\TeX book p._125). This penalty must be overridden % to allow page breaks between ordinary lines. To revive % ~\nopagebreak~, we therefore replace those zero (or low) % penalties by penalties that the user demanded by % ~\nopagebreak~.---This mechanism is not perfect and does not % exactly restore the original \LaTeX\ working of ~\pagebreak~ % and ~\nopagebreak~. Viz., if there are several vertical % penalties after a line which were produced by closely sitting % ~\[no]pagebreak~s, without ~lineno.sty~ the lowest penalty would % be effective (cf._\TeX book exercise_14.10). 
Our mechanism, by % contrast, chooses the \emph{last} user-set penalty of the line % as the effective one. It would not be very difficult to come % more close to the original mechanism, but until someone urges % us we will cling to the present simple way. You may consider an % advantage of the difference between our mechanism and the % original one that the user here can actually override low % penalties by ~\nopagebreak~, which may be what a lay \LaTeX\ % user would expect. %% ---Zero glue would do instead of zero %% penalty! This could make things easier. Maybe next time. %% <- v4.4: No, problem with column depth. % (/UL, /New v4.00) \def\MakeLineNo{% \@LN@maybe@normalLineNumber % v4.31 \boxmaxdepth\maxdimen\setbox\z@\vbox{\unvbox\@cclv}% \@tempdima\dp\z@ \unvbox\z@ \sbox\@tempboxa{\hb@xt@\z@{\makeLineNumber}}% %% % (New v4.00) Previously, % \begin{old}\begin{verbatim} % % \stepcounter{linenumber}% % \end{verbatim} % \end{old} %% %% TODO: Still first `\begin{old}'? % followed. (Of course, there was no % comment mark; I put it there to make % reading the actual code easy.) % % (New v4.22: improved) Why not just % \[~\global\advance\c@linenumber\@ne~?\] % ~\stepcounter~ additionally resets ``subordinate'' % counters, but which could these (usefully) be? % Again, may be column counters with ~edtable.sty~!? % % But then, our ~edtable.sty~ and its ~longtable~ option % should use it as well. So use a shorthand supporting % uniformity. You can even use it as a hook for choosing % ~\global\advance\c@linenumber\@ne~ instead of our choice. % (/New v4.22) %% \stepLineNumber %% % (New v4.4) Now %% \ht\@tempboxa\z@ \@LN@depthbox %% % appends the box containing the line number without changing % ~\prevdepth~---see end of section. % Now is the time for inserting the $\dots$ (/New v4.4) %% The line number has now been placed (it may be invisible %% depending on the modulo feature), so %% we can insert the % ~\vadjust~ items. We cannot do this much later, because % their right place is above the artificial interline % penalty which Stephan's code will soon insert % (cf._\TeX book p._105). The next command is just ~\relax~ % if no ~\vadjust~ items have been accumulated for the % current line. Otherwise it is a list macro inserting % the ~\vadjust~ items and finally resetting itself. % (This is made in section_\ref{ss:PVadj} below.) % If the final item is a penalty, it is stored so it can % compete with other things about page breaking. %% \@LN@do@vadjusts \count@\lastpenalty %% % At this place, % \begin{old}\begin{verbatim} % % \ifnum\outputpenalty=-\linenopenaltypar\else % \end{verbatim} % \end{old} % originally followed. We need something \emph{before} the % ~\else~: %% \ifnum\outputpenalty=-\linenopenaltypar \ifnum\count@=\z@ \else %% % So final ~\pagebreak[0]~ or ~\nopagebreak[0]~ has no % effect---but this will make a difference after headings only, % where nobody should place such a thing anyway. %% \xdef\@LN@parpgbrk{% \penalty\the\count@ \global\let\noexpand\@LN@parpgbrk \noexpand\@LN@screenoff@pen}% v4.4 %% % That penalty will replace former ~\kern\z@~ in % ~\linenumberpar~, see subsection_\ref{ss:calls}.---A % few days earlier, I tried to send just a penalty value. % However, the ~\kern\z@~ in ~\linenumberpar~ is crucial, % as I then found out. See below.---The final penalty is % repeated, but this does no harm. (It would not be very % difficult to avoid the repeating, but it may even be % less efficient.) 
It may be repeated due to the previous % ~\xdef~, but it may be repeated as well below in the % present macro where artificial interline penalty is to % be overridden. %% \fi \else %% % (/New v4.00) %% Corrected code alignment with v4.11. \@tempcnta\outputpenalty \advance\@tempcnta -\linenopenalty %% % (New v4.00) % \begin{old}\begin{verbatim} % % \penalty\@tempcnta % \end{verbatim} % \end{old} % followed previously. To give ~\nopagebreak~ a chance, % we do %% Corrected code alignment with v4.11. \penalty \ifnum\count@<\@tempcnta \@tempcnta \else \count@ \fi %% % instead.---In ~linenox0.sty~, the ~\else~ thing once was omitted. % Sergei Mariev's complaint (thanks!) showed that it is vital % (see comment before ~\MakeLineNo~). % The remaining ~\fi~ from previous package version closes the % ~\ifnum\outputpenalty~\dots % (/New v4.00) %% \fi } % (New v4.00) \newcommand\stepLineNumber{\stepcounter{linenumber}} % For reason, see use above. (/New v4.00) %% %% TODO v4.4+: ~\newcommand~ more often!? % % (New v4.4) The depth preserving trick is drawn here from % ~\MakeLineNo~ because it will be used again in % section_\ref{ss:calls}. \def\@LN@depthbox{% \dp\@tempboxa=\@tempdima \nointerlineskip \kern-\@tempdima \box\@tempboxa} % (/New v4.4) % % \section{% % Control line numbering % \unskip} % \subsection{% % Inserting \scs{output} calls %% own subsec. v4.4. % \unskip}\label{ss:calls} % The line numbering is controlled via ~\par~. \LaTeX\ % saved the \TeX-primitive ~\par~ in ~\@@par~. We push it % one level further out, and redefine ~\@@par~ to insert % the ~\interlinepenalty~ needed to trigger the % line numbering. And we need to allow pagebreaks after a % paragraph. % % New (2.05beta): the prevgraf test. A paragraph that ends with a % displayed equation, a ~\noindent\par~ or ~wrapfig.sty~ produce empty % paragraphs. These should not get a spurious line number via % ~\linenopenaltypar~. \let\@@@par\@@par \newcount\linenoprevgraf % (UL) And needs ~\linenoprevgraf~ % to be a counter? Perhaps there may be a paragraph having % thousands of lines, so ~\mathchardef~ doesn't suffice (really??). %% %% %% TODO: limitations of lines per paragraph elsewhere? %% %% Signal penalties, e.g.!? ~\deadcycles~!? %% % A macro ending on ~\relax~ might suffice, but would be % somewhat slow. I think I will use ~\mathchardef~ next time. % Or has any user used ~\linenoprevgraf~? (/UL) %% v4.33: changed code alignment for better understanding. \def\linenumberpar{% \ifvmode \@@@par \else \ifinner \@@@par \else \xdef\@LN@outer@holdins{\the\holdinginserts}% v4.2 \advance \interlinepenalty \linenopenalty \linenoprevgraf \prevgraf \global \holdinginserts \thr@@ \@@@par \ifnum\prevgraf>\linenoprevgraf \penalty-\linenopenaltypar \fi %% % (New v4.00) % \begin{old}\begin{verbatim} % % \kern\z@ % \end{verbatim} % \end{old} % was here previously. What for? % According to \TeX book p._125, Stephan's % interline penalty is changed into 10000. At the end of a % paragraph, the ~\parskip~ would follow that penalty of 10000, % so there could be a page break neither at the % ~\parskip~ nor at the ~\baselineskip~ (\TeX book p._110)---so % there could never be a page break between two paragraphs. % So something must screen off the 10000 penalty. % Indeed, the ~\kern~ is a place to break. % (Stephan once knew this: see `allow pagebreaks' above.) % % Formerly, I tried to replace ~\kern\z@~ by % \begin{old}\begin{verbatim} % % \penalty\@LN@parpgpen\relax % \end{verbatim} % \end{old} % ---but this allows a page break after heading. 
So: %% \@LN@parpgbrk %% %% After heading, ~\kern\z@~ resulting from previous line %% (see below) is followed by ~\write~ or ~\penalty10000~, %% so causes no page break. % % These and similar changes were formerly done by ~linenox1.sty~. % (/New v4.00) % % (New v4.4) % A ~\belowdisplayskip~ may precede the previous when the paragraph % ends on a display-math; or there may be a ~\topsep~ from a list, etc. % ~\addvspace~ couldn't take account for it with ~\kern\z@~ % here. v4.32 therefore moved the space down -- with at least two % bad consequences. % Moreover, David Josef Dev observes that ~\kern\z@~ may % inappropriately yield column depth 0pt. % For these reasons, we introduce ~\@LN@screenoff@pen~ below. % (/New v4.4) %% \global\holdinginserts\@LN@outer@holdins % v4.2 \advance\interlinepenalty -\linenopenalty \fi % from \ifinner ... \else \fi} % from \ifvmode ... \else % (New v4.00, v4.4) Initialize ~\@LN@parpgbrk~, accounting % for earlier space and for appropriate columndepth. % We use former ~\MakeLineNo~'s depth-preverving trick % ~\@LN@depthbox~ again: \def\@LN@screenoff@pen{% \ifdim\lastskip=\z@ \@tempdima\prevdepth \setbox\@tempboxa\null \@LN@depthbox \fi} \global\let\@LN@parpgbrk\@LN@screenoff@pen % (/New v4.4, v4.00) % \subsection{% % Turning on/off %% own subsec. v4.4. % \unskip}\label{ss:OnOff} % The basic commands to enable and disable line numbers. % ~\@par~ and ~\par~ are only touched, when they are ~\let~ % to ~\@@@par~/~\linenumberpar~. The line number may be % reset to 1 with the star-form, or set by an optional % argument ~[~~]~. % % (New v4.00) We add ~\ifLineNumbers~ etc.\ since % a number of our new adjustments need to know whether % linenumbering is active. This just provides a kind of % shorthand for ~\ifx\@@par\linenumberpar~; moreover it is % more stable: who knows what may happen to ~\@@par~?---A % caveat: ~\ifLineNumbers~ may be wrong. E.g., it may be % ~\iffalse~ where it acts, while a ~\linenumbers~ a few % lines below---in the same paragraph---brings about that % the line where the ~\ifLineNumbers~ appears gets a % marginal number. %% Better implementation suggested below. %% % (New v4.3) Just noticed: Such tricks have been % disallowed with v4.11, see subsections_\ref{ss:LL} % and_\ref{ss:OnOff}.---Moreover, the switching between % meanings of ~\linelabel~ for a possible error message % as of v4.11 is removed. Speed is difficult to esteem % and also depends on applications. Just use the most % simple code you find. (/New v4.3) \newif\ifLineNumbers \LineNumbersfalse % (/New v4.00) \def\linenumbers{% \LineNumberstrue % v4.00 \xdef\@LN@outer@holdins{\the\holdinginserts}% v4.3 %% % (New v4.3) The previous line is for ~{linenomath}~ % in a first numbered paragraph. (/New v4.3) %% \let\@@par\linenumberpar % \let\linelabel\@LN@linelabel % v4.11, removed v4.3 \ifx\@par\@@@par\let\@par\linenumberpar\fi \ifx\par\@@@par\let\par\linenumberpar\fi \@LN@maybe@moduloresume % v4.31 \@ifnextchar[{\resetlinenumber}%] {\@ifstar{\resetlinenumber}{}}% } \def\nolinenumbers{% \LineNumbersfalse % v4.00 \let\@@par\@@@par % \let\linelabel\@LN@LLerror % v4.11, removed v4.3 \ifx\@par\linenumberpar\let\@par\@@@par\fi \ifx\par\linenumberpar\let\par\@@@par\fi } % (New v4.00) Moreover, it is useful to switch to % ~\nolinenumbers~ in ~\@arrayparboxrestore~. We postpone this % to section_\ref{ss:ReDef} where we'll have an appending macro % for doing this. (/New v4.00) % % What happens with a display math? 
Since ~\par~ is not executed, % when breaking the lines before a display, they will not get % line numbers. Sorry, but I do not dare to change % ~\interlinepenalty~ globally, nor do I want to redefine % the display math environments here. % \begin{displaymath} % display \ math % \end{displaymath} % See the subsection below, for a wrapper environment to make % it work. But that requires to wrap each and every display % in your \LaTeX\ source %%. %% v4.3: % (see option ~displaymath~ in subsections_\ref{ss:v3opts} % and_\ref{ss:display} for some relief [UL]). % % The next two commands are provided to turn on line % numbering in a specific mode. Please note the difference: % for pagewise numbering, ~\linenumbers~ comes first to % inhibit it from seeing optional arguments, since % re-/presetting the counter is useless. \def\pagewiselinenumbers{\linenumbers\setpagewiselinenumbers} \def\runninglinenumbers{\setrunninglinenumbers\linenumbers} % Finally, it is a \LaTeX\ style, so we provide for the use % of environments, including the suppression of the % following paragraph's indentation. % %% TODO: v4.4+: % (UL) I am drawing the following % private thoughts of Stephan's to publicity so that others may % think about them---or to remind myself of them in an efficient % way. (/UL) %% UL changed `%%%' to `% %' below. %% TODO: add \par to \linenumbers, if called from an environment. %% v4.3 %% ToDO: add an \@endpe hack if \linenumbers are turned on % \begin{old}\begin{verbatim} % % TO DO: add \par to \linenumbers, if called from an environment. % % To DO: add an \@endpe hack if \linenumbers are turned on % % in horizontal mode. {\par\parskip\z@\noindent} or % % something. % \end{verbatim} % \end{old} % (UL) However, I rather think that ~\linenumbers~ and %% v4.31 % ~\nolinenumbers~ should execute a ~\par~ already. (Then the % ~\par~s in the following definitions should be removed.) (/UL) \@namedef{linenumbers*}{\par\linenumbers*} \@namedef{runninglinenumbers*}{\par\runninglinenumbers*} \def\endlinenumbers{\par\@endpetrue} \let\endrunninglinenumbers\endlinenumbers \let\endpagewiselinenumbers\endlinenumbers \expandafter\let\csname endlinenumbers*\endcsname\endlinenumbers \expandafter\let\csname endrunninglinenumbers*\endcsname\endlinenumbers \let\endnolinenumbers\endlinenumbers % % \subsection{% % Display math % \unskip}\label{ss:DM} % % Now we tackle the problem to get display math working. % There are different options. % \begin{enumerate}\item[ % 1.] Precede every display math with a ~\par~. % Not too good. % \item[ % 2.] Change ~\interlinepenalty~ and associates globally. % Unstable. % \item[ % 3.] Wrap each display math with a ~{linenomath}~ % environment. % \end{enumerate} % We'll go for option 3. See if it works: % \begin{linenomath} % \begin{equation} % display \ math % \end{equation} % \end{linenomath} % The star form ~{linenomath*}~ should also number the lines % of the display itself, % \begin{linenomath*} % \begin{eqnarray} % multi && line \\ % display && math \\ % & % \begin{array}{c} % with \\ % array % \end{array} % & % \end{eqnarray} % \end{linenomath*} % including multline displays. % % First, here are two macros to turn % on linenumbering on paragraphs preceeding displays, with % numbering the lines of the display itself, or without. % The ~\ifx..~ tests if line numbering is turned on. It % does not harm to add these wrappers in sections that are % not numbered. 
Nor does it harm to wrap a display % twice, e.g., in case you have some ~{equation}~s wrapped % explicitly, and later you redefine ~\equation~ to do it % automatically. % % (New v4.3) To avoid the spurious line number above a % display in vmode, I insert ~\ifhmode~. (/New v4.3) \newcommand\linenomathNonumbers{% \ifLineNumbers %% \ifx\@@par\@@@par\else \ifnum\interlinepenalty>-\linenopenaltypar \global\holdinginserts\thr@@ \advance\interlinepenalty \linenopenalty \ifhmode % v4.3 \advance\predisplaypenalty \linenopenalty \fi \fi \fi \ignorespaces } \newcommand\linenomathWithnumbers{% \ifLineNumbers %% \ifx\@@par\@@@par\else \ifnum\interlinepenalty>-\linenopenaltypar \global\holdinginserts\thr@@ \advance\interlinepenalty \linenopenalty \ifhmode % v4.3 \advance\predisplaypenalty \linenopenalty \fi \advance\postdisplaypenalty \linenopenalty \advance\interdisplaylinepenalty \linenopenalty \fi \fi \ignorespaces } % The ~{linenomath}~ environment has two forms, with and % without a star. The following two macros define the % environment, where the starred/non-starred form does/doesn't number the % lines of the display or vice versa. \newcommand\linenumberdisplaymath{% \def\linenomath{\linenomathWithnumbers}% \@namedef{linenomath*}{\linenomathNonumbers}% } \newcommand\nolinenumberdisplaymath{% \def\linenomath{\linenomathNonumbers}% \@namedef{linenomath*}{\linenomathWithnumbers}% } \def\endlinenomath{% \ifLineNumbers % v4.3 \global\holdinginserts\@LN@outer@holdins % v4.21 \fi \global % v4.21 support for LaTeX2e earlier than 1996/07/26. \@ignoretrue } \expandafter\let\csname endlinenomath*\endcsname\endlinenomath % The default is not to number the lines of a display. But % the package option ~mathlines~ may be used to switch % that behavior. \nolinenumberdisplaymath % % \section{% % Line number references % \unskip}\label{s:LNref} % \subsection{% % Internals %% New subsec. v4.3. % \unskip} % The only way to get a label to a line number in a % paragraph is to ask the output routine to mark it. % % (New v4.00) The following two paragraphs don't hold any % longer, see below. (/New v4.00) % \begin{old}\begin{verbatim} % % We use the marginpar mechanism to hook to ~\output~ for a % % second time. Marginpars are floats with number $-1$, we % % fake marginpars with No $-2$. Originally, every negative % % numbered float was considered to be a marginpar. % % % % The float box number ~\@currbox~ is used to transfer the % % label name in a macro called ~\@LNL@~. % \end{verbatim} % \end{old} % A ~\newlabel~ is written to the aux-file. The reference % is to ~\theLineNumber~, \emph{not} ~\thelinenumber~. % This allows hooking in, as done below for pagewise line % numbering. % % (New v3.03) The ~\@LN@ExtraLabelItems~ are added for a hook % to keep packages like ~{hyperref}~ happy. (/New v3.03) % % (New v4.00) % We fire the ~\marginpar~ mechanism, so we leave \LaTeX's % ~\@addmarginpar~ untouched. % \begin{old}\begin{verbatim} % % \let\@LN@addmarginpar\@addmarginpar % % \def\@addmarginpar{% % % \ifnum\count\@currbox>-2\relax % % \expandafter\@LN@addmarginpar % % \else % % \@cons\@freelist\@currbox % % \protected@write\@auxout{}{% % % \string\newlabel % % {\csname @LNL@\the\@currbox\endcsname}% % % {{\theLineNumber}{\thepage}\@LN@ExtraLabelItems}}% % % \fi} % \end{verbatim} % \end{old} % OK, we keep Stephan's ~\@LN@ExtraLabelItems~: % (/New v4.00) \let\@LN@ExtraLabelItems\@empty % (New v4.00) % We imitate the ~\marginpar~ mechanism without using the % ~\@freelist~ boxes. ~\linelabel~ will indeed place a signal
~\linelabel~ will indeed place a signal % penalty (~\@Mllbcodepen~, new), and it will put a label into % some list macro ~\@LN@labellist~. A new part of the output % routine will take the labels from the list and will write % ~\newlabel~s to the .aux file. % % The following is a version of \LaTeX's ~\@xnext~. \def\@LN@xnext#1\@lt#2\@@#3#4{\def#3{#1}\gdef#4{#2}} % This takes an item ~#1~ from a list ~#4~ into ~#3~; % to be used as ~\expandafter\@LN@xnext#4\@@#3#4~. % Our lists use ~\@lt~ after each item for separating. % Indeed, there will be another list macro which can % appear as argument ~#4~, this will be used for moving % ~\vadjust~ items (section_\ref{ss:PVadj}). % The list for ~\linelabel~s is the following: \global\let\@LN@labellist\@empty % The next is the new part of the output routine writing the % ~\newlabel~ to the .aux file. Since it is no real page output, % the page is put back to top of the main vertical list. \def\WriteLineNo{% \unvbox\@cclv \expandafter \@LN@xnext \@LN@labellist \@@ \@LN@label \@LN@labellist \protected@write\@auxout{}{\string\newlabel{\@LN@label}% {{\theLineNumber}{\thepage}\@LN@ExtraLabelItems}}% } % (/New v4.00) % % \subsection{% % The \scs{linelabel} command % \unskip}\label{ss:LL} % To refer to a place in line ~\ref{~~}~ at page % ~\pageref{~~}~ you place a ~\linelabel{~~}~ at % that place. % % \linelabel{demo} % \marginpar{\tiny\raggedright % See if it works: This paragraph % starts on page \pageref{demo}, line % \ref{demo}. % \unskip}% % (New v4.11) % \begin{old}\begin{verbatim} % % If you use this command outside a ~\linenumbers~ % % paragraph, you will get references to some bogus % % line numbers, sorry. But we don't disable the command, % % because only the ~\par~ at the end of a paragraph may % % decide whether to print line numbers on this paragraph % % or not. A ~\linelabel~ may legally appear earlier than % % ~\linenumbers~. % \end{verbatim} % \end{old} % This trick is better not allowed---see subsections_\ref{ss:LL} % and_\ref{ss:OnOff}. % (/New v4.11) % % ~\linelabel~ % \begin{old}\begin{verbatim} % %, via a fake float number $-2$, %% new mechanism v4.00 % \end{verbatim} % \end{old} % puts a % ~\penalty~ into a ~\vadjust~, which triggers the % pagebuilder after putting the current line to the main % vertical list. A ~\write~ is placed on the main vertical % list, which prints a reference to the current value of % ~\thelinenumber~ and ~\thepage~ at the time of the % ~\shipout~. % % A ~\linelabel~ is allowed only in outer horizontal mode. % In outer vertical mode we start a paragraph, and ignore % trailing spaces (by fooling ~\@esphack~). % % (New v4.00) We aim at relaxing the previous condition. % We insert a hook ~\@LN@mathhook~ and a shorthand % ~\@LN@postlabel~ to support the ~mathrefs~ option which % allows ~\linelabel~ in math mode. % % The next paragraph is no longer valid. % \begin{old}\begin{verbatim} % % The argument of ~\linelabel~ is put into a macro with a % % name derived from the number of the allocated float box. % % Much of the rest is dummy float setup. % \end{verbatim} % \end{old} % (/New v4.00) % % (New v4.11) % \begin{old}\begin{verbatim} % % \def\linelabel#1{% % \end{verbatim} % \end{old} % I forgot ~\linenumbers~ today, costed me hours or so. \def\@LN@LLerror{\PackageError{lineno}{% \string\linelabel\space without \string\linenumbers}{% Just see documentation. (New feature v4.11)}\@gobble} % (New v4.3) Here some things have changed for v4.3. % The previous ~#1~ has been replaced by ~\@gobble~. 
% Ensuing, the ~\linelabel~ error message is re-implemented. % I find it difficult to compare efficiency of slight % alternatives---so choose an easy one. Explicit switching % in ~\linenumbers~ and ~\nolinenumbers~ is an additional % command that may better be avoided. \newcommand\linelabel{% \ifLineNumbers \expandafter \@LN@linelabel \else \expandafter \@LN@LLerror \fi} %%\let\linelabel\@LN@LLerror \gdef\@LN@linelabel#1{% %% % ~\gdef~ for hyperref ``symbolically''. (/New v4.11) %% \ifx\protect\@typeset@protect %% % $\gets$ And a ~\linelabel~ should never be replicated in a % mark or a TOC entry. (/New v4.3) %% \ifvmode \ifinner \else \leavevmode \@bsphack \@savsk\p@ \fi \else \@bsphack \fi \ifhmode \ifinner \@parmoderr \else %% % (New v4.00) %% \@LN@postlabel{#1}% % \begin{old}\begin{verbatim} % % \@floatpenalty -\@Mii % % \@next\@currbox\@freelist % % {\global\count\@currbox-2% % % \expandafter\gdef\csname @LNL@\the\@currbox\endcsname{#1}}% % % {\@floatpenalty\z@ \@fltovf \def\@currbox{\@tempboxa}}% % % \begingroup % % \setbox\@currbox \color@vbox \vbox \bgroup \end@float % % \endgroup % % \@ignorefalse \@esphack % \end{verbatim} % \end{old} % (/New v4.00) %% \@esphack %% % (New v4.00) % The ~\@ignorefalse~ was appropriate before because the % ~\@Esphack~ in ~\end@float~ set ~\@ignoretrue~. Cf._\LaTeX's % ~\@xympar~. (/New v4.00) %% \fi \else %% % (New v4.00) %% \@LN@mathhook{#1}% % \begin{old}\begin{verbatim} % % \@parmoderr % \end{verbatim} % \end{old} % Instead of complaining, you may just do your job. % (/New v4.00) %% \fi \fi } % (New v4.00) The shorthand just does what happened % with ~linenox0.sty~ before ~ednmath0.sty~ (New v4.1: % now ~mathrefs~ option) appeared, and % the hook is initialized to serve the same purpose. % So errors come just where Stephan had built them in, % and this is just the \LaTeX\ ~\marginpar~ behaviour. \def\@LN@postlabel#1{\g@addto@macro\@LN@labellist{#1\@lt}% \vadjust{\penalty-\@Mllbcodepen}} \def\@LN@mathhook#1{\@parmoderr} % (/New v4.00) % % \modulolinenumbers[3] % \firstlinenumber{1} % \section{% % The appearance of the line numbers % \unskip}\label{appearance} % \subsection{% % Basic code %% own subsec. v4.2. % \unskip} % % The line numbers are set as ~\tiny\sffamily\arabic{linenumber}~, % $10pt$ left of the text. With options to place it % right of the text, or . . . % % . . . here are the hooks: \def\makeLineNumberLeft{% \hss\linenumberfont\LineNumber\hskip\linenumbersep} \def\makeLineNumberRight{% \linenumberfont\hskip\linenumbersep\hskip\columnwidth \hb@xt@\linenumberwidth{\hss\LineNumber}\hss} \def\linenumberfont{\normalfont\tiny\sffamily} \newdimen\linenumbersep \newdimen\linenumberwidth \linenumberwidth=10pt \linenumbersep=10pt % Margin switching requires ~pagewise~ numbering mode, but % choosing the left or right margin for the numbers always % works. \def\switchlinenumbers{\@ifstar {\let\makeLineNumberOdd\makeLineNumberRight \let\makeLineNumberEven\makeLineNumberLeft}% {\let\makeLineNumberOdd\makeLineNumberLeft \let\makeLineNumberEven\makeLineNumberRight}% } \def\setmakelinenumbers#1{\@ifstar {\let\makeLineNumberRunning#1% \let\makeLineNumberOdd#1% \let\makeLineNumberEven#1}% {\ifx\c@linenumber\c@runninglinenumber \let\makeLineNumberRunning#1% \else \let\makeLineNumberOdd#1% \let\makeLineNumberEven#1% \fi}% } \def\leftlinenumbers{\setmakelinenumbers\makeLineNumberLeft} \def\rightlinenumbers{\setmakelinenumbers\makeLineNumberRight} \leftlinenumbers* % ~\LineNumber~ is a hook which is used for the modulo stuff. 
% It is the command to use for the line number, when you % customize ~\makeLineNumber~. Use ~\thelinenumber~ to % change the outfit of the digits. % % % We will implement two modes of operation: % \begin{itemize} % \item numbers ~running~ through (parts of) the text % \item ~pagewise~ numbers starting over with one on top of % each page. % \end{itemize} % Both modes have their own count register, but only one is % allocated as a \LaTeX\ counter, with the attached % facilities serving both. \newcounter{linenumber} \newcount\c@pagewiselinenumber \let\c@runninglinenumber\c@linenumber % Only the running mode counter may be reset, or preset, % for individual paragraphs. The pagewise counter must % give a unique anonymous number for each line. % % (New v4.3) ~\newcounter{linenumber}~ % was the only ~\newcounter~ in the whole package, and % formerly I was near using ~\newcount~ instead. Yet % ~\newcounter~ may be quite useful for ~\includeonly~. % It also supports resetting ``subcounters'', but what % could these be? Well, ~edtable~ might introduce a % subcounter for columns. % (Note that \LaTeX's setting commands would work with % ~\newcount\c@linenumber~ already, apart from this. % And perhaps sometimes ~\refstepcounter{linenumber}~ % wouldn't work---cf._my discussion of ~\stepcounter~ in % subsection_\ref{ss:MLN}, similarly ~\refstep...~ would % be quite useless. % Even the usual redefinitions of ~\thelinenumber~ would % work. It is nice, on the other hand, that % ~\thelinenumber~ is predefined here. \LaTeX's % initialization of the value perhaps just serves making % clear \LaTeX\ counters should always be changed % globally.---Shortened and improved the discussion here.) % (/New v4.3) % % (New v4.22) % ~\c@linenumber~ usually is---globally---incremented by % ~\stepcounter~ (at present), so resetting it locally would % raise the save stack problem of \TeX book p._301, moreover % it would be is useless, there is no hope of keeping the % values local (but see subsection_\ref{ss:ILN}). So I insert % ~\global~: (/New v4.22) \newcommand*\resetlinenumber[1][\@ne]{% \global % v4.22 \c@runninglinenumber#1\relax} % (New v4.00) % \begin{old}\begin{verbatim} % % \newcommand\resetlinenumber[1][1]{\c@runninglinenumber#1} % \end{verbatim} % \end{old} % Added ~\relax~, being quite sure that this does no harm % and is quite important, as with ~\setcounter~ etc. % I consider this a bug fix (although perhaps no user has % ever had a problem with this). (/New v4.00) % % (v4.22: I had made much fuss about resetting subordinate % counters here---removed, somewhat postponed.) % %% TODO v4.4+: %% \newcommand*\resetlinenumber[1][\@ne]{% %% \ifx\c@linenumber\c@runninglinenumber %% \global\c@linenumber#1\relax %% \global\advance\c@linenumber\m@ne %% \stepLineNumber %% \else %% \PackageError{lineno}%% Shorthand!? %% {You can't reset line number in pagewise mode}% %% {This should suffice.}% %% \fi %% } % % \subsection{% % Running line numbers % \unskip} % % Running mode is easy, ~\LineNumber~ and ~\theLineNumber~ % produce ~\thelinenumber~, which defaults to % ~\arabic{linenumber}~, using the ~\c@runninglinenumber~ % counter. This is the default mode of operation. \def\makeRunningLineNumber{\makeLineNumberRunning} \def\setrunninglinenumbers{% \def\theLineNumber{\thelinenumber}% \let\c@linenumber\c@runninglinenumber \let\makeLineNumber\makeRunningLineNumber } \setrunninglinenumbers\resetlinenumber % % \subsection{% % Pagewise line numbers % \unskip}\label{ss:PW} % % Difficult, if you think about it. 
The number has to be % printed when there is no means to know on which page it % will end up, except through the aux-file. My solution % is really expensive, but quite robust. % % With version ~v2.00~ the hashsize requirements are % reduced, because we do not need one controlsequence for % each line any more. But this costs some computation time % to find out on which page we are. % % ~\makeLineNumber~ gets a hook to log the line and page % number to the aux-file. Another hook tries to find out % what the page offset is, and subtracts it from the counter % ~\c@linenumber~. Additionally, the switch % ~\ifoddNumberedPage~ is set true for odd numbered pages, % false otherwise. \def\setpagewiselinenumbers{% \let\theLineNumber\thePagewiseLineNumber \let\c@linenumber\c@pagewiselinenumber \let\makeLineNumber\makePagewiseLineNumber } \def\makePagewiseLineNumber{\logtheLineNumber\getLineNumber \ifoddNumberedPage \makeLineNumberOdd \else \makeLineNumberEven \fi } % Each numbered line gives a line to the aux file % \begin{verse} % ~\@LN{~~}{~~}~ % \end{verse} % very similar to the ~\newlabel~ business, except that we need % an arabic representation of the page number, not what % there might else be in ~\thepage~. \def\logtheLineNumber{\protected@write\@auxout{}{% %% % (New v4.00) (UL) % As Daniel Doherty observed, the earlier line % \begin{old}\begin{verbatim} % % \string\@LN{\the\c@linenumber}{\noexpand\the\c@page}}} % \end{verbatim} % \end{old} % here may lead into an infinite loop when the user resets % the page number (think of ~\pagenumbering~, e.g.). % Stephan and I brief\/ly discussed the matter and decided % to introduce a ``physical''-page counter to which % ~\logtheLineNumber~ refers. It was Stephan's idea to use % ~\cl@page~ for reliably augmenting the ``physical''-page % counter. However, this relies on the output routine once % doing ~\stepcounter{page}~. Before Stephan's % suggestion, I had thought of appending the stepping to % \LaTeX's ~\@outputpage~.---So the macro definition ends % as follows. %% \string\@LN{\the\c@linenumber}{% %% % (New v4.2) %% \noexpand\number\n@LN@truepage}}} %% % The `truepage' counter must start with ~\c@~ so it works % with ~\include~, and the ~\@addtoreset~ below is needed % for the same purpose. %% \noexpand\the\c@LN@truepage}}} %% \newcount\n@LN@truepage %% \g@addto@macro\cl@page{\global\advance\n@LN@truepage\@ne} \newcount\c@LN@truepage \g@addto@macro\cl@page{\global\advance\c@LN@truepage\@ne} \@addtoreset{LN@truepage}{@ckpt} % (/New v4.2) I had thought of offering more % features of a \LaTeX\ counter. However, the user should % better \emph{not} have access to this counter. ~\c@page~ % should suffice as a pagewise master counter.---To be sure, % along the present lines the user \emph{can} manipulate % ~\c@LN@truepage~ by ~\stepcounter{page}~. E.g., she might % do this in order to manually insert a photograph. Well, % seems not to harm. % % The above usage of ~\g@addto@macro~ and ~\cl@page~ may be % not as stable as Stephan intended. His proposal used % ~\xdef~ directly. But he used ~\cl@page~ as well, and who % knows \dots{} And as to ~\g@addto@macro~, I have introduced % it for list macros anyway. % (/UL) (/New v4.00) % % From the aux-file we get one macro ~\LN@P~ for each % page with line numbers on it. This macro calls four other % macros with one argument each. These macros are % dynamically defined to do tests and actions, to find out % on which page the current line number is located. 
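% To fix ideas with hypothetical values: if three consecutive
% numbered lines end up on the first ``physical'' page, the
% aux-file may receive entries like
% \begin{verse}
% ~\@LN{1}{1}~ ~\@LN{2}{1}~ ~\@LN{3}{1}~
% \end{verse}
% and ~\@LN~, defined below, compiles such entries into the
% ~\LN@P~ macro of the corresponding page.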
% % We need sort of a pointer to the first page with line % numbers, initialized to point to nothing: \def\LastNumberedPage{first} \def\LN@Pfirst{\nextLN\relax} % The four dynamic macros are initialized to reproduce % themselves in an ~\xdef~ \let\lastLN\relax % compare to last line on this page \let\firstLN\relax % compare to first line on this page \let\pageLN\relax % get the page number, compute the linenumber \let\nextLN\relax % move to the next page % During the end-document run through the aux-files, we % disable ~\@LN~. I may put in a check here later, to give % a rerun recommendation. \AtEndDocument{\let\@LN\@gobbletwo} % Now, this is the tricky part. First of all, the whole % definition of ~\@LN~ is grouped, to avoid accumulation % on the save stack. Somehow ~\csname~~\endcsname~ pushes % an entry, which stays after an ~\xdef~ to that control % sequence. % % If ~\LN@P<page>~ is undefined, initialize it with the % current page and line number, with the % \emph{pointer-to-the-next-page} pointing to nothing. And % the macro for the previous page will be redefined to point % to the current one. % % If the macro for the current page already exists, just % redefine the \emph{last-line-number} entry. % % Finally, save the current page number, to get the pointer to the % following page later. \def\@LN#1#2{{\expandafter\@@LN \csname LN@P#2C\@LN@column\expandafter\endcsname \csname LN@PO#2\endcsname {#1}{#2}}} \def\@@LN#1#2#3#4{\ifx#1\relax \ifx#2\relax\gdef#2{#3}\fi \expandafter\@@@LN\csname LN@P\LastNumberedPage\endcsname#1% \xdef#1{\lastLN{#3}\firstLN{#3}% \pageLN{#4}{\@LN@column}{#2}\nextLN\relax}% \else \def\lastLN##1{\noexpand\lastLN{#3}}% \xdef#1{#1}% \fi \xdef\LastNumberedPage{#4C\@LN@column}} % The previous page macro gets its pointer to the % current one, replacing the ~\relax~ with the cs-token % ~\LN@P<page>~. \def\@@@LN#1#2{{\def\nextLN##1{\noexpand\nextLN\noexpand#2}% \xdef#1{#1}}} % Now, to print a line number, we need to find the page % where it resides. This will most probably be the page where % the last one came from, or maybe the next page. However, it can % be a completely different one. We maintain a cache, % which is ~\let~ to the last page's macro. But for now % it is initialized to expand ~\LN@Pfirst~, where the pointer % to the first numbered page has been stored. \def\NumberedPageCache{\LN@Pfirst} % To find out on which page the current ~\c@linenumber~ is, % we define the four dynamic macros to do something useful % and execute the current cache macro. ~\lastLN~ is run % first, testing if the line number in question may be on a % later page. If so, disable ~\firstLN~, and go on to the % next page via ~\nextLN~. \def\testLastNumberedPage#1{\ifnum#1<\c@linenumber \let\firstLN\@gobble \fi} % Else, if ~\firstLN~ finds out that we need an earlier % page, we start over from the beginning. Else, ~\nextLN~ % will be disabled, and ~\pageLN~ will run % ~\gotNumberedPage~ with four arguments: the first line % number on this column, the page number, the column % number, and the first line on the page. \def\testFirstNumberedPage#1{\ifnum#1>\c@linenumber \def\nextLN##1{\testNextNumberedPage\LN@Pfirst}% \else \let\nextLN\@gobble \def\pageLN{\gotNumberedPage{#1}}% \fi} % We start with ~\pageLN~ disabled and ~\nextLN~ defined to % continue the search with the next page.
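% To illustrate with the made-up numbers from above: after % lines 17 to 43 have been logged for page_4 (column_1), % ~\LN@P4C1~ expands, roughly, to % \begin{verse} % ~\lastLN{43}\firstLN{17}\pageLN{4}{1}{17}\nextLN\LN@P5C1~ % \end{verse} % where the last argument of ~\pageLN~ is the first line % number on the page, and ~\nextLN~ carries the pointer to % the macro of the next numbered page.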
\long\def \@gobblethree #1#2#3{} \def\testNumberedPage{% \let\lastLN\testLastNumberedPage \let\firstLN\testFirstNumberedPage \let\pageLN\@gobblethree \let\nextLN\testNextNumberedPage \NumberedPageCache } % When we switch to another page, we first have to make % sure that it is there. If we are done with the last % page, we probably need to run \TeX\ again, but for the % rest of this run, the cache macro will just return four % zeros. This saves a lot of time, for example, if you have % half of an aux-file from an aborted run: in the next run % the whole page-list would otherwise be searched in vain % again and again for the second half of the document. % % If there is another page, we iterate the search. \def\testNextNumberedPage#1{\ifx#1\relax \global\def\NumberedPageCache{\gotNumberedPage0000}% \PackageWarningNoLine{lineno}% {Linenumber reference failed, \MessageBreak rerun to get it right}% \else \global\let\NumberedPageCache#1% \fi \testNumberedPage } % \linelabel{demo2} % \marginpar{\tiny\raggedright % Let's see if it finds the label % on page \pageref{demo}, % line \ref{demo}, and back here % on page \pageref{demo2}, line % \ref{demo2}. % \unskip}% % To separate the official hooks from the internals there is % this equivalence, to hook in later for whatever purpose: \let\getLineNumber\testNumberedPage % So, now we have found the page the number is on. We % establish if we are on an odd or even page, and calculate % the final line number to be printed. \newif\ifoddNumberedPage \newif\ifcolumnwiselinenumbers \columnwiselinenumbersfalse \def\gotNumberedPage#1#2#3#4{\oddNumberedPagefalse \ifodd \if@twocolumn #3\else #2\fi\relax\oddNumberedPagetrue\fi \advance\c@linenumber\@ne \ifcolumnwiselinenumbers \subtractlinenumberoffset{#1}% \else \subtractlinenumberoffset{#4}% \fi } % You might want to run the pagewise mode with running line % numbers, or you might not. It's your choice: \def\runningpagewiselinenumbers{% \let\subtractlinenumberoffset\@gobble } \def\realpagewiselinenumbers{% \def\subtractlinenumberoffset##1{\advance\c@linenumber-##1\relax}% } \realpagewiselinenumbers % For line number references, we need a protected call to % the whole procedure, with the requested line number stored % in the ~\c@linenumber~ counter. This is what gets printed % to the aux-file to make a label: \def\thePagewiseLineNumber{\protect \getpagewiselinenumber{\the\c@linenumber}}% % And here is what happens when the label is referred to: \def\getpagewiselinenumber#1{{% \c@linenumber #1\relax\testNumberedPage \thelinenumber }} % % % A summary of all per line expenses: % \begin{description}\item % [CPU:] The ~\output~ routine is called for each line, % and the page-search is done. % \item % [DISK:] One line of output to the aux-file for each % numbered line. % \item % [MEM:] One macro per page. Great improvement over v1.02, % which had one control sequence per line in % addition. It blew the hash table after some five % thousand lines. % \end{description} % % \subsection{% % Twocolumn mode (New v3.06) % \unskip} % % Twocolumn mode requires another patch to the ~\output~ % routine, in order to print a column tag to the .aux % file. \AtBeginDocument{% v4.2, revtex4.cls (e.g.). % <- TODO v4.4+: Or better in \LineNoLaTeXOutput!? \let\@LN@orig@makecol\@makecol} \def\@LN@makecol{% \@LN@orig@makecol \setbox\@outputbox \vbox{% \boxmaxdepth \@maxdepth \protected@write\@auxout{}{% \string\@LN@col{\if@firstcolumn1\else2\fi}% }% \box\@outputbox }% \vbox } %% TODO cf. revtexln.sty. \def\@LN@col{\def\@LN@column} % v4.22, removed #1.
\@LN@col{1} % % \subsection{% % Numbering modulo $m$, starting at $f$ %% Numbering modulo 5 % \unskip}\label{ss:Mod} % % Most users want to have only one in five lines numbered. % ~\LineNumber~ is supposed to produce the outfit of the % line number attached to the line, while ~\thelinenumber~ % is used also for references, which should appear even if % they are not multiples of five. % % (New v4.00) Moreover, some users want to % control which line number should be printed first. Support % of this is now introduced here---see ~\firstlinenumber~ % below.---~numline.sty~ by Michael Jaegermann and % James Fortune offers controlling which \emph{final} % line numbers should not be printed. What is % it good for? We ignore this here until some user demands % it.---Peter Wilson's ~ledmac.sty~ offers many different % choices of line numbers to be printed, due to Wayne Sullivan. % (/New v4.00) % % (New v4.22) ~\c@linenumbermodulo~ is rendered a % fake counter, as discussed since v4.00. So it can % no longer be set by ~\setcounter~. ~\modulolinenumbers~ % serves this purpose. Well, does anybody want to do % what worked with ~\addtocounter~? (Then please tell % me.)---At least, ~\value~ still works. For the same % purpose I rename the fake `firstlinenumber' counter % ~\n@...~ to ~\c@...~. (/New v4.22) % \begin{old}\begin{verbatim} % % \newcount\c@linenumbermodulo % removed for v4.22 % \end{verbatim} % \end{old} % %% Removed for v4.22: %% (UL) On my question why, e.g., %% ~\chardef~ would not have sufficed, Stephan couldn't remember %% exactly; guessed that he wanted to offer \LaTeX\ counter %% facilities. However, the typical ones don't come this way. %% So I'm quite sure that I will change this next time. %% %% However, I observed at least two times that users gave a very %% high value to ~\c@linenumbermodulo~ in order to suppress %% printing of the line number. One of these users preferred their %% own way of handling line numbers and just wanted to use %% ~\linelabel~ and ~ednotes.sty~ features. Should we support this? %% I rather would like to advise them to %% ~\let\makeLineNumber\relax~. (/UL) % % (New v4.00) \par % ~\themodulolinenumber~ waits for being declared % ~\LineNumber~ by ~\modulolinenumbers~. (This has % been so before, no change.) Here is how it % looked before: % \begin{old}\begin{verbatim} % % \def\themodulolinenumber{{\@tempcnta\c@linenumber % % \divide\@tempcnta\c@linenumbermodulo % % \multiply\@tempcnta\c@linenumbermodulo % % \ifnum\@tempcnta=\c@linenumber\thelinenumber\fi % % }} % \end{verbatim} % \end{old} % (UL) This was somewhat slow. This arithmetic % happens at every line. This time I tend to declare an extra %% TODO v4.4+ % line counter (as opposed to my usual recommendations to use % counters as rarely as possible) which is stepped every line. % It could be incremented in the same way as ~\c@LN@truepage~ % is incremented via ~\cl@page~! This is another point in favour % of ~{linenumber}~ being a \LaTeX\ counter! % When this new counter equals ~\c@linenumbermodulo~, it is reset, % and ~\thelinenumber~ is executed.---It gets much slower with my % support of controlling the first line number below. I should % improve this.---On %% %% TODO v4.4+--pagewise!? % the other hand, time expense means very little nowadays, % while the number of \TeX\ counters still is limited. % % For the same purpose, moreover, attaching the line number % box could be intercepted earlier (in ~\MakeLineNo~), % without changing ~\LineNumber~.
However, this may be % bad for the latter's announcement as a wizard interface % in section_\ref{s:UserCmds}. %% %% I wonder about Stephan's group. Its only effect is that %% ~\@tempcnta~ is restored after using it. What is this for? %% I tend to remove the group braces. %% TODO v4.4+ % (/UL) % % Here is the new code. It is very near to my ~lnopatch.sty~ % code which introduced the first line number feature % before.---I add starting with a ~\relax~, which is so often % recommended---without really understanding this. At least, % it will not harm.---Former group braces appear as % ~\begingroup~/~\endgroup~ here. \def\themodulolinenumber{\relax \ifnum\c@linenumber<\c@firstlinenumber \else \begingroup \@tempcnta\c@linenumber \advance\@tempcnta-\c@firstlinenumber \divide\@tempcnta\c@linenumbermodulo \multiply\@tempcnta\c@linenumbermodulo \advance\@tempcnta\c@firstlinenumber \ifnum\@tempcnta=\c@linenumber \thelinenumber \fi \endgroup \fi } % (/New v4.00) % % The user command to set the modulo counter: % (New v4.31) \dots\ a star variant is introduced to implement % Hillel Chayim Yisraeli's idea to print the first line number % after an interruption of the edited text by some editor's % text, regardless of the modulo. If it is 1, it is printed only % with ~\firstlinenumber{1}~. I.e., you use ~\modulolinenumbers*~ % for the new feature; without the star you get the simpler % behaviour that we have had so far. And you can switch back % from the refined behaviour to the simple one by using % ~\modulolinenumbers~ without the star.---This enhancement % is accompanied by a new package option ~modulo*~ which just % executes ~\modulolinenumbers*~ % (subsection_\ref{ss:v3opts}).---`With ~\firstlinenumber{1}~' % exactly means: `1' is printed if and only if the last % ~\firstlinenumber~ before or in the paragraph that follows % the ``interruption'' has argument `1' (or something % \emph{expanding} to `1', or (to) something that \TeX\ % ``reads'' as 1, e.g.: a \TeX\ count register storing % 1).---At present, this behaviour may be unsatisfactory with % pagewise line-numbering $\dots$ I'll make an experimental % extra package if someone complains \dots \newcommand\modulolinenumbers{% \@ifstar {\def\@LN@maybe@moduloresume{% \global\let\@LN@maybe@normalLineNumber \@LN@normalLineNumber}% \@LN@modulolinenos}% {\let\@LN@maybe@moduloresume\relax \@LN@modulolinenos}% } \global\let\@LN@maybe@normalLineNumber\relax \let\@LN@maybe@moduloresume\relax \gdef\@LN@normalLineNumber{% \ifnum\c@linenumber=\c@firstlinenumber \else \ifnum\c@linenumber>\@ne \def\LineNumber{\thelinenumber}% \fi \fi %% % ~\def~ instead of ~\let~ enables taking account of a % redefinition of ~\thelinenumber~ in a present numbering % environment (e.g.). %% \global\let\@LN@maybe@normalLineNumber\relax} % Instead of changing ~\LineNumber~ directly by % ~LN@moduloresume~, these tricks enable ~\modulolinenumbers*~ % to act as locally as I can make it. I don't know how to % prevent the output routine from switching back to the normal % modulo behaviour by a global change. (An ~\aftergroup~ may % fail in admittedly improbable cases.) \newcommand*\@LN@modulolinenos[1][\z@]{% %% % The definition of this macro is that of the former % ~\modulolinenumbers~.
(/New v4.31) %% \let\LineNumber\themodulolinenumber \ifnum#1>\@ne \chardef % v4.22, note below \c@linenumbermodulo#1\relax \else\ifnum#1=\@ne % \begin{old}\begin{verbatim} % % \def\LineNumber{\thelinenumber}% % \end{verbatim} % \end{old} % (New v4.00) I am putting something here to enable % ~\firstlinenumber~ with $~\c@linenumbermodulo~=1$. % With ~lnopatch.sty~, a trick was offered for this purpose. % It is now obsolete. % \def\LineNumber{\@LN@ifgreat\thelinenumber}% %% % (/New v4.00) %% \fi\fi } % (New v4.00) The default of ~\@LN@ifgreat~ is \let\@LN@ifgreat\relax % The previous changes as soon as ~\firstlinenumber~ is used: \newcommand*\firstlinenumber[1]{% \chardef\c@firstlinenumber#1\relax %% % No counter, only small values allowed---OK?---(UL) % The change is local---OK? The good thing is that % ~\global\firstlinenumber{<number>}~ works. Moreover, % ~\modulolinenumbers~ acts locally as well. (/UL) % % (New v4.31) %% \let\@LN@ifgreat\@LN@ifgreat@critical} \def\@LN@ifgreat@critical{% \ifnum\c@linenumber<\c@firstlinenumber \expandafter \@gobble \fi}% % (/New v4.31) % % The default % value of ~\c@firstlinenumber~ %% v4.31 % is 0. This is best for what one would expect from modulo % printing. \let\c@firstlinenumber=\z@ % % For usage and effects of ~\modulolinenumbers~ and %% v4.31 % ~\firstlinenumber~, please consult section_\ref{s:UserCmds}. % Two details on ~\firstlinenumber~ here: % (i)_~\firstlinenumber~ acts on a paragraph if and only if % (a)_the paragraph is broken into lines ``in line-numbering % mode'' (after ~\linenumbers~, e.g.); % (b)_it is the last occurrence of a ~\firstlinenumber~ % before or in the paragraph. % (The practical applications of this that I can imagine % don't seem appealing to me.) % Cf._the explanation above of how ~\modulolinenumbers~ and % ~\firstlinenumber~ interact---for this and for (ii), % which is concerned with possible arguments for % ~\firstlinenumber~. % % Note that the line numbers of the present section % demonstrate the two devices. (/New v4.00) %%\setcounter{linenumbermodulo}{5} \chardef\c@linenumbermodulo=5 % v4.2; ugly? \modulolinenumbers[1] % (New v4.22) The new implementation through ~\chardef~ % decreases the functionality and raises certain compatibility % problems. I face this without fear. The maximum modulo value % is now ~255~. I expect that this suffices for usual applications. % However, some users have ``abused'' ~lineno.sty~ to get % ~ednotes.sty~ features without line numbers, so have set the % modulo to a value beyond the total number of lines in their % edition. This ought to be replaced by % ~\let\makeLineNumber\relax~. (/New v4.22)
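% % For illustration, here is how a document might combine the % two devices (a usage sketch only; the values are arbitrary): % \begin{verbatim} % \usepackage{lineno} % in the preamble % ... % \linenumbers % \modulolinenumbers[4] % print multiples of 4 only ... % \firstlinenumber{5} % ... starting at 5: 5, 9, 13, ... % \end{verbatim} % A ~\ref~ to a ~\linelabel~ed line still yields the exact % number, whether or not that number is printed in the margin.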
% % \section{% % Package options % \unskip}\label{s:Opts} % % (New v4.1) % The last heading formerly was the heading of what is now % subsection_\ref{ss:v3opts}. The options declared there were % said to execute user commands only. This was wrong already % concerning ~displaymath~ and ~hyperref~. At least, however, % these options gave no, or almost no, occasion to skip definitions % or allocations. This is different with the options that we now % insert. % %% (New v4.2) v4.00 moved the ``options'' below the %% ``extensions''. This was bad with ~\do@mlineno~ in %% subsection_\ref{ss:v3opts} which is to control %% subsection_\ref{ss:display}---undone here. (/New v4.2) % % \subsection{% % Extended referencing to line numbers. (v4.2) % \unskip} % This subsection explains and declares package option ~addpageno~. %% v4.31 % % If a line to whose number you refer by ~\ref~ is not on the % present page, it may be useful to add the number of the page % on which the line occurs---and perhaps it should not be added % otherwise. In general, you could use the standard \LaTeX\ % package varioref for this. However, the latter usually % produces verbose output like `on the preceding page'--- % unless customized---, while in critical editions, e.g., one % may prefer just adding the page number and some mark on the % left of the line number, irrespective of how far away the % page is, etc. To support this, package option ~addpageno~ % provides a command ~\vpagelineref~ to be used in place of % ~\ref~. This produces, e.g., `34.15' when referring to line_15 % on page_34 while the present page is not 34. You can customize % the outcome; see the package file ~vplref.sty~, where the code % and further details are. You may conceive of % ~\vpagelineref~ as a certain customization of varioref's % ~\vref~. % % This implies that option ~addpageno~ requires the files % ~vplref.sty~ and ~varioref.sty~. ~addpageno~ automatically % loads both of them. Yet you can also load ~varioref.sty~ % on your own to use its package options. % % Of course, you might better introduce a shorter command name % for ~\vpagelineref~ for your work; we cannot predict % here what shorthand will fit your work. E.g., % ~\newcommand{\lref}{\vpagelineref}~. % % If you really want to add the page number in \emph{any} case, % use, e.g., some ~\myref~ instead of ~\ref~, after % \[~\newcommand*{\myref}[1]{\pageref{#1}.\ref{#1}}~\] % or whatever you like. You don't need the ~addpageno~ option in % this case. % % ~addpageno~ is due to a suggestion by Sergei Mariev. \DeclareOption{addpageno}{% \AtEndOfPackage{\RequirePackage{vplref}[2005/04/25]}} % \subsection{% % \scs{linelabel} in math mode % \unskip}\label{ss:MathRef} % % We have made some first steps towards allowing ~\linelabel~ in % math mode. Because our code for this is presently experimental, % we leave it to the user to decide on the experiment by calling % option ~mathrefs~. We are in a hurry now and thus leave the % code, explanations, and discussion in the separate package % ~ednmath0.sty~. Maybe we later find the time to improve the % code and move the relevant content of ~ednmath0.sty~ here. % The optimal situation would be to define ~\linelabel~ from % the start so it works in math mode, omitting the ~mathrefs~ % option. % % Actually, this package even provides adjustments for analogously % allowing ~ednotes.sty~ commands in math mode. Loading the package % is postponed to ~\AtBeginDocument~ when we know whether these % adjustments are needed. \DeclareOption{mathrefs}{\AtBeginDocument {\RequirePackage{ednmath0}[2004/08/20]}} % % \subsection{% % Arrays, tabular environments (Revised v4.11) % \unskip}\label{ss:Tab} % % This subsection explains and declares package options %% v4.31 % ~edtable~, ~longtable~, and ~nolongtablepatch~. % % The standard \LaTeX\ tabular environments come as single % boxes, so the ~lineno.sty~ versions before v4.00 treated them as % (parts of) single lines, printing (at most) one line number % beside each and stepping the line number counter once only. % Moreover, ~\linelabel~s got lost. Of course, tables are % usually so high that you will want to treat each row like a % line. (Christian Tapp even desires that the lines of table % entries belonging to a single row are treated like ordinary % lines.)
Footnotes get lost in such environments as well, which % was bad for ~ednotes.sty~. % % We provide adjustments to count lines, print their numbers % etc.\ as desired at least for \emph{some} \LaTeX\ tabular % environments. (Like with other details, ``some'' is to some % extent explained in ~edtable.sty~.) We do this much as % with option ~mathrefs~ before. We leave code % and explanations in the separate package ~edtable.sty~. % (For wizards: this package provides adjustments for % ~ednotes.sty~ as well. However, in the present case we don't try % to avoid them unless ~ednotes.sty~ is loaded.) % Package option ~edtable~ % defines---by loading ~edtable.sty~---an environment ~{edtable}~ % which is able to change some \LaTeX\ tabular environments % with the desired effects. (v4.11: ~edtable.sty~ v1.3 counts % \LaTeX's ~{array}~ [etc.\@] as a ``tabular environment'' as % well.) % % The ~{edtable}~ environment doesn't help with ~longtable.sty~, % however. To make up for this, ~{longtable}~ is adjusted in a % different way---and this happens only when another ~lineno.sty~ % option ~longtable~ is called. In this case, option ~edtable~ % needn't be called explicitly: option ~longtable~ works as if % ~edtable~ had been called. % % Now, we are convinced that vertical spacing around % ~{longtable}~ works wrongly---see \LaTeX\ bugs database % tools/3180 and 3485, or see explanations in the package % ~ltabptch.sty~ (which is to be obtained from CTAN folder % \path{macros/latex/ltabptch}). Our conviction is so strong % that the ~longtable~ option loads---after ~longtable.sty~---the % patch package ~ltabptch.sty~. If the user doesn't want this % (maybe preferring her own arrangement with the vertical % spacing), she can forbid it by calling ~nolongtablepatch~. % % The following code just collects some choices, which are % then executed in section_\ref{ss:ExOpt}. We use an ~\if...~ % without ~\newif~ since ~\if...true~ and ~\if...false~ % would occur at most twice and only within the present % package. (~\AtEndOfClass{\RequirePackage{edtable}}~ % could be used instead, I just overlooked this. Now I don't % change it because it allows changing the version requirement % in one place only.) \let\if@LN@edtable\iffalse \DeclareOption{edtable}{\let\if@LN@edtable\iftrue} \DeclareOption{longtable}{\let\if@LN@edtable\iftrue \PassOptionsToPackage{longtable}{edtable}} \DeclareOption{nolongtablepatch}{% \PassOptionsToPackage{nolongtablepatch}{edtable}} % (/New v4.1) % % \subsection{% % Switch among settings % \unskip}\label{ss:v3opts} % % There is a bunch of package options that execute %% v4.2 %% There is a bunch of package options, all of them executing %% executing only user commands (see below). %% Cf. start of section. % user commands only. % % Options ~left~ (~right~) put the line numbers on the left % (right) margin. This works in all modes. ~left~ is the % default. \DeclareOption{left}{\leftlinenumbers*} \DeclareOption{right}{\rightlinenumbers*} % Option ~switch~ (~switch*~) puts the line numbers on the % outer (inner) margin of the text. This requires running % the pagewise mode, but we turn off the page offset % subtraction, getting sort of running numbers again. The % ~pagewise~ option may restore true pagewise mode later.
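% For instance, a two-sided document that wants the numbers on % the outer margin, restarting on every page and printed at % multiples of five only, might say (a usage sketch only): % \begin{verse} % ~\usepackage[switch,pagewise,modulo]{lineno}~ % \end{verse}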
\DeclareOption{switch}{\setpagewiselinenumbers \switchlinenumbers \runningpagewiselinenumbers} \DeclareOption{switch*}{\setpagewiselinenumbers \switchlinenumbers*% \runningpagewiselinenumbers} % In twocolumn mode, we can switch the line numbers to % the outer margin, and/or start with number 1 in each % column. Margin switching is covered by the ~switch~ % options. \DeclareOption{columnwise}{\setpagewiselinenumbers \columnwiselinenumberstrue \realpagewiselinenumbers} % The options ~pagewise~ and ~running~ select the major % linenumber mechanism. ~running~ line numbers refer to a real % counter value, which can be reset for any paragraph, % even getting multiple paragraphs on one page starting % with line number one. ~pagewise~ line numbers get a % unique hidden number within the document, but with the % opportunity to establish the page on which they finally % come to rest. This allows the subtraction of the page % offset, getting the numbers starting with 1 on top of each % page, and margin switching in twoside formats becomes % possible. The default mode is ~running~. % % The order of declaration of the options is important here: % ~pagewise~ must come after ~switch~, to override running % pagewise mode. ~running~ comes last, to reset the running % line number mode, e.g., after selecting margin switch mode % for ~pagewise~ running. Once more, if you specify all % three of the options ~[switch,pagewise,running]~, the % result is almost nothing, but if you later say % ~\pagewiselinenumbers~, you get margin switching, with % real pagewise line numbers. % \DeclareOption{pagewise}{\setpagewiselinenumbers \realpagewiselinenumbers} \DeclareOption{running}{\setrunninglinenumbers} % The option ~modulo~ causes only those line numbers to be % printed which are multiples of five. \DeclareOption{modulo}{\modulolinenumbers\relax} % Option ~modulo*~ modifies ~modulo~ to work like % ~\modulolinenumbers*~---see section_\ref{s:UserCmds}. \DeclareOption{modulo*}{\modulolinenumbers*\relax} % The package option ~mathlines~ switches the behavior of % the ~{linenomath}~ environment with that of its star-form. % Without this option, the ~{linenomath}~ environment does % not number the lines of the display, while the star-form % does. With this option, it's just the opposite. % %%% 1999-06-10: renamed ~displaymath~ to ~mathlines~. \DeclareOption{mathlines}{\linenumberdisplaymath} % ~displaymath~ now calls for wrappers of the standard % \LaTeX\ display math environment. This was previously % done by ~mlineno.sty~. % % (New v4.3) Option `displaymath' becomes default according % to Erik \mbox{Luijten}'s suggestion. I was finally convinced % of this as soon as I discovered how to avoid a spurious line % number above ~\begin{linenomath}~ (subsection_\ref{ss:DM}). % ~\endlinenomath~ provides ~\ignorespaces~, so what could go % wrong now? \DeclareOption{displaymath}{\PackageWarningNoLine{lineno}{% Option [displaymath] is obsolete -- default now!}} %% %%\let\do@mlineno\relax %%\DeclareOption{displaymath}{\let\do@mlineno\@empty} % (/New v4.3) % % \subsection{% % Compatibility with \texttt{hyperref} %% own subsec. v4.3. % \unskip} % The ~hyperref~ package, via ~nameref~, requires three more % groups in the second argument of a ~\newlabel~. Well, why % shouldn't it get them? (New v3.07) The presence of the % ~nameref~ package is now detected automatically % ~\AtBeginDocument~.
(/New v3.07) (Fixed in v3.09) We try % to be smart, and test ~\AtBeginDocument~ if the ~nameref~ % package is loaded, but ~hyperref~ postpones the loading of % ~nameref~ too, so this is all in vain. % % (New v4.3) But we can also test at the first ~\linelabel~. % Regarding the error-message for misplaced ~\linelabel~ from v4.11: % previously, ~\linenumbers~ made ~\linelabel~ the genuine % version from the start. This doesn't work % now, since ~\@LN@linelabel~ may change its meaning after the % first ~\linenumbers~ and before a next one (if there is one). % (/New v4.3) \DeclareOption{hyperref}{\PackageWarningNoLine{lineno}{% Option [hyperref] is obsolete. \MessageBreak The hyperref package is detected automatically.}} \AtBeginDocument{% \@ifpackageloaded{nameref}{% %% % (New v4.3) ``Global'' is merely ``symbolic'' ~\AtBeginDoc...~. % If ~nameref~ is not detected here, the next ~\@LN@linelabel~ % will do almost the same, then globally indeed. %% \gdef\@LN@ExtraLabelItems{{}{}{}}% }{% \global\let\@LN@@linelabel\@LN@linelabel \gdef\@LN@linelabel{% %% % ~\@ifpackageloaded~ is ``preamble only''; its---very % internal---preamble definition is replicated here: %% \expandafter \ifx\csname ver@nameref.sty\endcsname\relax \else \gdef\@LN@ExtraLabelItems{{}{}{}}% \fi %% % Now aim at the ``usual'' behaviour: %% \global\let\@LN@linelabel\@LN@@linelabel \global\let\@LN@@linelabel\relax \@LN@linelabel }% }% } % (/New v4.3) % % (New v4.1) % \subsection{% % A note on calling so many options % \unskip} % % The number of package options may stimulate worrying about how to % \emph{enter} all the options that one would like to use---they may % not fit into one line. Fortunately, you can safely break code lines % after the commas separating the option names in the ~\usepackage~ % command (no comment marks needed). % % \subsection{% % Execute options % \unskip}\label{ss:ExOpt} % % We stop declaring options and execute the ones that are % called by the user. (/New v4.1) \ProcessOptions % (New v4.1) Now we know whether ~edtable.sty~ is wanted % and (if it is) with which options it is to be called. \if@LN@edtable \RequirePackage{edtable}[2005/03/07] \fi % (/New v4.1) % % \section{% % Former package extensions % \label{s:Xt}\unskip} % % The extensions in this section were previously supplied % in separate ~.sty~ files. % % \subsection{% % $display math$ % \unskip}\label{ss:display} %% (New v4.32) % (New v4.3) From now on, you no longer need to type % the ~{linenomath}~ environment with the ~\[~, ~{equation}~, % and ~{eqnarray}~ environments---and you no longer need to % use the former package option ~displaymath~ for this feature. % (/New v4.3) %% (/New v4.32) % % The standard \LaTeX\ display math environments are % wrapped in a ~{linenomath}~ environment. % % (New 3.05) The ~[fleqn]~ option of the standard % \LaTeX\ classes defines the display math % environments such that line numbers appear just % fine. Thus, we need not do any tricks when % ~[fleqn]~ is loaded, as indicated by the presence of % the ~\mathindent~ register. (/New 3.05) % % (New 3.05a) For ~{eqnarray}~s we rather keep the % old trick. (/New 3.05a) % % (New 3.08) Wrap ~\[~ and ~\]~ into ~{linenomath}~, % instead of ~{displaymath}~. Also save the definition % of ~\equation~, instead of replicating the current % \LaTeX\ definition. (/New 3.08)
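% % For instance, with these wrappers in effect, a document % needs nothing beyond ~\linenumbers~ (a usage sketch only): % \begin{verbatim} % \linenumbers % Some numbered text before the display % \begin{equation} % E = mc^2 % \end{equation} % and numbered text after it. % \end{verbatim} % The lines around the display get their numbers without a % manual ~{linenomath}~; the lines of the display itself are % numbered only with option ~mathlines~ or with the star-form % ~{linenomath*}~.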
%%\ifx\do@mlineno\@empty \@ifundefined{mathindent}{ %% \AtBeginDocument{% \let\LN@displaymath\[% \let\LN@enddisplaymath\]% \renewcommand\[{\begin{linenomath}\LN@displaymath}% \renewcommand\]{\LN@enddisplaymath\end{linenomath}}% % \let\LN@equation\equation \let\LN@endequation\endequation \renewenvironment{equation}% {\linenomath\LN@equation}% {\LN@endequation\endlinenomath}% %% } }{}% \@ifundefined{mathindent} -- 3rd arg v4.2, was \par! %%\AtBeginDocument{% \let\LN@eqnarray\eqnarray \let\LN@endeqnarray\endeqnarray \renewenvironment{eqnarray}% {\linenomath\LN@eqnarray}% {\LN@endeqnarray\endlinenomath}% %%} %%\fi % (UL) Indeed. The \LaTeX\ macros are saved for % unnumbered mode, which is detected by ~\linenomath~. % (/UL) % % \subsection{% % Line numbers in internal vertical mode % \unskip}\label{ss:ILN} % % The command ~\internallinenumbers~ adds line numbers in % internal vertical mode, but with limitations: we assume % a fixed baseline skip. % % (v4.22) v3.10 provided a global (~\global\advance~) % as well as a local version (star-form, using % ~\c@internallinenumber~). ~\resetlinenumber~ acted % locally and was here used with the global version---save % stack danger, \TeX book p._301---in v4.00 I % disabled the global version therefore. Now I find that % it is better to keep a global version, and the now global % ~\resetlinenumber~ is perfect for this. The global version % allows continuing the ``internal'' numbers in the ensuing % ``external'' text, and---unless reset by the bracketed % argument---continuing the above series of line numbers. % As with v3.10, the local version always starts with % line number one. A new ~\@LN@iglobal~ steps ~\global~ly % in the global version, otherwise it is ~\relax~. % (I also remove all my stupid discussions as of v4.00. % And I use ~\newcommand~.) (v4.22) \let\@LN@iglobal\global % v4.22 \newcommand\internallinenumbers{\setrunninglinenumbers \let\@@par\internallinenumberpar \ifx\@par\@@@par\let\@par\internallinenumberpar\fi \ifx\par\@@@par\let\par\internallinenumberpar\fi \ifx\@par\linenumberpar\let\@par\internallinenumberpar\fi \ifx\par\linenumberpar\let\par\internallinenumberpar\fi \@ifnextchar[{\resetlinenumber}%] {\@ifstar{\let\c@linenumber\c@internallinenumber \let\@LN@iglobal\relax % v4.22 \c@linenumber\@ne}{}}% } \let\endinternallinenumbers\endlinenumbers \@namedef{internallinenumbers*}{\internallinenumbers*} \expandafter\let\csname endinternallinenumbers*\endcsname\endlinenumbers \newcount\c@internallinenumber \newcount\c@internallinenumbers \newcommand\internallinenumberpar{% \ifvmode\@@@par\else\ifinner\@@@par\else\@@@par \begingroup \c@internallinenumbers\prevgraf \setbox\@tempboxa\hbox{\vbox{\makeinternalLinenumbers}}% \dp\@tempboxa\prevdepth \ht\@tempboxa\z@ \nobreak\vskip-\prevdepth \nointerlineskip\box\@tempboxa \endgroup \fi\fi } \newcommand\makeinternalLinenumbers{% \ifnum\c@internallinenumbers>\z@ % v4.2 \hb@xt@\z@{\makeLineNumber}% \@LN@iglobal % v4.22 \advance\c@linenumber\@ne \advance\c@internallinenumbers\m@ne \expandafter\makeinternalLinenumbers\fi } % TODO v4.4+: star: line numbers right!? cf. lnocapt.sty % % \subsection{% % Line number references with offset % \unskip} % % This extension defines macros to refer to line % numbers with an offset, e.g., to refer to a line % which cannot be labeled directly (display math). % This was formerly known as ~rlineno.sty~. % % To refer to a pagewise line number with offset: % \begin{quote} % ~\linerefp[<offset>]{~