ordinal/0000755000176200001440000000000013575527472011723 5ustar liggesusersordinal/NAMESPACE0000644000176200001440000001033713332050747013131 0ustar liggesusersuseDynLib("ordinal", .registration = TRUE) importFrom(graphics, plot, par, abline, lines, points, contour) importFrom(grDevices, dev.interactive, devAskNewPage) importFrom(utils, "combn", "packageDescription", "as.roman") importFrom(ucminf, ucminf) importFrom(numDeriv, grad, hessian) importFrom("stats", ".checkMFClasses", ".getXlevels", "AIC", "add.scope", "approx", "as.formula", "binomial", "coef", "confint", "dcauchy", "dlogis", "dnorm", "drop.scope", "drop.terms", "extractAIC", "fitted", "formula", "glm.fit", "is.empty.model", "logLik", "model.frame", "model.matrix", "model.offset", "model.response", "model.weights", "na.pass", "napredict", "naprint", "nlminb", "optim", "pcauchy", "pchisq", "pgamma", "plogis", "pnorm", "printCoefmat", "profile", "qchisq", "qlogis", "qnorm", "runif", "setNames", "spline", "terms", "update.formula", "vcov", "nobs", "delete.response", "lm.fit", "resid") ## importFrom(stats, ## nobs) import(methods) ## import(stats) ## importFrom(methods, ## as, ## checkAtAssignment, ## loadMethod) import(Matrix) ## importFrom(nlme, ## ranef, ## VarCorr) ## importFrom(numDeriv, ## hessian, ## grad) importFrom(MASS, ginv, addterm, dropterm) ## importFrom(stats, ## coef, ## confint, ## nobs, ## logLik, ## profile, ## vcov, ## extractAIC, ## anova, ## fitted## , ## ## terms ## ## update ## ) # Functions: export(clm) export(clm.fit) export(clmm) export(clm.control) export(clmm.control) export(slice) export(convergence) export(drop.coef) export(nominal_test) export(scale_test) ## NOTE: Also exporting ranef.clmm and VarCorr.clmm to make sure the ## clmm-methods work after loading nlme (or lme4): export(ranef, ranef.clmm, condVar, VarCorr, VarCorr.clmm) ## export(ranef, ranef.clmm, condVar, condVar.clmm, VarCorr, VarCorr.clmm) export(gnorm, glogis, gcauchy, pgumbel, dgumbel, ggumbel, qgumbel, rgumbel, plgamma, dlgamma, glgamma ## , ## pAO, dAO, gAO, ) ## Methods: S3method(clm.fit, default) S3method(clm.fit, factor) S3method(print, clm) S3method(vcov, clm) S3method(summary, clm) S3method(print, summary.clm) S3method(convergence, clm) S3method(print, convergence.clm) S3method(slice, clm) S3method(plot, slice.clm) S3method(anova, clm) S3method(print, anova.clm) S3method(predict, clm) S3method(coef, clm) S3method(nobs, clm) S3method(coef, summary.clm) S3method(scale_test, clm) S3method(nominal_test, clm) S3method(profile, clm) S3method(confint, clm) S3method(confint, profile.clm) S3method(plot, profile.clm) S3method(logLik, clm) S3method(extractAIC, clm) S3method(model.matrix, clm) S3method(model.frame, clm) S3method(terms, clm) S3method(print, clmm) S3method(vcov, clmm) S3method(summary, clmm) S3method(print, summary.clmm) S3method(logLik, clmm) S3method(extractAIC, clmm) S3method(anova, clmm) S3method(nobs, clmm) ## S3method(profile, clmm) ## S3method(confint, profile.clmm) ## S3method(plot, profile.clmm) ## S3method(update, clmm) ## S3method(fixef, clmm) S3method(ranef, clmm) S3method(condVar, clmm) S3method(VarCorr, clmm) ################################################################## ### clm2 stuff: ## Functions: export(clm2) export(clmm2) export(clm2.control) export(clmm2.control) ## Methods: S3method(print, clm2) S3method(vcov, clm2) S3method(summary, clm2) S3method(print, summary.clm2) S3method(anova, clm2) S3method(predict, clm2) S3method(profile, clm2) S3method(confint, clm2) S3method(confint, profile.clm2) 
S3method(plot, profile.clm2)
S3method(logLik, clm2)
S3method(extractAIC, clm2)
S3method(update, clm2)
S3method(dropterm, clm2)
S3method(addterm, clm2)
S3method(print, clmm2)
S3method(vcov, clmm2)
S3method(summary, clmm2)
S3method(print, summary.clmm2)
S3method(anova, clmm2)
S3method(profile, clmm2)
S3method(confint, profile.clmm2)
S3method(plot, profile.clmm2)
S3method(update, clmm2)
ordinal/LICENCE.note0000644000176200001440000000155313277531311013642 0ustar liggesusers
Copyrights
==========

All files are copyright (C) 2011 R. H. B. Christensen with all rights
assigned to R. H. B. Christensen

Licence
=======

This is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2 or 3 of the License (at your
option).

This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.

Files share/licenses/GPL-2 and share/licenses/GPL-3 in the R (source
or binary) distribution are copies of versions 2 and 3 of the 'GNU
General Public License'. These can also be viewed at
http://www.r-project.org/licenses/

Rune.Haubo@gmail.com
ordinal/data/0000755000176200001440000000000012174033664012621 5ustar liggesusers
ordinal/data/income.rda0000644000176200001440000000047412174033664014570 0ustar liggesusers
[binary RData file: income.rda (compressed contents omitted)]
ordinal/data/soup.rda0000644000176200001440000002170612174033664014305 0ustar liggesusers
[binary RData file: soup.rda (compressed contents omitted)]
ordinal/man/0000755000176200001440000000000013575515131012463 5ustar liggesusers
ordinal/man/predict.Rd0000644000176200001440000001136013332050747014403 0ustar liggesusers
\name{predict.clm}
\alias{predict.clm}
\title{Predict Method for CLM fits}
\description{
  Obtains predictions from a cumulative link model.
}
\usage{
\method{predict}{clm}(object, newdata, se.fit = FALSE, interval = FALSE,
        level = 0.95,
        type = c("prob", "class", "cum.prob", "linear.predictor"),
        na.action = na.pass, ...)
}
\arguments{
  \item{object}{a fitted object of class inheriting from \code{clm}.}
  \item{newdata}{optionally, a data frame in which to look for variables
    with which to predict. Note that all predictor variables should be
    present having the same names as the variables used to fit the
    model. If the response variable is present in \code{newdata}
    predictions are obtained for the levels of the response as given by
    \code{newdata}. If the response variable is omitted from
    \code{newdata} predictions are obtained for all levels of the
    response variable for each of the rows of \code{newdata}.
  }
  \item{se.fit}{should standard errors of the predictions be provided?
    Not applicable and ignored when \code{type = "class"}.
  }
  \item{interval}{should confidence intervals for the predictions be
    provided? Not applicable and ignored when \code{type = "class"}.
  }
  \item{level}{the confidence level.
  }
  \item{type}{the type of predictions. \code{"prob"} gives
    probabilities, \code{"class"} gives predicted response class
    membership defined as highest probability prediction,
    \code{"cum.prob"} gives cumulative probabilities (see details)
    and \code{"linear.predictor"} gives predictions on the scale of
    the linear predictor including the boundary categories.
  }
  \item{na.action}{function determining what should be done with
    missing values in \code{newdata}. The default is to predict
    \code{NA}.
  }
  \item{\dots}{further arguments passed to or from other methods.
  }
}
\details{
  If \code{newdata} is omitted and \code{type = "prob"} a vector of
  fitted probabilities is returned, identical to the result from
  \code{fitted}.

  If \code{newdata} is supplied and the response variable is omitted,
  then predictions, standard errors and intervals are matrices rather
  than vectors with the same number of rows as \code{newdata} and with
  one column for each response class. If \code{type = "class"}
  predictions are always a vector.

  If \code{newdata} is omitted, the way missing values in the original
  fit are handled is determined by the \code{na.action} argument of that
  fit. If \code{na.action = na.omit} omitted cases will not appear in
  the residuals, whereas if \code{na.action = na.exclude} they will
  appear (in predictions, standard errors or interval limits), with
  residual value \code{NA}. See also \code{\link{napredict}}.

  If \code{type = "cum.prob"} or \code{type = "linear.predictor"} there
  will be two sets of predictions, standard errors and intervals; one
  for j and one for j-1 (in the usual notation) where j = 1, ..., J
  index the response classes.

  If \code{newdata} is supplied and the response variable is omitted,
  then \code{predict.clm} returns much the same thing as
  \code{predict.polr} (matrices of predictions). Similarly, if
  \code{type = "class"}.
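
  As a minimal sketch of this behaviour (using the \code{wine} data from
  this package; variable and level names follow the examples below):
  \preformatted{
## With the response omitted from newdata, type = "prob" gives a
## matrix with one column per response class; type = "class" gives a
## single (factor) prediction per row of newdata:
fm <- clm(rating ~ contact + temp, data = wine)
nd <- expand.grid(temp = c("cold", "warm"), contact = c("no", "yes"))
pr <- predict(fm, newdata = nd, type = "prob")$fit
dim(pr)  ## 4 rows (cases in nd) by 5 columns (levels of rating)
predict(fm, newdata = nd, type = "class")$fit  ## a factor of length 4
  }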
  If the fit is rank-deficient, some of the columns of the design matrix
  will have been dropped. Prediction from such a fit only makes sense if
  newdata is contained in the same subspace as the original data. That
  cannot be checked accurately, so a warning is issued
  (cf. \code{\link{predict.lm}}).

  If a flexible link function is used (\code{Aranda-Ordaz} or
  \code{log-gamma}) standard errors and confidence intervals of
  predictions do not take the uncertainty in the link-parameter into
  account.
}
\value{
  A list containing the following components
  \item{fit}{predictions or fitted values if \code{newdata} is not
    supplied.
  }
  \item{se.fit}{if \code{se.fit=TRUE} standard errors of the
    predictions, otherwise \code{NULL}.
  }
  \item{upr, lwr}{if \code{interval=TRUE} lower and upper confidence
    limits.}
}
\author{Rune Haubo B Christensen}
\seealso{
  \code{\link[ordinal]{clm}}, \code{\link[ordinal]{clmm}}.
}
\examples{
## simple model:
fm1 <- clm(rating ~ contact + temp, data=wine)
summary(fm1)

## Fitted values with standard errors and confidence intervals:
predict(fm1, se.fit=TRUE, interval=TRUE) # type="prob"

## class predictions for the observations:
predict(fm1, type="class")

newData <- expand.grid(temp = c("cold", "warm"),
                       contact = c("no", "yes"))

## Predicted probabilities in all five response categories for each of
## the four cases in newData:
predict(fm1, newdata=newData, type="prob")

## now include standard errors and intervals:
predict(fm1, newdata=newData, se.fit=TRUE, interval=TRUE, type="prob")
}
\keyword{models}
ordinal/man/confint.clmmOld.Rd0000644000176200001440000001075012176227250016001 0ustar liggesusers
\name{profile.clmm2}
\alias{profile.clmm2}
\alias{confint.clmm2}
\alias{confint.profile.clmm2}
\alias{plot.profile.clmm2}
\title{
  Confidence intervals and profile likelihoods for the standard
  deviation of the random term in cumulative link mixed models
}
\description{
  Computes confidence intervals from the profiled likelihood for the
  standard deviation of the random term in a fitted cumulative link
  mixed model, or plots the associated profile likelihood function.
}
\usage{
\method{confint}{profile.clmm2}(object, parm = seq_along(Pnames),
        level = 0.95, \dots)
\method{profile}{clmm2}(fitted, alpha = 0.01, range, nSteps = 20,
        trace = 1, \dots)
\method{plot}{profile.clmm2}(x, parm = seq_along(Pnames),
        level = c(0.95, 0.99), Log = FALSE, relative = TRUE,
        fig = TRUE, n = 1e3, ..., ylim = NULL)
}
\arguments{
  \item{object}{
    a fitted \code{profile.clmm2} object.
  }
  \item{fitted}{
    a fitted \code{\link{clmm2}} object.
  }
  \item{x}{a \code{profile.clmm2} object.
  }
  \item{parm}{
    For \code{confint.profile.clmm2}: a specification of which
    parameters are to be given confidence intervals, either a vector of
    numbers or a vector of names. If missing, all parameters are
    considered. Currently only \code{"stDev"} or \code{1} are supported.

    For \code{plot.profile.clmm2}: a specification of which parameters
    the profile likelihood is to be plotted for, either a vector of
    numbers or a vector of names. If missing, all parameters are
    considered. Currently only \code{"stDev"} or \code{1} are supported.
  }
  \item{level}{
    the confidence level required. Observe that the model has to be
    profiled in the appropriate region; otherwise the limits are
    \code{NA}.
  }
  \item{trace}{
    logical. Should profiling be traced? Defaults to \code{TRUE} due to
    the time-consuming nature of the computation.
  }
  \item{alpha}{Determines the range of profiling.
    By default the likelihood is profiled approximately in the 99\%
    confidence interval region as determined by the Wald
    approximation. This is usually sufficient for 95\% profile likelihood
    confidence limits.
  }
  \item{range}{if range is specified, this overrules the range
    computation based on \code{alpha}. \code{range} should be all
    positive and \code{stDev} is profiled in \code{range(range)}.
  }
  \item{nSteps}{the number of points at which to profile the likelihood
    function. This determines the resolution and accuracy of the profile
    likelihood function; higher values give a higher resolution, but
    also longer computation times.
  }
  \item{Log}{should the profile likelihood be plotted on the log-scale?
  }
  \item{relative}{should the relative or the absolute likelihood be
    plotted?
  }
  \item{fig}{should the profile likelihood be plotted?
  }
  \item{n}{the no. points used in the spline interpolation of the
    profile likelihood for plotting.
  }
  \item{ylim}{overrules default y-limits on the plot of the profile
    likelihood.
  }
  \item{\dots}{
    additional argument(s), e.g. graphical parameters for the
    \code{plot} method.
  }
}
\details{
  A \code{confint.clmm2} method deliberately does not exist due to the
  time-consuming nature of the computations. The user is required to
  compute the profile object first and then call \code{confint} on the
  profile object to obtain profile likelihood confidence intervals.

  In \code{plot.profile.clmm2}: at least one of the \code{Log} and
  \code{relative} arguments has to be \code{TRUE}.
}
\value{
  \code{confint}:
  A matrix with columns giving lower and upper confidence
  limits. These will be labelled as (1-level)/2 and
  1 - (1-level)/2 in \% (by default 2.5\% and 97.5\%).

  \code{plot.profile.clmm2} invisibly returns the profile object.
}
\author{Rune Haubo B Christensen}
\seealso{
  \code{\link{profile}} and \code{\link{confint}}
}
\examples{
options(contrasts = c("contr.treatment", "contr.poly"))

if(require(lme4)) { ## access cbpp data
  cbpp2 <- rbind(cbpp[,-(2:3)], cbpp[,-(2:3)])
  cbpp2 <- within(cbpp2, {
    incidence <- as.factor(rep(0:1, each=nrow(cbpp)))
    freq <- with(cbpp, c(incidence, size - incidence))
  })

  ## Fit with Laplace approximation:
  fm1 <- clmm2(incidence ~ period, random = herd, weights = freq,
               data = cbpp2, Hess = 1)

  pr.fm1 <- profile(fm1)
  confint(pr.fm1)

  par(mfrow = c(2,2))
  plot(pr.fm1)
  plot(pr.fm1, Log=TRUE, relative = TRUE)
  plot(pr.fm1, Log=TRUE, relative = FALSE)
}
}
\keyword{models}
ordinal/man/predictOld.Rd0000644000176200001440000000573113331072710015040 0ustar liggesusers
\name{predict.clm2}
\alias{predict.clm2}
\alias{predict.clmm2}
\title{Predict Method for CLM fits}
\description{
  Obtains predictions from a cumulative link (mixed) model.
}
\usage{
\method{predict}{clm2}(object, newdata, ...)
%% \method{predict}{clmm}(object, newdata, ...)
}
\arguments{
  \item{object}{a fitted object of class inheriting from
    \code{clm2} including \code{clmm2} objects.}
  \item{newdata}{optionally, a data frame in which to look for variables
    with which to predict. Observe that the response variable should
    also be present.}
  \item{\dots}{further arguments passed to or from other methods.}
}
\details{
  This method does not duplicate the behavior of
  \code{predict.polr} in package \code{MASS} which produces a
  matrix instead of a vector of predictions. The behavior of
  \code{predict.polr} can be mimicked as shown in the examples.

  If \code{newdata} is not supplied, the fitted values are obtained. For
  \code{clmm2} fits this means predictions that are controlled for the
  observed value of the random effects.
  If the predictions for a random
  effect of zero, i.e. an average 'subject', are wanted, the same data
  used to fit the model should be supplied in the \code{newdata}
  argument. For \code{clm2} fits those two sets of predictions are
  identical.
}
\value{
  A vector of predicted probabilities.
}
\author{Rune Haubo B Christensen}
\seealso{
  \code{\link[ordinal]{clm2}}, \code{\link[ordinal]{clmm2}}.
}
\examples{
options(contrasts = c("contr.treatment", "contr.poly"))

## More manageable data set for less voluminous printing:
(tab26 <- with(soup, table("Product" = PROD, "Response" = SURENESS)))
dimnames(tab26)[[2]] <-
  c("Sure", "Not Sure", "Guess", "Guess", "Not Sure", "Sure")
dat26 <- expand.grid(sureness = as.factor(1:6), prod = c("Ref", "Test"))
dat26$wghts <- c(t(tab26))
dat26

m1 <- clm2(sureness ~ prod, scale = ~prod, data = dat26,
           weights = wghts, link = "logistic")
predict(m1)

mN1 <- clm2(sureness ~ 1, nominal = ~prod, data = dat26,
            weights = wghts)
predict(mN1)
predict(update(m1, scale = ~.-prod))

#################################
## Mimicking the behavior of predict.polr:
if(require(MASS)) {
  ## Fit model from polr example:
  fm1 <- clm2(Sat ~ Infl + Type + Cont, weights = Freq, data = housing)
  predict(fm1)

  set.seed(123)
  nlev <- 3
  y <- gl(nlev, 5)
  x <- as.numeric(y) + rnorm(15)
  fm.clm <- clm2(y ~ x)
  fm.polr <- polr(y ~ x)

  ## The equivalent of predict.polr(object, type = "probs"):
  (pmat.polr <- predict(fm.polr, type = "probs"))
  ndat <- expand.grid(y = gl(nlev,1), x = x)
  (pmat.clm <- matrix(predict(fm.clm, newdata = ndat), ncol=nlev,
                      byrow = TRUE))
  all.equal(c(pmat.clm), c(pmat.polr), tol = 1e-5) # TRUE

  ## The equivalent of predict.polr(object, type = "class"):
  (class.polr <- predict(fm.polr))
  (class.clm <- factor(apply(pmat.clm, 1, which.max)))
  all.equal(class.clm, class.polr) ## TRUE
}
}
\keyword{internal}
ordinal/man/addtermOld.Rd0000644000176200001440000000653413331072710015030 0ustar liggesusers
\name{addterm.clm2}
\alias{addterm.clm2}
\alias{dropterm.clm2}
\title{
  Try all one-term additions to and deletions from a model
}
\description{
  Try fitting all models that differ from the current model by adding or
  deleting a single term from those supplied while maintaining
  marginality.
}
\usage{
\method{addterm}{clm2}(object, scope, scale = 0,
        test = c("none", "Chisq"), k = 2, sorted = FALSE,
        trace = FALSE, which = c("location", "scale"), \dots)
\method{dropterm}{clm2}(object, scope, scale = 0,
        test = c("none", "Chisq"), k = 2, sorted = FALSE,
        trace = FALSE, which = c("location", "scale"), \dots)
}
\arguments{
  \item{object}{
    A \code{\link{clm2}} object.
  }
  \item{scope}{
    for \code{addterm}: a formula specifying a maximal model which
    should include the current one. All additional terms in the maximal
    model with all marginal terms in the original model are tried.
    For \code{dropterm}: a formula giving terms which might be
    dropped. By default, the model formula. Only terms that can be
    dropped and maintain marginality are actually tried.
  }
  \item{scale}{
    used in the definition of the AIC statistic for selecting the
    models. Specifying \code{scale} asserts that the dispersion is known.
  }
  \item{test}{
    should the results include a test statistic relative to the original
    model? The Chisq test is a likelihood-ratio test.
  }
  \item{k}{
    the multiple of the number of degrees of freedom used for the
    penalty. Only \code{k=2} gives the genuine AIC: \code{k = log(n)} is
    sometimes referred to as BIC or SBC.
  }
  \item{sorted}{
    should the results be sorted on the value of AIC?
} \item{trace}{ if \code{TRUE} additional information may be given on the fits as they are tried. } \item{which}{should additions or deletions occur in location or scale models? } \item{\dots}{ arguments passed to or from other methods. }} \value{ A table of class \code{"anova"} containing columns for the change in degrees of freedom, AIC and the likelihood ratio statistic. If \code{test = "Chisq"} a column also contains the p-value from the Chisq test. } \details{ The definition of AIC is only up to an additive constant because the likelihood function is only defined up to an additive constant. } \author{Rune Haubo B Christensen} \seealso{ \code{\link[ordinal]{clm2}}, \code{\link[=anova.clm2]{anova}}, \code{\link{addterm.default}} and \code{\link{dropterm.default}} } \examples{ options(contrasts = c("contr.treatment", "contr.poly")) if(require(MASS)) { ## dropterm, addterm, housing mB1 <- clm2(SURENESS ~ PROD + GENDER + SOUPTYPE, scale = ~ COLD, data = soup, link = "probit", Hess = FALSE) dropterm(mB1, test = "Chi") # or dropterm(mB1, which = "location", test = "Chi") dropterm(mB1, which = "scale", test = "Chi") addterm(mB1, scope = ~.^2, test = "Chi", which = "location") addterm(mB1, scope = ~ . + GENDER + SOUPTYPE, test = "Chi", which = "scale") addterm(mB1, scope = ~ . + AGEGROUP + SOUPFREQ, test = "Chi", which = "location") ## Fit model from polr example: fm1 <- clm2(Sat ~ Infl + Type + Cont, weights = Freq, data = housing) addterm(fm1, ~ Infl + Type + Cont, test= "Chisq", which = "scale") dropterm(fm1, test = "Chisq") } } \keyword{internal} ordinal/man/ranef.Rd0000644000176200001440000000535312176227043014051 0ustar liggesusers\name{ranef} \alias{ranef} \alias{condVar} \alias{ranef.clmm} \alias{condVar.clmm} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Extract conditional modes and conditional variances from clmm objects } \description{ The ranef function extracts the conditional modes of the random effects from a clmm object. That is, the modes of the distributions for the random effects given the observed data and estimated model parameters. In a Bayesian language they are posterior modes. The conditional variances are computed from the second order derivatives of the conditional distribution of the random effects. Note that these variances are computed at a fixed value of the model parameters and thus do not take the uncertainty of the latter into account. } \usage{ ranef(object, ...) condVar(object, ...) \method{ranef}{clmm}(object, condVar=FALSE, ...) \method{condVar}{clmm}(object, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{a \code{\link{clmm}} object. } \item{condVar}{ an optional logical argument indicating of conditional variances should be added as attributes to the conditional modes. } \item{\dots}{ currently not used by the \code{clmm} methods. } } \details{ The \code{ranef} method returns a list of \code{data.frame}s; one for each distinct grouping factor. Each \code{data.frame} has as many rows as there are levels for that grouping factor and as many columns as there are random effects for each level. For example a model can contain a random intercept (one column) or a random intercept and a random slope (two columns) for the same grouping factor. If conditional variances are requested, they are returned in the same structure as the conditional modes (random effect estimates/predictions). 
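
  As a brief sketch of this structure (\code{fm1} is the model fitted in
  the example below; values in the printed output would be the
  conditional modes):
  \preformatted{
re <- ranef(fm1, condVar = TRUE)
str(re$judge)              ## data.frame: one row per judge, one column
                           ## per random effect, here "(Intercept)"
attr(re$judge, "condVar")  ## conditional variances in the same layout
  }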
}
\value{
  The \code{ranef} method returns a list of \code{data.frame}s with the
  random effects predictions/estimates computed as conditional
  modes. If \code{condVar = TRUE} a \code{data.frame} with the
  conditional variances is stored as an attribute on each
  \code{data.frame} with conditional modes.

  The \code{condVar} method returns a list of \code{data.frame}s with
  the conditional variances. It is a convenience function that simply
  computes the conditional modes and variances, then extracts and
  returns only the latter.
}
\author{
  Rune Haubo B Christensen
}
\examples{

fm1 <- clmm(rating ~ contact + temp + (1|judge), data=wine)

## Extract random effect estimates/conditional modes:
re <- ranef(fm1, condVar=TRUE)

## Get conditional variances:
attr(re$judge, "condVar")
## Alternatively:
condVar(fm1)

}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{models}
ordinal/man/ordinal-package.Rd0000644000176200001440000001102613330271132015760 0ustar liggesusers
\name{ordinal-package}
\alias{ordinal-package}
\alias{ordinal}
\docType{package}
\title{
  Regression Models for Ordinal Data via Cumulative Link (Mixed) Models
}
\description{
  This package facilitates analysis of ordinal (ordered categorical)
  data via cumulative link models (CLMs) and cumulative link mixed
  models (CLMMs). Robust and efficient computational methods give
  speedy and accurate estimation. A wide range of methods for model fits
  aids the data analysis.
}
\details{
  \tabular{ll}{
    Package: \tab ordinal\cr
    Type: \tab Package\cr
    License: \tab GPL (>= 2)\cr
    LazyLoad: \tab yes\cr
  }

  This package implements cumulative link models and cumulative link
  models with normally distributed random effects, denoted cumulative
  link mixed (effects) models. Cumulative link models are also known as
  ordered regression models, proportional odds models, proportional
  hazards models for grouped survival times and ordered
  logit/probit/... models.

  Cumulative link models are fitted with \code{\link{clm}} and the main
  features are:
  \itemize{
    \item{A range of standard link functions are available.}
    \item{In addition to the standard location (additive) effects, scale
      (multiplicative) effects are also allowed.}
    \item{Nominal effects are allowed for any subset of the predictors
      --- these effects are also known as partial proportional odds
      effects when using the logit link.}
    \item{Restrictions can be imposed on the thresholds/cut-points,
      e.g., symmetry or equidistance.}
    \item{A (modified) Newton-Raphson algorithm provides the maximum
      likelihood estimates of the parameters.
      The estimation scheme is robust, fast and accurate.}
    \item{Rank-deficient designs are identified and unidentified
      coefficients exposed in \code{print} and \code{summary} methods as
      with \code{\link{glm}}.}
    \item{A suite of standard methods is available including
      \code{anova}, \code{add}/\code{drop}-methods, \code{step},
      \code{profile}, \code{confint}.}
    \item{A \code{slice} method facilitates illustration of the
      likelihood function and a \code{convergence} method summarizes the
      accuracy of the model estimation.}
    \item{The \code{predict} method can predict probabilities, response
      class-predictions and cumulative probabilities, and it provides
      standard errors and confidence intervals for the predictions.}
  }

  Cumulative link mixed models are fitted with \code{\link{clmm}} and
  the main features are:
  \itemize{
    \item{Any number of random effect terms can be included.}
    \item{The syntax for the model formula resembles that of
      \code{\link[lme4]{lmer}} from the \code{lme4} package.}
    \item{Nested random effects, crossed random effects and partially
      nested/crossed random effects are allowed.}
    \item{Estimation is via maximum likelihood using the Laplace
      approximation or adaptive Gauss-Hermite quadrature (one random
      effect).}
    \item{Vector-valued and correlated random effects such as random
      slopes (random coefficient models) are fitted with the Laplace
      approximation.}
    \item{Estimation employs sparse matrix methods from the
      \code{\link{Matrix}} package.}
    \item{During model fitting a Newton-Raphson algorithm updates the
      conditional modes of the random effects a large number of
      times. The likelihood function is optimized with a general purpose
      optimizer.}
  }

  A major update of the package in August 2011 introduced new and
  improved implementations of \code{\link{clm}} and
  \code{\link{clmm}}. The old implementations are available with
  \code{\link{clm2}} and \code{\link{clmm2}}. At the time of writing
  there is functionality in \code{clm2} and \code{clmm2} not yet
  available in \code{clm} and \code{clmm}. This includes flexible link
  functions (log-gamma and Aranda-Ordaz links) and a profile method for
  random effect variance parameters in CLMMs. The new implementations
  are expected to take over the old implementations at some point, hence
  the latter will eventually be \code{\link[=.Deprecated]{deprecated}}
  and \code{\link[=.Defunct]{defunct}}.
}
\author{
  Rune Haubo B Christensen

  Maintainer: Rune Haubo B Christensen
}
%% \references{
%%  ~~ Literature or other references for background information ~~
%% }
\keyword{ package }
%% \seealso{
%%  ~~ Optional links to other man pages, e.g. ~~
%%  ~~ \code{\link[:-package]{}} ~~
%% }
\examples{

## A simple cumulative link model:
fm1 <- clm(rating ~ contact + temp, data=wine)
summary(fm1)

## A simple cumulative link mixed model:
fmm1 <- clmm(rating ~ contact + temp + (1|judge), data=wine)
summary(fmm1)

}
ordinal/man/slice.clm.Rd0000644000176200001440000000701412176227250014623 0ustar liggesusers
\name{slice}
\alias{slice}
\alias{slice.clm}
\alias{plot.slice.clm}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
  Slice the likelihood of a clm
}
\description{
  Slice likelihood and plot the slice. This is useful for illustrating
  the likelihood surface around the MLE (maximum likelihood estimate)
  and provides graphics to substantiate (non-)convergence of a model
  fit. Also, the closeness of a quadratic approximation to the
  log-likelihood function can be inspected for relevant parameters. A
  slice is considerably less computationally demanding than a profile.
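
  A minimal sketch of the intended workflow (using the \code{wine} data
  from this package; the coefficient name \code{"tempwarm"} assumes the
  default treatment contrasts):
  \preformatted{
fm1 <- clm(rating ~ contact + temp, data = wine)
sl <- slice(fm1, parm = "tempwarm", lambda = 3)
str(sl$tempwarm)  ## data.frame: parameter values, log-likelihood and
                  ## (by default) its quadratic approximation
plot(sl)          ## visual check that the MLE sits at the optimum
  }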
} \usage{ slice(object, ...) \method{slice}{clm}(object, parm = seq_along(par), lambda = 3, grid = 100, quad.approx = TRUE, ...) \method{plot}{slice.clm}(x, parm = seq_along(x), type = c("quadratic", "linear"), plot.mle = TRUE, ask = prod(par("mfcol")) < length(parm) && dev.interactive(), ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{for the \code{clm} method an object of class \code{"clm"}, i.e., the result of a call to \code{clm}. } \item{x}{ a \code{slice.clm} object, i.e., the result of \code{slice(clm.object)}. } \item{parm}{ for \code{slice.clm} a numeric or character vector indexing parameters, for \code{plot.slice.clm} only a numeric vector is accepted. By default all parameters are selected. } \item{lambda}{ the number of curvature units on each side of the MLE the slice should cover. } \item{grid}{ the number of values at which to compute the log-likelihood for each parameter. } \item{quad.approx}{ compute and include the quadratic approximation to the log-likelihood function? } \item{type}{ \code{"quadratic"} plots the log-likelihood function which is approximately quadratic, and \code{"linear"} plots the signed square root of the log-likelihood function which is approximately linear. } \item{plot.mle}{ include a vertical line at the MLE (maximum likelihood estimate) when \code{type = "quadratic"}? Ignored for \code{type = "linear"}. } \item{ask}{ logical; if \code{TRUE}, the user is asked before each plot, see \code{\link{par}}\code{(ask=.)}. } \item{\dots}{ further arguments to \code{plot.default} for the plot method. Not used in the slice method. } } %% \details{ bla %% %% ~~ If necessary, more details than the description above ~~ %% } \value{ The \code{slice} method returns a list of \code{data.frame}s with one \code{data.frame} for each parameter slice. Each \code{data.frame} contains in the first column the values of the parameter and in the second column the values of the (positive) log-likelihood \code{"logLik"}. A third column is present if \code{quad.approx = TRUE} and contains the corresponding quadratic approximation to the log-likelihood. The original model fit is included as the attribute \code{"original.fit"}. The \code{plot} method produces a plot of the likelihood slice for each parameter. } \author{ Rune Haubo B Christensen } \examples{ ## fit model: fm1 <- clm(rating ~ contact + temp, data = wine) ## slice the likelihood: sl1 <- slice(fm1) ## three different ways to plot the slices: par(mfrow = c(2,3)) plot(sl1) plot(sl1, type = "quadratic", plot.mle = FALSE) plot(sl1, type = "linear") ## Verify convergence to the optimum: sl2 <- slice(fm1, lambda = 1e-5, quad.approx = FALSE) plot(sl2) } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{models} ordinal/man/clm.fit.Rd0000644000176200001440000000756713332050747014323 0ustar liggesusers\name{clm.fit} \alias{clm.fit} \alias{clm.fit.default} \alias{clm.fit.factor} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Fit Cumulative Link Models %% ~~function to do ... ~~ } \description{ A direct fitter of cumulative link models. } \usage{ clm.fit(y, ...) \method{clm.fit}{default}(y, ...) \method{clm.fit}{factor}(y, X, S, N, weights = rep(1, nrow(X)), offset = rep(0, nrow(X)), S.offset = rep(0, nrow(X)), control = list(), start, doFit=TRUE, link = c("logit", "probit", "cloglog", "loglog", "cauchit", "Aranda-Ordaz", "log-gamma"), threshold = c("flexible", "symmetric", "symmetric2", "equidistant"), ...) 
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{y}{for the default method a list of model components. For the
    factor method the response variable; a factor, preferably an ordered
    factor.
  }
  \item{X, S, N}{optional design matrices for the regression parameters,
    scale parameters and nominal parameters respectively.
  }
  \item{weights}{optional case weights.
  }
  \item{offset}{an optional offset.
  }
  \item{S.offset}{an optional offset for the scale part of the model.
  }
  \item{control}{a list of control parameters, optionally a call to
    \code{\link{clm.control}}.
  }
  \item{start}{an optional list of starting values of the form
    \code{c(alpha, beta, zeta)} for the thresholds and nominal effects
    (\code{alpha}), regression parameters (\code{beta}) and scale
    parameters (\code{zeta}).
  }
  \item{doFit}{logical for whether the model should be fit or the model
    environment should be returned.
  }
  \item{link}{the link function.
  }
  \item{threshold}{the threshold structure, see further at
    \code{\link{clm}}.
  }
  \item{\dots}{currently not used.}
}
\details{
  This function does almost the same thing that \code{\link{clm}} does:
  it fits a cumulative link model. The main differences are that
  \code{clm.fit} does not set up design matrices from formulae and only
  does minimal post processing after parameter estimation.

  Compared to \code{\link{clm}}, \code{clm.fit} does little to warn the
  user of any problems with data or model. However, \code{clm.fit} will
  attempt to identify column rank deficient designs. Any unidentified
  parameters are indicated in the \code{aliased} component of the fit.

  \code{clm.fit.factor} is not able to check if all thresholds are
  increasing when nominal effects are specified since it needs access to
  the terms object for the nominal model. If the terms object for the
  nominal model (\code{nom.terms}) is included in \code{y}, the default
  method is able to check if all thresholds are increasing.

  %% In contrast to \code{\link{clm}}, \code{clm.fit} allows non-positive
  %% weights.
}
\value{
  A list with the following components:
  \code{aliased, alpha, coefficients, cond.H, convergence, df.residual,
    edf, fitted.values, gradient, Hessian, logLik, maxGradient, message,
    n, niter, nobs, tJac, vcov}
  and optionally
  \code{beta, zeta}
  These components are documented in \code{\link{clm}}.
}
%% \references{ bla
%% %% ~put references to the literature/web site here ~
%% }
\author{
  Rune Haubo B Christensen
}
%% \note{ bla
%% %%  ~~further notes~~
%% }
%%
%% %% ~Make other sections like Warning with \section{Warning }{....} ~
%%
\seealso{
  \code{\link{clm}}
}
\examples{

## A simple example:
fm1 <- clm(rating ~ contact + temp, data=wine)
summary(fm1)
## get the model frame containing y and X:
mf1 <- update(fm1, method="design")
names(mf1)
res <- clm.fit(mf1$y, mf1$X) ## invoking the factor method
stopifnot(all.equal(coef(res), coef(fm1)))
names(res)

## Fitting with the default method:
mf1$control$method <- "Newton"
res2 <- clm.fit(mf1)
stopifnot(all.equal(coef(res2), coef(fm1)))

}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{models}
ordinal/man/gumbel.Rd0000644000176200001440000000614013575515041014226 0ustar liggesusers
\name{gumbel}
\alias{dgumbel}
\alias{pgumbel}
\alias{qgumbel}
\alias{rgumbel}
\alias{ggumbel}
\title{
  The Gumbel Distribution
  %%  ~~function to do ... ~~
}
\description{
  Density, distribution function, quantile function, random generation,
  and gradient of density of the extreme value (maximum and minimum)
  distributions.
  The Gumbel distribution is also known as the extreme value maximum
  distribution, the double-exponential distribution and the log-Weibull
  distribution.
  %%  ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
dgumbel(x, location = 0, scale = 1, log = FALSE, max = TRUE)
pgumbel(q, location = 0, scale = 1, lower.tail = TRUE, max = TRUE)
qgumbel(p, location = 0, scale = 1, lower.tail = TRUE, max = TRUE)
rgumbel(n, location = 0, scale = 1, max = TRUE)
ggumbel(x, max = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{x,q}{
    numeric vector of quantiles.
  }
  \item{p}{
    vector of probabilities.
  }
  \item{n}{
    number of observations.
  }
  \item{location}{
    numeric scalar.
  }
  \item{scale}{
    numeric scalar.
  }
  \item{lower.tail}{
    logical; if \code{TRUE} (default), probabilities are
    \eqn{P[X \leq x]}{P[X <= x]} otherwise, \eqn{P[X > x]}.
  }
  \item{log}{
    logical; if \code{TRUE}, probabilities p are given as log(p).
  }
  \item{max}{
    distribution for extreme maxima (default) or minima? The default
    corresponds to the standard right-skew Gumbel distribution.
  }
}
\details{
  \code{dgumbel}, \code{pgumbel} and \code{ggumbel} are implemented in C
  for speed and care is taken that 'correct' results are provided for
  values of \code{NA}, \code{NaN}, \code{Inf}, \code{-Inf} or just
  extremely small or large.

  The distribution functions, densities and gradients are used in the
  Newton-Raphson algorithms in fitting cumulative link models with
  \code{\link{clm}} and cumulative link mixed models with
  \code{\link{clmm}}.
}
\value{
  \code{pgumbel} gives the distribution function, \code{dgumbel}
  gives the density, \code{ggumbel} gives the gradient of the
  density, \code{qgumbel} is the quantile function, and
  \code{rgumbel} generates random deviates.
}
\references{
  \url{https://en.wikipedia.org/wiki/Gumbel_distribution}
}
\seealso{
  Gradients of densities are also implemented for the normal, logistic,
  cauchy, cf. \code{\link[=gnorm]{gfun}} and the log-gamma distribution,
  cf. \code{\link{lgamma}}.
}
\author{
  Rune Haubo B Christensen
}
\examples{

## Illustrating the symmetry of the distribution functions:
pgumbel(5) == 1 - pgumbel(-5, max=FALSE) ## TRUE
dgumbel(5) == dgumbel(-5, max=FALSE) ## TRUE
ggumbel(5) == -ggumbel(-5, max=FALSE) ## TRUE

## More examples:
x <- -5:5

(pp <- pgumbel(x))
qgumbel(pp)
dgumbel(x)
ggumbel(x)

(ppp <- pgumbel(x, max=FALSE))
## Observe that probabilities close to 0 are more accurately determined than
## probabilities close to 1:
qgumbel(ppp, max=FALSE)
dgumbel(x, max=FALSE)
ggumbel(x, max=FALSE)

## random deviates:
set.seed(1)
(r1 <- rgumbel(10))
set.seed(1)
r2 <- -rgumbel(10, max = FALSE)
all(r1 == r2) ## TRUE

}
\keyword{distribution}
ordinal/man/soup.Rd0000755000176200001440000000470011616740137013744 0ustar liggesusers
\name{soup}
\alias{soup}
\title{
  Discrimination study of packet soup
}
\description{
  The \code{soup} data frame has 1847 rows and 13 variables. 185
  respondents participated in an A-not A discrimination test with
  sureness. Before experimentation the respondents were familiarized
  with the reference product and during experimentation, the respondents
  were asked to rate samples on an ordered scale with six categories
  given by combinations of (reference, not reference) and (sure, not
  sure, guess) from 'reference, sure' = 1 to 'not reference, sure' = 6.
  %given by the levels of the \code{SURENESS} variable.
}
\usage{
soup
}
\format{
  \describe{
    \item{\code{RESP}}{
      factor with 185 levels: the respondents in the study.
    }
    \item{\code{PROD}}{
      factor with 2 levels: index reference and test products.
    }
    \item{\code{PRODID}}{
      factor with 6 levels: index reference and the five test product
      variants.
    }
    \item{\code{SURENESS}}{
      ordered factor with 6 levels: the respondents' ratings of soup
      samples.
    }
    \item{\code{DAY}}{
      factor with two levels: experimentation was split over two days.
    }
    \item{\code{SOUPTYPE}}{
      factor with three levels: the type of soup regularly consumed by
      the respondent.
    }
    \item{\code{SOUPFREQ}}{
      factor with 3 levels: the frequency with which the respondent
      consumes soup.
    }
    \item{\code{COLD}}{
      factor with two levels: does the respondent have a cold?
    }
    \item{\code{EASY}}{
      factor with ten levels: How easy did the respondent find the
      discrimination test? 1 = difficult, 10 = easy.
    }
    \item{\code{GENDER}}{
      factor with two levels: gender of the respondent.
    }
    \item{\code{AGEGROUP}}{
      factor with four levels: the age of the respondent.
    }
    \item{\code{LOCATION}}{
      factor with three levels: three different locations where
      experimentation took place.
    }
    %% \item{\code{SEQ}}{
    %%   integer vector: the sequence at which experimentation took
    %%   place. Numbering restarted at the second day of experimentation.
    %% }
}}
\source{
  Data are produced by Unilever Research. Permission to publish
  the data is granted.
}
\references{
  Christensen, R. H. B., Cleaver, G. and Brockhoff, P. B. (2011)
  Statistical and Thurstonian models for the A-not A protocol with and
  without sureness. \emph{Food Quality and Preference, 22}, pp. 542-549.
}
\keyword{datasets}
ordinal/man/income.Rd0000644000176200001440000000315712176227250014230 0ustar liggesusers
\name{income}
\alias{income}
\title{
  Income distribution (percentages) in the Northeast US
}
\description{
  Income distribution (percentages) in the Northeast US in 1960 and 1970
  adapted from McCullagh (1980).
}
\usage{
income
}
\format{
  \describe{
    \item{\code{year}}{
      year.
    }
    \item{\code{pct}}{
      percentage of population in income class per year.
    }
    \item{\code{income}}{
      income groups. The unit is thousands of constant (1973) US dollars.
    }
  }
}
\source{
  Data are adapted from McCullagh (1980).
}
\references{
  McCullagh, P. (1980) Regression Models for Ordinal Data.
  \emph{Journal of the Royal Statistical Society. Series B
    (Methodological)}, Vol. 42, No. 2., pp. 109-142.
}
\examples{

print(income)

## Convenient table:
(tab <- xtabs(pct ~ year + income, income))

## small rounding error in 1970:
rowSums(tab)

## compare link functions via the log-likelihood:
links <- c("logit", "probit", "cloglog", "loglog", "cauchit")
sapply(links, function(link) {
  clm(income ~ year, data=income, weights=pct, link=link)$logLik })
## a heavy tailed (cauchy) or left skew (cloglog) latent distribution
## fits best.

## The data are defined as:
income.levels <- c(0, 3, 5, 7, 10, 12, 15)
income <- paste(income.levels, c(rep("-", 6), "+"),
                c(income.levels[-1], ""), sep = "")
income <-
  data.frame(year=factor(rep(c("1960", "1970"), each = 7)),
             pct = c(6.5, 8.2, 11.3, 23.5, 15.6, 12.7, 22.2,
               4.3, 6, 7.7, 13.2, 10.5, 16.3, 42.1),
             income=factor(rep(income, 2), ordered=TRUE,
               levels=income))
}
\keyword{datasets}
ordinal/man/lgamma.Rd0000755000176200001440000000711111617032142014202 0ustar liggesusers
\name{lgamma}
\alias{plgamma}
\alias{dlgamma}
\alias{glgamma}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
  The log-gamma distribution
  %%  ~~function to do ... ~~
}
\description{
  Density, distribution function and gradient of density for the
  log-gamma distribution.
These are implemented in C for speed and care is taken that the correct results are provided for values of \code{NA}, \code{NaN}, \code{Inf}, \code{-Inf} or just extremely small or large values. The log-gamma is a flexible location-scale distribution on the real line with an extra parameter, \eqn{\lambda}. For \eqn{\lambda = 0} the distribution equals the normal or Gaussian distribution, and for \eqn{\lambda} equal to 1 and -1, the Gumbel minimum and maximum distributions are obtained. %% ~~ A concise (1-5 lines) description of what the function does. ~~ } \usage{ plgamma(q, lambda, lower.tail = TRUE) dlgamma(x, lambda, log = FALSE) glgamma(x, lambda) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x,q}{ numeric vector of quantiles. } \item{lambda}{ numerical scalar } %% \item{location}{ %% numeric scalar. %% } %% \item{scale}{ %% numeric scalar. %% } \item{lower.tail}{ logical; if \code{TRUE} (default), probabilities are \eqn{P[X \leq x]}{P[X <= x]} otherwise, \eqn{P[X > x]}. } \item{log}{ logical; if \code{TRUE}, probabilities p are given as log(p). } } \details{ If \eqn{\lambda < 0} the distribution is right skew, if \eqn{\lambda = 0} the distribution is symmetric (and equals the normal distribution), and if \eqn{\lambda > 0} the distribution is left skew. The log-gamma distribution function is defined as \ldots pending. The density and gradient of the density are defined as\ldots pending. These distribution functions, densities and gradients are used in the Newton-Raphson algorithms in fitting cumulative link models with \code{\link{clm2}} and cumulative link mixed models with \code{\link{clmm2}} using the log-gamma link. } \value{ \code{plgamma} gives the distribution function, \code{dlgamma} gives the density and \code{glgamma} gives the gradient of the density. } \references{ Genter, F. C. and Farewell, V. T. (1985) Goodness-of-link testing in ordinal regression models. \emph{The Canadian Journal of Statistics}, 13(1), 37-44. } \seealso{ Gradients of densities are also implemented for the normal, logistic, cauchy, cf. \code{\link[=gnorm]{gfun}} and the Gumbel distribution, cf. \code{\link[=dgumbel]{gumbel}}. } \author{ Rune Haubo B Christensen } \examples{ ## Illustrating the link to other distribution functions: x <- -5:5 plgamma(x, lambda = 0) == pnorm(x) all.equal(plgamma(x, lambda = -1), pgumbel(x)) ## TRUE, but: plgamma(x, lambda = -1) == pgumbel(x) plgamma(x, lambda = 1) == pgumbel(x, max = FALSE) dlgamma(x, lambda = 0) == dnorm(x) dlgamma(x, lambda = -1) == dgumbel(x) dlgamma(x, lambda = 1) == dgumbel(x, max = FALSE) glgamma(x, lambda = 0) == gnorm(x) all.equal(glgamma(x, lambda = -1), ggumbel(x)) ## TRUE, but: glgamma(x, lambda = -1) == ggumbel(x) all.equal(glgamma(x, lambda = 1), ggumbel(x, max = FALSE)) ## TRUE, but: glgamma(x, lambda = 1) == ggumbel(x, max = FALSE) ## There is a loss of accuracy, but the difference is very small: glgamma(x, lambda = 1) - ggumbel(x, max = FALSE) ## More examples: x <- -5:5 plgamma(x, lambda = .5) dlgamma(x, lambda = .5) glgamma(x, lambda = .5) } \keyword{distribution} ordinal/man/VarCorr.Rd0000644000176200001440000000317712176227043014336 0ustar liggesusers\name{VarCorr} \alias{VarCorr} \alias{VarCorr.clmm} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Extract variance and correlation parameters } \description{ The VarCorr function extracts the variance and (if present) correlation parameters for random effect terms in a cumulative link mixed model (CLMM) fitted with \code{clmm}. 
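
  A minimal sketch of typical use (using the \code{wine} data from this
  package; the attribute name \code{"stddev"} is assumed here, cf. the
  value section below):
  \preformatted{
fm1 <- clmm(rating ~ contact + temp + (1|judge), data = wine)
vc <- VarCorr(fm1)
vc$judge                  ## variance (matrix) for the judge term
attr(vc$judge, "stddev")  ## standard deviations stored as an attribute
                          ## (attribute name assumed)
  }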
}
\usage{
VarCorr(x, ...)
\method{VarCorr}{clmm}(x, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{x}{a \code{\link{clmm}} object.
  }
  \item{\dots}{
    currently not used by the \code{clmm} method.
  }
}
\details{
  The \code{VarCorr} method returns a list of variance-covariance
  matrices; one for each random effects term in the model. Each matrix
  holds the variances of the random effects for that term in the
  diagonal and correlation parameters in the off-diagonal, with the
  corresponding standard deviations provided as an attribute.
}
\value{
  A list of matrices with variances in the diagonal and correlation
  parameters in the off-diagonal --- one matrix for each random effects
  term in the model. Standard deviations are provided as attributes to
  the matrices.
}
\author{
  Rune Haubo B Christensen
}
\examples{

fm1 <- clmm(rating ~ contact + temp + (1|judge), data=wine)
VarCorr(fm1)

}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{models}
ordinal/man/clmm.controlOld.Rd0000755000176200001440000000322311617247306016024 0ustar liggesusers
\name{clmm2.control}
\alias{clmm2.control}
\title{Set control parameters for cumulative link mixed models}
\description{
  Set control parameters for cumulative link mixed models
}
\usage{
clmm2.control(method = c("ucminf", "nlminb", "model.frame"), ...,
              trace = 0, maxIter = 50, gradTol = 1e-4,
              maxLineIter = 50,
              innerCtrl = c("warnOnly", "noWarn", "giveError"))
}
\arguments{
  \item{method}{
    the optimizer used to maximize the marginal likelihood function.
  }
  \item{\dots}{control arguments passed on to the chosen optimizer; see
    \code{\link[ucminf]{ucminf}}, \code{\link{optim}}, and
    \code{\link{nlminb}} for details.
  }
  \item{trace}{numerical, if > 0 information is printed about and during
    the outer optimization process, if < 0 information is also printed
    about the inner optimization process. Defaults to \code{0}.
  }
  \item{maxIter}{the maximum number of Newton updates of the inner
    optimization. Defaults to \code{50}.
  }
  \item{gradTol}{the maximum absolute gradient of the inner
    optimization.
  }
  \item{maxLineIter}{the maximum number of step halvings allowed if a
    Newton(-Raphson) step overshoots during the inner optimization.
  }
  \item{innerCtrl}{the use of warnings/errors if the inner optimization
    fails to converge.
  }
}
\details{
  When the default optimizer, \code{ucminf}, is used, the default values
  of that optimizer's control options are changed to \code{grtol = 1e-5}
  and \code{grad = "central"}.
}
\value{
  a list of control parameters.
}
\author{Rune Haubo B Christensen}
\seealso{
  \code{\link{clmm2}}
}
\keyword{models}
ordinal/man/gfun.Rd0000755000176200001440000000375411617032222013713 0ustar liggesusers
\name{gfun}
\alias{gnorm}
\alias{glogis}
\alias{gcauchy}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
  Gradients of common densities
  %%  ~~function to do ... ~~
}
\description{
  Gradients of common density functions in their standard forms, i.e.,
  with zero location (mean) and unit scale. These are implemented in C
  for speed and care is taken that the correct results are provided for
  the argument being \code{NA}, \code{NaN}, \code{Inf}, \code{-Inf} or
  just extremely small or large.
  %%  ~~ A concise (1-5 lines) description of what the function does.
  ~~
}
\usage{
gnorm(x)
glogis(x)
gcauchy(x)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{x}{
    numeric vector of quantiles.
  }
}
\details{
  The gradients are given by:
  \itemize{
    \item{gnorm: If \eqn{f(x)} is the normal density with mean 0 and
      standard deviation 1, then the gradient is
      \deqn{f'(x) = -x f(x)}
    }
    \item{glogis: If \eqn{f(x)} is the logistic density with mean 0 and
      scale 1, then the gradient is
      \deqn{f'(x) = 2 \exp(-x)^2 (1 + \exp(-x))^{-3} -
        \exp(-x)(1+\exp(-x))^{-2}}
    }
    \item{gcauchy: If \eqn{f(x) = [\pi(1 + x^2)]^{-1}}{f(x) =
        1 / [pi (1 + x^2)]}
      is the cauchy density with mean 0 and scale 1, then the gradient
      is
      \deqn{f'(x) = -2x [\pi(1 + x^2)^2]^{-1}}{f'(x) =
        -2x / [pi (1 + x^2)^2]}
    }
  }
  These gradients are used in the Newton-Raphson algorithms in fitting
  cumulative link models with \code{\link{clm}} and cumulative link
  mixed models with \code{\link{clmm}}.
}
\value{
  a numeric vector of gradients.
}
\seealso{
  Gradients of densities are also implemented for the extreme value
  distribution (\code{\link[=dgumbel]{gumbel}}) and the log-gamma
  distribution (\code{\link[=lgamma]{log-gamma}}).
}
\author{
  Rune Haubo B Christensen
}
\examples{

x <- -5:5
gnorm(x)
glogis(x)
gcauchy(x)

}
\keyword{distribution}
ordinal/man/clmOld.Rd0000644000176200001440000003167412176227250014175 0ustar liggesusers
\name{clm2}
\alias{clm2}
\title{Cumulative link models}
\description{
  A new, improved implementation of CLMs is available in
  \code{\link{clm}}.

  Fits cumulative link models with an additive model for the location
  and a multiplicative model for the scale. The function allows for
  structured thresholds. A popular special case of a CLM is the
  proportional odds model. In addition to the standard link functions,
  two flexible link functions, "Aranda-Ordaz" and "log-gamma" are
  available, where an extra link function parameter provides additional
  flexibility. A subset of the predictors can be allowed to have nominal
  rather than ordinal effects. This has been termed "partial
  proportional odds" when the link is the logistic.
}
\usage{
clm2(location, scale, nominal, data, weights, start, subset,
     na.action, contrasts, Hess = TRUE, model,
     link = c("logistic", "probit", "cloglog", "loglog",
     "cauchit", "Aranda-Ordaz", "log-gamma"), lambda,
     doFit = TRUE, control,
     threshold = c("flexible", "symmetric", "equidistant"), ...)
}
\arguments{
  \item{location}{
    a formula expression as for regression models, of the form
    \code{response ~ predictors}. The response should be a factor
    (preferably an ordered factor), which will be interpreted as an
    ordinal response with levels ordered as in the factor. The model
    must have an intercept: attempts to remove one will lead to a
    warning and will be ignored. An offset may be used. See the
    documentation of \code{\link{formula}} for other details.
  }
  \item{scale}{
    an optional formula expression as for the location part, of the form
    \code{ ~ predictors}, i.e. with an empty left hand side. An offset
    may be used. See the documentation of \code{\link{formula}} for
    other details.
  }
  \item{nominal}{
    an optional formula of the form \code{ ~ predictors}, i.e. with an
    empty left hand side. The effects of the predictors in this formula
    are assumed to be nominal.
  }
  \item{data}{
    an optional data frame in which to interpret the variables occurring
    in the formulas.
  }
  \item{weights}{
    optional case weights in fitting. Defaults to 1.
  }
  \item{start}{
    initial values for the parameters in the format
    \code{c(alpha, beta, log(zeta), lambda)}.
  }
  \item{subset}{
    expression saying which subset of the rows of the data should be
    used in the fit. All observations are included by default.
  }
  \item{na.action}{
    a function to filter missing data. Applies to terms in all three
    formulae.
  }
  \item{contrasts}{
    a list of contrasts to be used for some or all of the factors
    appearing as variables in the model formula.
  }
  \item{Hess}{
    logical for whether the Hessian (the observed information matrix)
    should be computed.
    Use \code{Hess = TRUE} if you intend to call \code{summary} or
    \code{vcov} on the fit and \code{Hess = FALSE} in all other instances
    to save computing time. The argument is ignored if
    \code{method = "Newton"} where the Hessian is always computed and
    returned. Defaults to \code{TRUE}.
  }
  \item{model}{
    logical for whether the model frames should be part of the returned
    object.
  }
  \item{link}{link function, i.e. the type of location-scale
    distribution assumed for the latent distribution. The
    \code{Aranda-Ordaz} and \code{log-gamma} links add additional
    flexibility with a link function parameter, \code{lambda}. The
    \code{Aranda-Ordaz} link (Aranda-Ordaz, 1983) equals the logistic
    link when \code{lambda = 1} and approaches the \code{loglog} link
    when \code{lambda} approaches zero. The \code{log-gamma} link
    (Genter and Farewell, 1985) equals the \code{loglog} link when
    \code{lambda = 1}, the \code{probit} link when \code{lambda = 0} and
    the \code{cloglog} link when \code{lambda = -1}.
  }
  \item{lambda}{numerical scalar: the link function parameter. Used in
    combination with link \code{Aranda-Ordaz} or \code{log-gamma} and
    otherwise ignored. If lambda is specified, the model is estimated
    with lambda fixed at this value and otherwise lambda is
    estimated by ML. For \code{Aranda-Ordaz} lambda has to be positive;
    \code{> 1e-5} for numerical reasons.
  }
  \item{doFit}{logical for whether the model should be fit or the model
    environment should be returned.
  }
  \item{control}{a call to \code{\link{clm2.control}}.
  }
  \item{threshold}{specifies a potential structure for the thresholds
    (cut-points). \code{"flexible"} provides the standard unstructured
    thresholds, \code{"symmetric"} restricts the distance between the
    thresholds to be symmetric around the central one or two thresholds
    for odd or even numbers of thresholds, respectively, and
    \code{"equidistant"} restricts the distance between consecutive
    thresholds to the same value.
  }
  \item{\dots}{
    additional arguments are passed on to \code{\link{clm2.control}} and
    possibly further on to the optimizer, which can lead to surprising
    error or warning messages when mistyping arguments etc.
  }
}
\details{
  There are methods for the standard model-fitting functions, including
  \code{\link{summary}}, \code{\link{vcov}},
  \code{\link[ordinal]{predict}}, \code{\link[=anova.clm2]{anova}},
  \code{\link{logLik}}, \code{\link[=profile.clm2]{profile}},
  \code{\link[=profile.clm2]{plot.profile}},
  \code{\link[=confint.clm2]{confint}},
  \code{\link[=update.clm2]{update}},
  \code{\link[=addterm.clm2]{dropterm}},
  \code{\link[=addterm.clm2]{addterm}}, and an \code{extractAIC} method.

  The design of the implementation is inspired by an idea proposed by
  Douglas Bates in the talk "Exploiting sparsity in model matrices"
  presented at the DSC conference in Copenhagen, July 14 2009.
  Basically an environment is set up with all the information needed to
  optimize the likelihood function. Extractor functions are then used to
  get the value of the likelihood at current or given parameter values
  and to extract current values of the parameters.
All computations are performed inside the environment and relevant variables are updated during the fitting process. After optimizer termination, relevant variables are extracted from the environment and the remaining ones are discarded. Some aspects of \code{clm2} (for instance how starting values are obtained) and of the associated methods are inspired by \code{\link[MASS]{polr}} from package \code{MASS}. } \value{ If \code{doFit = FALSE} the result is an environment representing the model ready to be optimized. If \code{doFit = TRUE} the result is an object of class \code{"clm2"} with the following components: \item{beta}{the parameter estimates of the location part. } \item{zeta}{the parameter estimates of the scale part on the log scale; the scale parameter estimates on the original scale are given by \code{exp(zeta)}. } \item{Alpha}{vector or matrix of the threshold parameters. } \item{Theta}{vector or matrix of the thresholds. } \item{xi}{vector of threshold parameters, which, given a threshold function (e.g. \code{"equidistant"}), and possible nominal effects define the class boundaries, \code{Theta}. } \item{lambda}{the value of lambda if lambda is supplied or estimated, otherwise missing. } \item{coefficients}{the coefficients of the intercepts (\code{theta}), the location (\code{beta}), the scale (\code{zeta}), and the link function parameter (\code{lambda}). } \item{df.residual}{the number of residual degrees of freedom, calculated using the weights. } \item{fitted.values}{vector of fitted values for each observation. An observation here is each of the scalar elements of the multinomial table and not a multinomial vector. } \item{convergence}{\code{TRUE} if the gradient-based convergence criterion is met and \code{FALSE} otherwise. } \item{gradient}{vector of gradients for all the parameters at termination of the optimizer. } \item{optRes}{list with results from the optimizer. The contents of the list depend on the choice of optimizer. } \item{logLik}{the log likelihood of the model at optimizer termination. } \item{Hessian}{if the model was fitted with \code{Hess = TRUE}, this is the Hessian matrix of the parameters at the optimum. } \item{scale}{\code{model.frame} for the scale model. } \item{location}{\code{model.frame} for the location model. } \item{nominal}{\code{model.frame} for the nominal model. } \item{edf}{the (effective) number of degrees of freedom used by the model. } \item{start}{the starting values. } \item{convTol}{convergence tolerance for the maximum absolute gradient of the parameters at termination of the optimizer. } \item{method}{character, the optimizer. } \item{y}{the response variable. } \item{lev}{the names of the levels of the response variable. } \item{nobs}{the (effective) number of observations, calculated as the sum of the weights. } \item{threshold}{character, the threshold function used in the model. } \item{estimLambda}{\code{1} if lambda is estimated in one of the flexible link functions and \code{0} otherwise. } \item{link}{character, the link function used in the model. } \item{call}{the matched call. } \item{contrasts}{contrasts applied to terms in location and scale models. } \item{na.action}{the function used to filter missing data. } } \author{Rune Haubo B Christensen} \references{ Agresti, A. (2002) \emph{Categorical Data Analysis.} Second edition. Wiley. Aranda-Ordaz, F. J. (1983) An Extension of the Proportional-Hazards Model for Grouped Data. \emph{Biometrics}, 39, 109-117. Genter, F. C. and Farewell, V. T.
(1985) Goodness-of-link testing in ordinal regression models. \emph{The Canadian Journal of Statistics}, 13(1), 37-44. Christensen, R. H. B., Cleaver, G. and Brockhoff, P. B. (2011) Statistical and Thurstonian models for the A-not A protocol with and without sureness. \emph{Food Quality and Preference, 22}, pp. 542-549. } \examples{ options(contrasts = c("contr.treatment", "contr.poly")) ## A tabular data set: (tab26 <- with(soup, table("Product" = PROD, "Response" = SURENESS))) dimnames(tab26)[[2]] <- c("Sure", "Not Sure", "Guess", "Guess", "Not Sure", "Sure") dat26 <- expand.grid(sureness = as.factor(1:6), prod = c("Ref", "Test")) dat26$wghts <- c(t(tab26)) m1 <- clm2(sureness ~ prod, scale = ~prod, data = dat26, weights = wghts, link = "logistic") ## print, summary, vcov, logLik, AIC: m1 summary(m1) vcov(m1) logLik(m1) AIC(m1) coef(m1) coef(summary(m1)) ## link functions: m2 <- update(m1, link = "probit") m3 <- update(m1, link = "cloglog") m4 <- update(m1, link = "loglog") m5 <- update(m1, link = "cauchit", start = coef(m1)) m6 <- update(m1, link = "Aranda-Ordaz", lambda = 1) m7 <- update(m1, link = "Aranda-Ordaz") m8 <- update(m1, link = "log-gamma", lambda = 1) m9 <- update(m1, link = "log-gamma") ## nominal effects: mN1 <- clm2(sureness ~ 1, nominal = ~ prod, data = dat26, weights = wghts, link = "logistic") anova(m1, mN1) ## optimizer / method: update(m1, scale = ~ 1, method = "Newton") update(m1, scale = ~ 1, method = "nlminb") update(m1, scale = ~ 1, method = "optim") \dontshow{ update(m1, scale = ~ 1, method = "model.frame") update(m1, location = ~.-prod, scale = ~ 1, nominal = ~ prod, method = "model.frame") } ## threshold functions mT1 <- update(m1, threshold = "symmetric") mT2 <- update(m1, threshold = "equidistant") anova(m1, mT1, mT2) ## Extend example from polr in package MASS: ## Fit model from polr example: if(require(MASS)) { fm1 <- clm2(Sat ~ Infl + Type + Cont, weights = Freq, data = housing) fm1 summary(fm1) ## With probit link: summary(update(fm1, link = "probit")) ## Allow scale to depend on Cont-variable summary(fm2 <- update(fm1, scale =~ Cont)) anova(fm1, fm2) ## which seems to improve the fit } ################################# ## It is possible to fit multinomial models (i.e. 
with nominal ## effects) as the following example shows: if(require(nnet)) { (hous1.mu <- multinom(Sat ~ 1, weights = Freq, data = housing)) (hous1.clm <- clm2(Sat ~ 1, weights = Freq, data = housing)) ## It is the same likelihood: all.equal(logLik(hous1.mu), logLik(hous1.clm)) ## and the same fitted values: fitHous.mu <- t(fitted(hous1.mu))[t(col(fitted(hous1.mu)) == unclass(housing$Sat))] all.equal(fitted(hous1.clm), fitHous.mu) ## The coefficients of multinom can be retrieved from the clm2-object ## by: Pi <- diff(c(0, plogis(hous1.clm$xi), 1)) log(Pi[2:3]/Pi[1]) ## A larger model with explanatory variables: (hous.mu <- multinom(Sat ~ Infl + Type + Cont, weights = Freq, data = housing)) (hous.clm <- clm2(Sat ~ 1, nominal = ~ Infl + Type + Cont, weights = Freq, data = housing)) ## Almost the same likelihood: all.equal(logLik(hous.mu), logLik(hous.clm)) ## And almost the same fitted values: fitHous.mu <- t(fitted(hous.mu))[t(col(fitted(hous.mu)) == unclass(housing$Sat))] all.equal(fitted(hous.clm), fitHous.mu) all.equal(round(fitted(hous.clm), 5), round(fitHous.mu, 5)) } } \keyword{models} ordinal/man/updateOld.Rd0000644000176200001440000000316513331072710014670 0ustar liggesusers\name{update.clm2} \alias{update.clm2} \alias{update.clmm2} \title{Update method for cumulative link models} \description{ Update method for cumulative link models fitted with \code{clm2}. This makes it possible to use e.g. \code{update(obj, location = ~ . - var1, scale = ~ . + var2)} } \usage{ \method{update}{clm2}(object, formula., location, scale, nominal,..., evaluate = TRUE) \method{update}{clmm2}(object, formula., location, scale, nominal,..., evaluate = TRUE) } \arguments{ \item{object}{a \code{\link{clm2}} object. } \item{formula.}{not used---unfortunately this argument is part of the default method. } \item{location}{an optional new formula for the location; see \code{\link{update.formula}} for details. } \item{scale}{an optional new formula for the scale; see \code{\link{update.formula}} for details. } \item{nominal}{an optional new formula for nominal effects; see \code{\link{update.formula}} for details. } \item{\dots}{additional arguments to the call, or arguments with changed values. } \item{evaluate}{if true, evaluate the new call; otherwise return the call. } } \value{ If \code{evaluate = TRUE} the fitted object is returned, otherwise the updated call. } \author{Rune Haubo B Christensen} \examples{ options(contrasts = c("contr.treatment", "contr.poly")) m1 <- clm2(SURENESS ~ PROD, scale = ~PROD, data = soup, link = "logistic") m2 <- update(m1, link = "probit") m3 <- update(m1, link = "cloglog") m4 <- update(m1, link = "loglog") anova(m1, update(m1, scale = ~.-PROD)) mT1 <- update(m1, threshold = "symmetric") } \keyword{internal} ordinal/man/clm.anova.Rd0000644000176200001440000000366413332047510014631 0ustar liggesusers\name{anova.clm} %%\alias{anova} \alias{anova.clm} \title{ANODE Tables and Likelihood ratio test of cumulative link models} \description{ Type I, II, and III analysis of deviance (ANODE) tables for cumulative link models and comparison of cumulative link models with likelihood ratio tests. Models may differ by terms in location, scale and nominal formulae, and in link and threshold function. } \usage{ \method{anova}{clm}(object, ..., type = c("I", "II", "III", "1", "2", "3")) } \arguments{ \item{object}{a \code{\link{clm}} object. } \item{\dots}{optionally one or more additional \code{\link{clm}} objects.
} \item{type}{the type of hypothesis test if \code{anova} is called with a single model; ignored if more than one model is passed to the method. } } \details{ The ANODE table returned when \code{anova} is called with a single model applies only to terms in \code{formula}, that is, terms in \code{nominal} and \code{scale} are ignored. } \value{ An analysis of deviance table based on Wald chi-square tests if called with a single model and a comparison of models with likelihood ratio tests if called with more than one model. } \author{Rune Haubo B Christensen} \seealso{ \code{\link[ordinal]{clm}} } \examples{ ## Analysis of deviance tables with Wald chi-square tests: fm <- clm(rating ~ temp * contact, scale=~contact, data=wine) anova(fm, type="I") anova(fm, type="II") anova(fm, type="III") options(contrasts = c("contr.treatment", "contr.poly")) m1 <- clm2(SURENESS ~ PROD, scale = ~PROD, data = soup, link = "logistic") ## anova anova(m1, update(m1, scale = ~.-PROD)) mN1 <- clm2(SURENESS ~ 1, nominal = ~PROD, data = soup, link = "logistic") anova(m1, mN1) anova(m1, update(m1, scale = ~.-PROD), mN1) ## Fit model from polr example: if(require(MASS)) { fm1 <- clm2(Sat ~ Infl + Type + Cont, weights = Freq, data = housing) anova(fm1, update(fm1, scale =~ Cont)) } } \keyword{models} ordinal/man/clm.control.Rd0000644000176200001440000000527013330271656015210 0ustar liggesusers\name{clm.control} \alias{clm.control} %- Also NEED an '\alias' for EACH other topic documented here. \title{Set control parameters for cumulative link models} \description{ Set control parameters for cumulative link models } \usage{ clm.control(method = c("Newton", "model.frame", "design", "ucminf", "nlminb", "optim"), sign.location = c("negative", "positive"), sign.nominal = c("positive", "negative"), ..., trace = 0L, maxIter = 100L, gradTol = 1e-06, maxLineIter = 15L, relTol = 1e-6, tol = sqrt(.Machine$double.eps), maxModIter = 5L, convergence = c("warn", "silent", "stop", "message")) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{method}{\code{"Newton"} fits the model by maximum likelihood, \code{"model.frame"} causes \code{\link{clm}} to return the \code{model.frame}, and \code{"design"} causes \code{\link{clm}} to return a list of design matrices etc. that can be used with \code{\link{clm.fit}}. \code{ucminf}, \code{nlminb} and \code{optim} refer to general purpose optimizers. } \item{sign.location}{change sign of the location part of the model. } \item{sign.nominal}{change sign of the nominal part of the model. } \item{trace}{numerical, if \code{> 0} information is printed about and during the optimization process. Defaults to \code{0}. } \item{maxIter}{the maximum number of Newton-Raphson iterations. Defaults to \code{100}. } \item{gradTol}{the maximum absolute gradient; defaults to \code{1e-6}. } \item{maxLineIter}{the maximum number of step halvings allowed if a Newton(-Raphson) step overshoots. Defaults to \code{15}. } \item{relTol}{relative convergence tolerance: relative change in the parameter estimates between Newton iterations. Defaults to \code{1e-6}. } \item{tol}{numerical tolerance on eigenvalues to determine negative-definiteness of the Hessian. If the Hessian of a model fit is negative definite, the fitting algorithm did not converge. If the Hessian is singular, the fitting algorithm did converge albeit not to a \emph{unique} optimum, so one or more parameters are not uniquely determined even though the log-likelihood value is.
} \item{maxModIter}{the maximum allowable number of consecutive iterations where the Newton step needs to be modified to be a descent direction. Defaults to \code{5}. } \item{convergence}{action to take if the fitting algorithm did not converge. } \item{\dots}{control arguments passed on to \code{\link{ucminf}}, \code{\link{nlminb}} or \code{\link{optim}}. } } \value{ a list of control parameters. } \author{Rune Haubo B Christensen} \seealso{ \code{\link{clm}} } \keyword{models} ordinal/man/clm.controlOld.Rd0000755000176200001440000000332511616455300015644 0ustar liggesusers\name{clm2.control} \alias{clm2.control} \title{Set control parameters for cumulative link models} \description{ Set control parameters for cumulative link models } \usage{ clm2.control(method = c("ucminf", "Newton", "nlminb", "optim", "model.frame"), ..., convTol = 1e-4, trace = 0, maxIter = 100, gradTol = 1e-5, maxLineIter = 10) } \arguments{ \item{method}{ the optimizer used to maximize the likelihood function. \code{"Newton"} only works for models without \code{scale}, structured thresholds and flexible link functions, but is considerably faster than the other optimizers when applicable. \code{model.frame} simply returns a list with the location, scale and nominal model frames. \code{"optim"} uses the \code{"BFGS"} method. } \item{\dots}{control arguments passed on to the chosen optimizer; see \code{\link[ucminf]{ucminf}}, \code{\link{optim}}, and \code{\link{nlminb}} for details. } \item{convTol}{convergence criterion on the size of the maximum absolute gradient. } \item{trace}{numerical, if > 0 information is printed about and during the optimization process. Defaults to \code{0}. } \item{maxIter}{the maximum number of Newton-Raphson iterations. Defaults to \code{100}. } \item{gradTol}{the maximum absolute gradient. This is the termination criterion and defaults to \code{1e-5}. } \item{maxLineIter}{the maximum number of step halvings allowed if a Newton(-Raphson) step overshoots. Defaults to \code{10}. } } \value{ a list of control parameters. } \author{Rune Haubo B Christensen} \seealso{ \code{\link{clm2}} } \keyword{models} ordinal/man/clmm.Rd0000644000176200001440000001572113277541507013715 0ustar liggesusers\name{clmm} \alias{clmm} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Cumulative Link Mixed Models } \description{ Fits Cumulative Link Mixed Models with one or more random effects via the Laplace approximation or quadrature methods } \usage{ clmm(formula, data, weights, start, subset, na.action, contrasts, Hess = TRUE, model = TRUE, link = c("logit", "probit", "cloglog", "loglog", "cauchit"), doFit = TRUE, control = list(), nAGQ = 1L, threshold = c("flexible", "symmetric", "symmetric2", "equidistant"), ...) %% also document getNLA(rho, par) here and include examples } %- maybe also 'usage' for other objects documented here. \arguments{ \item{formula}{ a two-sided linear formula object describing the fixed-effects part of the model, with the response on the left of a ~ operator and the terms, separated by + operators, on the right. The vertical bar character "|" separates an expression for a model matrix and a grouping factor. } \item{data}{ an optional data frame in which to interpret the variables occurring in the formula. } \item{weights}{ optional case weights in fitting. Defaults to 1.
} \item{start}{ optional initial values for the parameters in the format \code{c(alpha, beta, tau)}, where \code{alpha} are the threshold parameters, \code{beta} are the fixed regression parameters and \code{tau} are variance parameters for the random effects on the log scale. } \item{subset}{ expression saying which subset of the rows of the data should be used in the fit. All observations are included by default. } \item{na.action}{ a function to filter missing data. } \item{contrasts}{ a list of contrasts to be used for some or all of the factors appearing as variables in the model formula. } \item{Hess}{ logical for whether the Hessian (the inverse of the observed information matrix) should be computed. Use \code{Hess = TRUE} if you intend to call \code{summary} or \code{vcov} on the fit and \code{Hess = FALSE} in all other instances to save computing time. } \item{model}{ logical for whether the model frames should be part of the returned object. } \item{link}{ link function, i.e. the type of location-scale distribution assumed for the latent distribution. The default \code{"logit"} link gives the proportional odds mixed model. } \item{doFit}{ logical for whether the model should be fit or the model environment should be returned. } \item{control}{ a call to \code{\link{clmm.control}}. } \item{nAGQ}{ integer; the number of quadrature points to use in the adaptive Gauss-Hermite quadrature approximation to the likelihood function. The default (\code{1}) gives the Laplace approximation. Higher values generally provide higher precision at the expense of longer computation times, and values between 5 and 10 generally provide accurate maximum likelihood estimates. Negative values give the non-adaptive Gauss-Hermite quadrature approximation, which is generally faster but less accurate than the adaptive version. See the references for further details. Quadrature methods are only available with a single random effects term; the Laplace approximation is always available. } \item{threshold}{ specifies a potential structure for the thresholds (cut-points). \code{"flexible"} provides the standard unstructured thresholds, \code{"symmetric"} restricts the distance between the thresholds to be symmetric around the central one or two thresholds for odd or even numbers of thresholds respectively, \code{"symmetric2"} restricts the latent mean in the reference group to zero; this means that the central threshold (even number of response levels) is zero or that the two central thresholds are equal apart from their sign (odd number of response levels), and \code{"equidistant"} restricts the distance between consecutive thresholds to be of the same size. } \item{\dots}{ additional arguments are passed on to \code{\link{clm.control}}. } } \details{ This is a new (as of August 2011) improved implementation of CLMMs. The old implementation is available in \code{\link{clmm2}}. Some features are not yet available in \code{clmm}; for instance scale effects, nominal effects and flexible link functions are currently only available in \code{clmm2}. \code{clmm} is expected to take over \code{clmm2} at some point. There are standard print, summary and anova methods implemented for \code{"clmm"} objects.
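As a minimal illustration (a sketch assuming the \code{wine} data shipped with the package; the object names are illustrative and \code{nAGQ} is documented above), the default Laplace approximation can be checked against adaptive Gauss-Hermite quadrature for a model with a single random-effects term: \preformatted{
## Laplace approximation (the default, nAGQ = 1):
fmm.lap <- clmm(rating ~ temp + contact + (1|judge), data = wine)
## Adaptive Gauss-Hermite quadrature with 10 quadrature points:
fmm.agq <- clmm(rating ~ temp + contact + (1|judge), data = wine,
                nAGQ = 10)
## The two log-likelihoods should agree closely, but not exactly:
c(laplace = logLik(fmm.lap), agq10 = logLik(fmm.agq))
}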
} \value{ a list containing \item{alpha}{threshold parameters.} \item{beta}{fixed effect regression parameters.} \item{stDev}{standard deviation of the random effect terms.} \item{tau}{\code{log(stDev)} - the scale at which the log-likelihood function is optimized.} \item{coefficients}{the estimated model parameters = \code{c(alpha, beta, tau)}.} \item{control}{List of control parameters as generated by \code{\link{clm.control}}. } \item{Hessian}{Hessian of the model coefficients.} \item{edf}{the estimated degrees of freedom used by the model = \code{length(coefficients)}.} \item{nobs}{\code{sum(weights)}.} \item{n}{length(y).} \item{fitted.values}{fitted values evaluated with the random effects at their conditional modes.} \item{df.residual}{residual degrees of freedom; \code{length(y) - sum(weights)}} \item{tJac}{Jacobian of the threshold function corresponding to the mapping from standard flexible thresholds to those used in the model.} \item{terms}{the terms object for the fixed effects.} \item{contrasts}{contrasts applied to the fixed model terms.} \item{na.action}{the function used to filter missing data.} \item{call}{the matched call.} \item{logLik}{value of the log-likelihood function for the model at the optimum.} \item{Niter}{number of Newton iterations in the inner loop update of the conditional modes of the random effects.} \item{optRes}{list of results from the optimizer.} \item{ranef}{list of the conditional modes of the random effects.} \item{condVar}{list of the conditional variance of the random effects at their conditional modes.} } %% \references{ bla %% %% ~put references to the literature/web site here ~ %% } \author{ Rune Haubo B Christensen } \examples{ ## Cumulative link model with one random term: fmm1 <- clmm(rating ~ temp + contact + (1|judge), data = wine) summary(fmm1) \dontrun{ ## May take a couple of seconds to run this. ## Cumulative link mixed model with two random terms: mm1 <- clmm(SURENESS ~ PROD + (1|RESP) + (1|RESP:PROD), data = soup, link = "probit", threshold = "equidistant") mm1 summary(mm1) ## test random effect: mm2 <- clmm(SURENESS ~ PROD + (1|RESP), data = soup, link = "probit", threshold = "equidistant") anova(mm1, mm2) } } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{models} ordinal/man/nominal.test.Rd0000644000176200001440000000521412175703724015371 0ustar liggesusers\name{nominal_test} \alias{nominal_test} \alias{scale_test} \alias{nominal_test.clm} \alias{scale_test.clm} \title{ Likelihood ratio tests of model terms in scale and nominal formulae } \description{ Add all model terms to scale and nominal formulae and perform likelihood ratio tests. These tests can be viewed as goodness-of-fit tests. With the logit link, \code{nominal_test} provides likelihood ratio tests of the proportional odds assumption. The \code{scale_test} tests can be given a similar interpretation. } \usage{ nominal_test(object, ...) \method{nominal_test}{clm}(object, scope, trace=FALSE, ...) scale_test(object, ...) \method{scale_test}{clm}(object, scope, trace=FALSE, ...) } \arguments{ \item{object}{for the \code{clm} method an object of class \code{"clm"}, i.e., the result of a call to \code{clm}. } \item{scope}{ a formula or character vector specifying the terms to add to scale or nominal. In \code{nominal_test} terms in scope already in \code{nominal} are ignored. In \code{scale_test} terms in scope already in \code{scale} are ignored. 
In \code{nominal_test} the default is to add all terms from \code{formula} (location part) and \code{scale} that are not also in \code{nominal}. In \code{scale_test} the default is to add all terms from \code{formula} (location part) that are not also in \code{scale}. } \item{trace}{ if \code{TRUE} additional information may be given on the fits as they are tried. } \item{\dots}{ arguments passed to or from other methods. } } \value{ A table of class \code{"anova"} containing columns for the change in degrees of freedom, AIC, the likelihood ratio statistic and a p-value based on the asymptotic chi-square distribution of the likelihood ratio statistic under the null hypothesis. } \details{ The definition of AIC is only up to an additive constant because the likelihood function is only defined up to an additive constant. } \author{Rune Haubo B Christensen} \examples{ ## Fit cumulative link model: fm <- clm(rating ~ temp + contact, data=wine) summary(fm) ## test partial proportional odds assumption for temp and contact: nominal_test(fm) ## no evidence of non-proportional odds. ## test if there are signs of scale effects: scale_test(fm) ## no evidence of scale effects. ## tests of scale and nominal effects for the housing data from MASS: if(require(MASS)) { fm1 <- clm(Sat ~ Infl + Type + Cont, weights = Freq, data = housing) scale_test(fm1) nominal_test(fm1) ## Evidence of multiplicative/scale effect of 'Cont'. This is a breach ## of the proportional odds assumption. } } \keyword{models} ordinal/man/convergence.clm.Rd0000644000176200001440000000474012205357257016031 0ustar liggesusers\name{convergence} \alias{convergence} \alias{convergence.clm} \alias{print.convergence.clm} \title{Check convergence of cumulative link models} \description{ Check the accuracy of the parameter estimates of cumulative link models. The number of correct decimals and number of significant digits is given for the maximum likelihood estimates of the parameters in a cumulative link model fitted with \code{\link{clm}}. } \usage{ convergence(object, ...) \method{convergence}{clm}(object, digits = max(3, getOption("digits") - 3), tol = sqrt(.Machine$double.eps), ...) } \arguments{ \item{object}{for the \code{clm} method an object of class \code{"clm"}, i.e., the result of a call to \code{clm}. } \item{digits}{the number of digits in the printed table. } \item{tol}{numerical tolerance to judge if the Hessian is positive definite from its smallest eigenvalue. } \item{...}{arguments to or from methods. Not used by the \code{clm} method. } } \value{ Convergence information. In particular a table where the \code{Error} column gives the numerical error in the parameter estimates. These numbers express how far the parameter estimates in the fitted model are from the true maximum likelihood estimates for this model. The \code{Cor.Dec} gives the number of correct decimals with which the parameters are determined and the \code{Sig.Dig} gives the number of significant digits with which the parameters are determined. The number denoted \code{logLik.error} is the error in the value of log-likelihood in the fitted model at the parameter values of that fit. An accurate determination of the log-likelihood is essential for accurate likelihood ratio tests in model comparison. } \details{ The number of correct decimals is defined as... The number of significant digits is defined as ... The number of correct decimals and the number of significant digits are determined from the numerical errors in the parameter estimates.
The numerical errors are determined from the Method Independent Error Theorem (Elden et al., 2004) and are based on the Newton step evaluated at convergence. } \references{ Elden, L., Wittmeyer-Koch, L. and Nielsen, H. B. (2004) \emph{Introduction to Numerical Computation --- analysis and Matlab illustrations.} Studentliteratur. } %% \seealso{ %% } \examples{ ## Simple model: fm1 <- clm(rating ~ contact + temp, data=wine) summary(fm1) convergence(fm1) } \author{Rune Haubo B Christensen} \keyword{models} ordinal/man/confintOld.Rd0000644000176200001440000001370313331072710015046 0ustar liggesusers\name{confint.clm2} \alias{confint.clm2} \alias{confint.profile.clm2} \alias{profile.clm2} \alias{plot.profile.clm2} \title{ Confidence intervals and profile likelihoods for parameters in cumulative link models } \description{ Computes confidence intervals from the profiled likelihood for one or more parameters in a fitted cumulative link model, or plots the profile likelihood function. } \usage{ \method{confint}{clm2}(object, parm, level = 0.95, whichL = seq_len(p), whichS = seq_len(k), lambda = TRUE, trace = 0, \dots) \method{confint}{profile.clm2}(object, parm = seq_along(Pnames), level = 0.95, \dots) \method{profile}{clm2}(fitted, whichL = seq_len(p), whichS = seq_len(k), lambda = TRUE, alpha = 0.01, maxSteps = 50, delta = LrootMax/10, trace = 0, stepWarn = 8, \dots) \method{plot}{profile.clm2}(x, parm = seq_along(Pnames), level = c(0.95, 0.99), Log = FALSE, relative = TRUE, fig = TRUE, n = 1e3, ..., ylim = NULL) } \arguments{ \item{object}{ a fitted \code{\link{clm2}} object or a \code{profile.clm2} object. } \item{fitted}{ a fitted \code{\link{clm2}} object. } \item{x}{a \code{profile.clm2} object. } \item{parm}{not used in \code{confint.clm2}. For \code{confint.profile.clm2}: a specification of which parameters are to be given confidence intervals, either a vector of numbers or a vector of names. If missing, all parameters are considered. For \code{plot.profile.clm2}: a specification of which parameters the profile likelihood are to be plotted for, either a vector of numbers or a vector of names. If missing, all parameters are considered. } \item{level}{ the confidence level required. } \item{whichL}{ a specification of which \emph{location} parameters are to be given confidence intervals, either a vector of numbers or a vector of names. If missing, all location parameters are considered. } \item{whichS}{ a specification of which \emph{scale} parameters are to be given confidence intervals, either a vector of numbers or a vector of names. If missing, all scale parameters are considered. } \item{lambda}{ logical. Should profile or confidence intervals be computed for the link function parameter? Only used when one of the flexible link functions are used; see the \code{link}-argument in \code{\link{clm2}}. } \item{trace}{ logical. Should profiling be traced? } \item{alpha}{Determines the range of profiling. By default the likelihood is profiled in the 99\% confidence interval region as determined by the profile likelihood. } \item{maxSteps}{the maximum number of profiling steps in each direction (up and down) for each parameter. } \item{delta}{the length of profiling steps. To some extent this parameter determines the degree of accuracy of the profile likelihood in that smaller values, i.e. smaller steps, give higher accuracy. Note however that a spline interpolation is used when constructing confidence intervals so fairly long steps can provide high accuracy.
} \item{stepWarn}{a warning is issued if the number of steps in each direction (up or down) for a parameter is less than \code{stepWarn} (defaults to 8 steps) because this indicates an unreliable profile. } \item{Log}{should the profile likelihood be plotted on the log-scale? } \item{relative}{should the relative or the absolute likelihood be plotted? } \item{fig}{should the profile likelihood be plotted? } \item{n}{the number of points used in the spline interpolation of the profile likelihood. } \item{ylim}{overrules default y-limits on the plot of the profile likelihood. } \item{\dots}{ additional argument(s) for methods including \code{range} (for the hidden function \code{profileLambda}) that sets the range of values of \code{lambda} at which the likelihood should be profiled for this parameter. } } \value{ \code{confint}: A matrix (or vector) with columns giving lower and upper confidence limits for each parameter. These will be labelled as (1-level)/2 and 1 - (1-level)/2 in \% (by default 2.5\% and 97.5\%). The parameter names are preceded with \code{"loc."} or \code{"sca."} to indicate whether the confidence interval applies to a location or a scale parameter. \code{plot.profile.clm2} invisibly returns the profile object. } \details{ These \code{confint} methods call the appropriate profile method, then find the confidence intervals by interpolation of the profile traces. If the profile object is already available, this should be used as the main argument rather than the fitted model object itself. In \code{plot.profile.clm2}: at least one of the \code{Log} and \code{relative} arguments has to be \code{TRUE}. } \author{Rune Haubo B Christensen} \seealso{ \code{\link{profile}} and \code{\link{confint}} } \examples{ options(contrasts = c("contr.treatment", "contr.poly")) ## More manageable data set: (tab26 <- with(soup, table("Product" = PROD, "Response" = SURENESS))) dimnames(tab26)[[2]] <- c("Sure", "Not Sure", "Guess", "Guess", "Not Sure", "Sure") dat26 <- expand.grid(sureness = as.factor(1:6), prod = c("Ref", "Test")) dat26$wghts <- c(t(tab26)) m1 <- clm2(sureness ~ prod, scale = ~prod, data = dat26, weights = wghts, link = "logistic") ## profile pr1 <- profile(m1) par(mfrow = c(2, 2)) plot(pr1) m9 <- update(m1, link = "log-gamma") pr9 <- profile(m9, whichL = numeric(0), whichS = numeric(0)) par(mfrow = c(1, 1)) plot(pr9) plot(pr9, Log=TRUE, relative = TRUE) plot(pr9, Log=TRUE, relative = TRUE, ylim = c(-4, 0)) plot(pr9, Log=TRUE, relative = FALSE) ## confint confint(pr9) confint(pr1) ## Extend example from polr in package MASS: ## Fit model from polr example: if(require(MASS)) { fm1 <- clm2(Sat ~ Infl + Type + Cont, scale = ~ Cont, weights = Freq, data = housing) pr1 <- profile(fm1) confint(pr1) par(mfrow=c(2,2)) plot(pr1) } } \keyword{internal} ordinal/man/wine.Rd0000644000176200001440000000443012176227250013713 0ustar liggesusers\name{wine} \alias{wine} \title{ Bitterness of wine } \description{ The \code{wine} data set is adapted from Randall (1989) and stems from a factorial experiment on factors determining the bitterness of wine. Two treatment factors (temperature and contact) each have two levels. Temperature and contact between juice and skins can be controlled when crushing grapes during wine production. Nine judges each assessed wine from two bottles from each of the four treatment conditions, hence there are 72 observations in all. } \usage{ wine } \format{ \describe{ \item{\code{response}}{ scorings of wine bitterness on a 0---100 continuous scale.
} \item{\code{rating}}{ ordered factor with 5 levels; a grouped version of \code{response}. } \item{\code{temp}}{ temperature: factor with two levels. } \item{\code{contact}}{ factor with two levels (\code{"no"} and \code{"yes"}). } \item{\code{bottle}}{ factor with eight levels. } \item{\code{judge}}{ factor with nine levels. } }} \source{ Data are adapted from Randall (1989). } \references{ Randall, J (1989). The analysis of sensory data by generalised linear model. \emph{Biometrical journal 7}, pp. 781--793. Tutz, G. and W. Hennevogl (1996). Random effects in ordinal regression models. \emph{Computational Statistics & Data Analysis 22}, pp. 537--557. } \examples{ head(wine) str(wine) ## Variables 'rating' and 'response' are related in the following way: (intervals <- seq(0,100, by = 20)) all(wine$rating == findInterval(wine$response, intervals)) ## ok ## A few illustrative tabulations: ## Table matching Table 5 in Randall (1989): temp.contact.bottle <- with(wine, temp:contact:bottle)[drop=TRUE] xtabs(response ~ temp.contact.bottle + judge, data = wine) ## Table matching Table 6 in Randall (1989): with(wine, { tcb <- temp:contact:bottle tcb <- tcb[drop=TRUE] table(tcb, rating) }) ## or simply: with(wine, table(bottle, rating)) ## Table matching Table 1 in Tutz & Hennevogl (1996): tab <- xtabs(as.numeric(rating) ~ judge + temp.contact.bottle, data = wine) colnames(tab) <- paste(rep(c("c","w"), each = 4), rep(c("n", "n", "y", "y"), 2), 1:8, sep=".") tab ## A simple model: m1 <- clm(rating ~ temp * contact, data = wine) summary(m1) } \keyword{datasets} ordinal/man/clm.Rd0000644000176200001440000002536613332050747013537 0ustar liggesusers\name{clm} \alias{clm} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Cumulative Link Models %% ~~function to do ... ~~ } \description{ Fits cumulative link models (CLMs) such as the proportional odds model. The model allows for various link functions and structured thresholds that restrict the thresholds or cut-points to be, e.g., equidistant or symmetrically arranged around the central threshold(s). Nominal effects (partial proportional odds with the logit link) are also allowed. A modified Newton algorithm is used to optimize the likelihood function. %% ~~ A concise (1-5 lines) description of what the function does. ~~ } \usage{ clm(formula, scale, nominal, data, weights, start, subset, doFit = TRUE, na.action, contrasts, model = TRUE, control=list(), link = c("logit", "probit", "cloglog", "loglog", "cauchit", "Aranda-Ordaz", "log-gamma"), threshold = c("flexible", "symmetric", "symmetric2", "equidistant"), ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{formula}{ a formula expression as for regression models, of the form \code{response ~ predictors}. The response should be a factor (preferably an ordered factor), which will be interpreted as an ordinal response with levels ordered as in the factor. The model must have an intercept: attempts to remove one will lead to a warning and will be ignored. An offset may be used. See the documentation of \code{\link{formula}} for other details. } \item{scale}{ an optional formula expression, of the form \code{ ~ predictors}, i.e. with an empty left hand side. An offset may be used. Variables included here will have multiplicative effects and can be interpreted as effects on the scale (or dispersion) of a latent distribution. } \item{nominal}{ an optional formula of the form \code{ ~ predictors}, i.e. with an empty left hand side.
The effects of the predictors in this formula are assumed to be nominal rather than ordinal - this corresponds to the so-called partial proportional odds (with the logit link). } \item{data}{ an optional data frame in which to interpret the variables occurring in the formulas. } \item{weights}{ optional case weights in fitting. Defaults to 1. Negative weights are not allowed. } \item{start}{ initial values for the parameters in the format \code{c(alpha, beta, zeta)}, where \code{alpha} are the threshold parameters (adjusted for potential nominal effects), \code{beta} are the regression parameters and \code{zeta} are the scale parameters. } \item{subset}{ expression saying which subset of the rows of the data should be used in the fit. All observations are included by default. } \item{doFit}{ logical for whether the model should be fitted or the model environment should be returned. } \item{na.action}{ a function to filter missing data. Applies to terms in all three formulae. } \item{contrasts}{ a list of contrasts to be used for some or all of the factors appearing as variables in the model formula. } \item{model}{ logical for whether the model frame should be part of the returned object. } \item{control}{ a list of control parameters passed on to \code{\link{clm.control}}. } \item{link}{ link function, i.e., the type of location-scale distribution assumed for the latent distribution. The default \code{"logit"} link gives the proportional odds model. } \item{threshold}{ specifies a potential structure for the thresholds (cut-points). \code{"flexible"} provides the standard unstructured thresholds, \code{"symmetric"} restricts the distance between the thresholds to be symmetric around the central one or two thresholds for odd or even numbers of thresholds respectively, \code{"symmetric2"} restricts the latent mean in the reference group to zero; this means that the central threshold (even number of response levels) is zero or that the two central thresholds are equal apart from their sign (odd number of response levels), and \code{"equidistant"} restricts the distance between consecutive thresholds to be of the same size. } \item{\dots}{ additional arguments are passed on to \code{\link{clm.control}}. } } \details{ This is a new (as of August 2011) improved implementation of CLMs. The old implementation is available in \code{\link{clm2}}, but will probably be removed at some point. There are methods for the standard model-fitting functions, including \code{\link{summary}}, \code{\link{anova}}, \code{\link{model.frame}}, \code{\link{model.matrix}}, \code{\link{drop1}}, \code{\link{dropterm}}, \code{\link{step}}, \code{\link{stepAIC}}, \code{\link{extractAIC}}, \code{\link{AIC}}, \code{\link{coef}}, \code{\link{nobs}}, \code{\link{profile}}, \code{\link{confint}}, \code{\link{vcov}} and \code{\link[=slice.clm]{slice}}. %% \code{slice}. } \value{ If \code{doFit = FALSE} the result is an environment representing the model ready to be optimized. If \code{doFit = TRUE} the result is an object of class \code{"clm"} with the components listed below. Note that some components are only present if \code{scale} and \code{nominal} are used. \item{aliased}{list of length 3 or less with components \code{alpha}, \code{beta} and \code{zeta} each being logical vectors containing alias information for the parameters of the same names. } \item{alpha}{a vector of threshold parameters.
} \item{alpha.mat}{(where relevant) a table (\code{data.frame}) of threshold parameters where each row corresponds to an effect in the \code{nominal} formula. } \item{beta}{(where relevant) a vector of regression parameters. } \item{call}{the matched call. } \item{coefficients}{a vector of coefficients of the form \code{c(alpha, beta, zeta)}. } \item{cond.H}{condition number of the Hessian matrix at the optimum (i.e. the ratio of the largest to the smallest eigenvalue). } \item{contrasts}{(where relevant) the contrasts used for the \code{formula} part of the model. } \item{control}{list of control parameters as generated by \code{\link{clm.control}}. } \item{convergence}{convergence code where 0 indicates successful convergence and negative values indicate convergence failure; 1 indicates successful convergence to a non-unique optimum. } \item{edf}{the estimated degrees of freedom, i.e., the number of parameters in the model fit. } \item{fitted.values}{the fitted probabilities. } \item{gradient}{a vector of gradients for the coefficients at the estimated optimum. } \item{Hessian}{the Hessian matrix for the parameters at the estimated optimum. } \item{info}{a table of basic model information for printing. } \item{link}{character, the link function used. } \item{logLik}{the value of the log-likelihood at the estimated optimum. } \item{maxGradient}{the maximum absolute gradient, i.e., \code{max(abs(gradient))}. } \item{model}{if requested (the default), the \code{\link{model.frame}} containing variables from \code{formula}, \code{scale} and \code{nominal} parts. } \item{n}{the number of observations counted as \code{nrow(X)}, where \code{X} is the design matrix. } \item{na.action}{(where relevant) information returned by \code{\link{model.frame}} on the special handling of \code{NA}s. } \item{nobs}{the number of observations counted as \code{sum(weights)}. } \item{nom.contrasts}{(where relevant) the contrasts used for the \code{nominal} part of the model. } \item{nom.terms}{(where relevant) the terms object for the \code{nominal} part. } \item{nom.xlevels}{(where relevant) a record of the levels of the factors used in fitting for the \code{nominal} part. } \item{start}{the parameter values at which the optimization has started. An attribute \code{start.iter} gives the number of iterations to obtain starting values for models where \code{scale} is specified or where the \code{cauchit} link is chosen. } \item{S.contrasts}{(where relevant) the contrasts used for the \code{scale} part of the model. } \item{S.terms}{(where relevant) the terms object for the \code{scale} part. } \item{S.xlevels}{(where relevant) a record of the levels of the factors used in fitting for the \code{scale} part. } \item{terms}{the terms object for the \code{formula} part. } \item{Theta}{(where relevant) a table (\code{data.frame}) of thresholds for all combinations of levels of factors in the \code{nominal} formula. } \item{threshold}{character, the threshold structure used. } \item{tJac}{the transpose of the Jacobian for the threshold structure. } \item{xlevels}{(where relevant) a record of the levels of the factors used in fitting for the \code{formula} part. } \item{y.levels}{the levels of the response variable after removing levels for which all weights are zero. } \item{zeta}{(where relevant) a vector of scale regression parameters.
} } \author{ Rune Haubo B Christensen } \examples{ fm1 <- clm(rating ~ temp * contact, data = wine) fm1 ## print method summary(fm1) fm2 <- update(fm1, ~.-temp:contact) anova(fm1, fm2) drop1(fm1, test = "Chi") add1(fm1, ~.+judge, test = "Chi") fm2 <- step(fm1) summary(fm2) coef(fm1) vcov(fm1) AIC(fm1) extractAIC(fm1) logLik(fm1) fitted(fm1) confint(fm1) ## type = "profile" confint(fm1, type = "Wald") pr1 <- profile(fm1) confint(pr1) ## plotting the profiles: par(mfrow = c(2, 2)) plot(pr1, root = TRUE) ## check for linearity par(mfrow = c(2, 2)) plot(pr1) par(mfrow = c(2, 2)) plot(pr1, approx = TRUE) par(mfrow = c(2, 2)) plot(pr1, Log = TRUE) par(mfrow = c(2, 2)) plot(pr1, Log = TRUE, relative = FALSE) ## other link functions: fm4.lgt <- update(fm1, link = "logit") ## default fm4.prt <- update(fm1, link = "probit") fm4.ll <- update(fm1, link = "loglog") fm4.cll <- update(fm1, link = "cloglog") fm4.cct <- update(fm1, link = "cauchit") anova(fm4.lgt, fm4.prt, fm4.ll, fm4.cll, fm4.cct) ## structured thresholds: fm5 <- update(fm1, threshold = "symmetric") fm6 <- update(fm1, threshold = "equidistant") anova(fm1, fm5, fm6) ## the slice methods: slice.fm1 <- slice(fm1) par(mfrow = c(3, 3)) plot(slice.fm1) ## see more at '?slice.clm' ## Another example: fm.soup <- clm(SURENESS ~ PRODID, data = soup) summary(fm.soup) if(require(MASS)) { ## dropterm, addterm, stepAIC, housing fm1 <- clm(rating ~ temp * contact, data = wine) dropterm(fm1, test = "Chi") addterm(fm1, ~.+judge, test = "Chi") fm3 <- stepAIC(fm1) summary(fm3) ## Example from MASS::polr: fm1 <- clm(Sat ~ Infl + Type + Cont, weights = Freq, data = housing) summary(fm1) } } \keyword{models} ordinal/man/dropCoef.Rd0000644000176200001440000000275012175705440014516 0ustar liggesusers\name{drop.coef} \alias{drop.coef} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Ensure Full Rank Design Matrix } \description{ Coefficients (columns) are dropped from a design matrix to ensure that it has full rank. } \usage{ drop.coef(X, silent = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{X}{ a design matrix, e.g., the result of \code{\link{model.matrix}} possibly of less than full column rank, i.e., with redundant parameters. Works for \code{ncol(X) >= 0} and \code{nrow(X) >= 0}. } \item{silent}{ should a message not be issued if X is column rank deficient? } } \details{ Redundant columns of the design matrix are identified with the LINPACK implementation of the \code{\link{qr}} decomposition and removed. The returned design matrix will have \code{qr(X)$rank} columns. } \value{ The design matrix \code{X} without redundant columns. } \author{ Rune Haubo B Christensen } \seealso{ \code{\link{qr}} and \code{\link{lm}} } \examples{ X <- model.matrix( ~ PRODID * DAY, data = soup) ncol(X) newX <- drop.coef(X) ncol(newX) ## Essentially this is being computed: qr.X <- qr(X, tol = 1e-7, LAPACK = FALSE) newX <- X[, qr.X$pivot[1:qr.X$rank], drop = FALSE] ## is newX of full column rank? ncol(newX) == qr(newX)$rank ## the number of columns being dropped: ncol(X) - ncol(newX) } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{models} ordinal/man/clmm.control.Rd0000644000176200001440000000401013277541507015361 0ustar liggesusers\name{clmm.control} \alias{clmm.control} %- Also NEED an '\alias' for EACH other topic documented here. 
\title{ Set control parameters for cumulative link mixed models } \description{ Set control parameters for cumulative link mixed models } \usage{ clmm.control(method = c("nlminb", "ucminf", "model.frame"), ..., trace = 0, maxIter = 50, gradTol = 1e-4, maxLineIter = 50, useMatrix = FALSE, innerCtrl = c("warnOnly", "noWarn", "giveError"), checkRanef = c("warn", "error", "message")) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{method}{ the optimizer used to maximize the marginal likelihood function. } \item{\dots}{control arguments passed on to the optimizer; see \code{\link[ucminf]{ucminf}} for details. } \item{trace}{numerical, if > 0 information is printed about and during the outer optimization process, if < 0 information is also printed about the inner optimization process. Defaults to \code{0}. } \item{maxIter}{the maximum number of Newton updates of the inner optimization. Defaults to \code{50}. } \item{gradTol}{the maximum absolute gradient of the inner optimization. } \item{maxLineIter}{the maximum number of step halvings allowed if a Newton(-Raphson) step overshoots during the inner optimization. } \item{useMatrix}{if \code{TRUE}, a general implementation of the Laplace approximation using the Matrix package is used, while if \code{FALSE} (default), a C implementation of the Laplace approximation valid only for models with a single random effects term is used when possible. \code{TRUE} is not valid for models fitted with quadrature methods. } \item{innerCtrl}{the use of warnings/errors if the inner optimization fails to converge. } \item{checkRanef}{the use of message/warning/error if there are more random effects than observations. } } \value{ a list of control parameters } \author{ Rune Haubo B Christensen } \seealso{ \code{\link{clmm}} } \keyword{models} ordinal/man/confint.clm.Rd0000644000176200001440000001302213330272017015162 0ustar liggesusers\name{confint} \alias{confint.clm} \alias{confint.profile.clm} \alias{profile.clm} \alias{plot.profile.clm} \title{ Confidence intervals and profile likelihoods for parameters in cumulative link models } \description{ Computes confidence intervals from the profiled likelihood for one or more parameters in a cumulative link model, or plots the profile likelihood. } \usage{ \method{confint}{clm}(object, parm, level = 0.95, type = c("profile", "Wald"), trace = FALSE, ...) \method{confint}{profile.clm}(object, parm = seq_len(nprofiles), level = 0.95, ...) \method{profile}{clm}(fitted, which.beta = seq_len(nbeta), which.zeta = seq_len(nzeta), alpha = 0.001, max.steps = 50, nsteps = 8, trace = FALSE, step.warn = 5, control = list(), ...) \method{plot}{profile.clm}(x, which.par = seq_len(nprofiles), level = c(0.95, 0.99), Log = FALSE, relative = TRUE, root = FALSE, fig = TRUE, approx = root, n = 1e3, ask = prod(par("mfcol")) < length(which.par) && dev.interactive(), ..., ylim = NULL) } \arguments{ \item{object, fitted, x}{ a fitted \code{\link{clm}} object or a \code{profile.clm} object. } \item{parm, which.par, which.beta, which.zeta}{ a numeric or character vector indicating which regression coefficients should be profiled. By default all coefficients are profiled. Ignored for \code{confint.clm} where all parameters are considered. } \item{level}{ the confidence level. For the \code{plot} method a vector of levels for which horizontal lines should be drawn. } \item{type}{ the type of confidence interval.
} \item{trace}{ if \code{trace} is \code{TRUE} or positive, information about progress is printed. } \item{Log}{ should the profile likelihood be plotted on the log-scale? } \item{relative}{ should the relative or the absolute likelihood be plotted? } \item{root}{ should the (approximately linear) likelihood root statistic be plotted? } \item{approx}{ should the Gaussian or quadratic approximation to the (log) likelihood be included? } \item{fig}{ should the profile likelihood be plotted? } \item{ask}{ logical; if \code{TRUE}, the user is asked before each plot, see \code{\link{par}}\code{(ask=.)}. } \item{n}{ the number of points used in the spline interpolation of the profile likelihood. } \item{ylim}{overrules default y-limits on the plot of the profile likelihood. } \item{alpha}{ the likelihood is profiled in the 100*(1-alpha)\% confidence region as determined by the profile likelihood. } \item{control}{ a list of control parameters for \code{\link{clm}}. Possibly use \code{\link{clm.control}} to set these. } %%\item{lambda}{ %% logical. Should profile or confidence intervals be computed for the %% link function parameter? Only used when one of the flexible link %% functions are used; see the \code{link}-argument in %% \code{\link{clm}}. %%} \item{max.steps}{ the maximum number of profiling steps in each direction for each parameter. } \item{nsteps}{ the (approximate) number of steps to take in each direction of the profile for each parameter. The step length is determined accordingly assuming a quadratic approximation to the log-likelihood function. The actual number of steps will often be close to \code{nsteps}, but will deviate when the log-likelihood function is irregular. } \item{step.warn}{ a warning is issued if the number of steps in each direction (up or down) for a parameter is less than \code{step.warn}. If few steps are taken, the profile will be unreliable and derived confidence intervals will be inaccurate. } \item{\dots}{ additional arguments to be passed on to methods. } } \value{ \code{confint}: A matrix with columns giving lower and upper confidence limits for each parameter. These will be labelled as (1-level)/2 and 1 - (1-level)/2 in \% (by default 2.5\% and 97.5\%). \code{plot.profile.clm} invisibly returns the profile object, i.e., a list of \code{\link{data.frame}}s with an \code{lroot} component for the likelihood root statistic and a matrix \code{par.vals} with values of the parameters. } \details{ These \code{confint} methods call the appropriate profile method, then find the confidence intervals by interpolation of the profile traces. If the profile object is already available, this should be used as the main argument rather than the fitted model object itself.
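For example (a minimal sketch mirroring the \code{wine} example below; the object names are illustrative), profiling once and reusing the profile object avoids refitting the model for each interval: \preformatted{
fm1 <- clm(rating ~ temp * contact, data = wine)
pr1 <- profile(fm1)          ## profile once (the expensive step)
confint(pr1)                 ## interpolation of the stored traces
confint(pr1, level = 0.99)   ## further levels need no new model fits
}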
} \author{Rune Haubo B Christensen} \seealso{ \code{\link{profile}} and \code{\link{confint}} } \examples{ ## Accurate profile likelihood confidence intervals compared to the ## conventional Wald intervals: fm1 <- clm(rating ~ temp * contact, data = wine) confint(fm1) ## type = "profile" confint(fm1, type = "Wald") pr1 <- profile(fm1) confint(pr1) ## plotting the profiles: par(mfrow = c(2, 2)) plot(pr1, root = TRUE) ## check for linearity par(mfrow = c(2, 2)) plot(pr1) par(mfrow = c(2, 2)) plot(pr1, approx = TRUE) par(mfrow = c(2, 2)) plot(pr1, Log = TRUE) par(mfrow = c(2, 2)) plot(pr1, Log = TRUE, relative = FALSE) ## Not likely to be useful but allowed for completeness: par(mfrow = c(2, 2)) plot(pr1, Log = FALSE, relative = FALSE) ## Example from polr in package MASS: ## Fit model from polr example: if(require(MASS)) { fm1 <- clm(Sat ~ Infl + Type + Cont, weights = Freq, data = housing) pr1 <- profile(fm1) confint(pr1) par(mfrow=c(2,2)) plot(pr1) } } \keyword{models} ordinal/man/clmmOld.Rd0000644000176200001440000002266612176227250014346 0ustar liggesusers\name{clmm2} \alias{clmm2} \title{Cumulative link mixed models} \description{ Fits cumulative link mixed models, i.e. cumulative link models with random effects via the Laplace approximation or the standard and the adaptive Gauss-Hermite quadrature approximation. The functionality in \code{\link{clm2}} is also implemented here. Currently only a single random term is allowed in the location-part of the model. A new implementation is available in \code{\link{clmm}} that allows for more than one random effect. } \usage{ clmm2(location, scale, nominal, random, data, weights, start, subset, na.action, contrasts, Hess = FALSE, model = TRUE, sdFixed, link = c("logistic", "probit", "cloglog", "loglog", "cauchit", "Aranda-Ordaz", "log-gamma"), lambda, doFit = TRUE, control, nAGQ = 1, threshold = c("flexible", "symmetric", "equidistant"), ...) } \arguments{ \item{location}{ as in \code{\link{clm2}}. } \item{scale}{ as in \code{\link{clm2}}. } \item{nominal}{ as in \code{\link{clm2}}. } \item{random}{ a factor for the random effects in the location-part of the model. } \item{data}{ as in \code{\link{clm2}}. } \item{weights}{ as in \code{\link{clm2}}. } \item{start}{ initial values for the parameters in the format \code{c(alpha, beta, log(zeta), lambda, log(stDev))} where \code{stDev} is the standard deviation of the random effects. } \item{subset}{ as in \code{\link{clm2}}. } \item{na.action}{ as in \code{\link{clm2}}. } \item{contrasts}{ as in \code{\link{clm2}}. } \item{Hess}{ logical for whether the Hessian (the inverse of the observed information matrix) should be computed. Use \code{Hess = TRUE} if you intend to call \code{summary} or \code{vcov} on the fit and \code{Hess = FALSE} in all other instances to save computing time. } \item{model}{ as in \code{\link{clm2}}. } \item{sdFixed}{ If \code{sdFixed} is specified (a positive scalar), a model is fitted where the standard deviation for the random term is fixed at the value of \code{sdFixed}. If \code{sdFixed} is left unspecified, the standard deviation of the random term is estimated from data. } \item{link}{ as in \code{\link{clm2}}. } \item{lambda}{ as in \code{\link{clm2}}. } \item{doFit}{ as in \code{\link{clm2}} although it can also be one of \code{c("no", "R", "C")}, where \code{"R"} uses the R implementation for fitting, \code{"C"} (default) uses the C implementation for fitting and \code{"no"} behaves as \code{FALSE} and returns the environment.
} \item{control}{ a call to \code{\link{clmm2.control}}. } \item{threshold}{ as in \code{\link{clm2}}. } \item{nAGQ}{ the number of quadrature points to be used in the adaptive Gauss-Hermite quadrature approximation to the marginal likelihood. Defaults to \code{1} which leads to the Laplace approximation. An odd number of quadrature points is encouraged and 3, 5 or 7 are usually enough to achieve high precision. Negative values give the standard, i.e. non-adaptive Gauss-Hermite quadrature. } \item{\dots}{ additional arguments are passed on to \code{\link{clm2.control}} and possibly further on to the optimizer, which can lead to surprising error or warning messages when arguments are mistyped. } } \details{ There are methods for the standard model-fitting functions, including \code{\link{summary}}, \code{\link{vcov}}, \code{\link[=profile.clmm2]{profile}}, \code{\link[=profile.clmm2]{plot.profile}}, \code{\link[=confint.profile.clmm2]{confint}}, \code{\link[=anova.clm2]{anova}}, \code{\link{logLik}}, \code{\link[=predict.clmm2]{predict}} and an \code{extractAIC} method. A Newton scheme is used to obtain the conditional modes of the random effects for Laplace and AGQ approximations, and a non-linear optimization is performed over the fixed parameter set to get the maximum likelihood estimates. The Newton scheme uses the observed Hessian rather than the expected as is done in e.g. \code{\link[lme4]{glmer}}, so results from the Laplace approximation for binomial fits should in general be more precise - particularly for other links than the \code{"logistic"}. Core parts of the function are implemented in C-code for speed. The function calls \code{\link{clm2}} to set up an environment and to get starting values. } \value{ If \code{doFit = FALSE} the result is an environment representing the model ready to be optimized. If \code{doFit = TRUE} the result is an object of class \code{"clmm2"} with the following components: \item{stDev}{ the standard deviation of the random effects. } \item{Niter}{ the total number of iterations in the Newton updates of the conditional modes of the random effects. } \item{grFac}{ the grouping factor defining the random effects. } \item{nAGQ}{ the number of quadrature points used in the adaptive Gauss-Hermite Quadrature approximation to the marginal likelihood. } \item{ranef}{ the conditional modes of the random effects, sometimes referred to as "random effect estimates". } \item{condVar}{ the conditional variances of the random effects at their conditional modes. } \item{beta}{the parameter estimates of the location part. } \item{zeta}{the parameter estimates of the scale part on the log scale; the scale parameter estimates on the original scale are given by \code{exp(zeta)}. } \item{Alpha}{vector or matrix of the threshold parameters. } \item{Theta}{vector or matrix of the thresholds. } \item{xi}{vector of threshold parameters, which, given a threshold function (e.g. \code{"equidistant"}), and possible nominal effects define the class boundaries, \code{Theta}. } \item{lambda}{the value of lambda if lambda is supplied or estimated, otherwise missing. } \item{coefficients}{the coefficients of the intercepts (\code{theta}), the location (\code{beta}), the scale (\code{zeta}), and the link function parameter (\code{lambda}). } \item{df.residual}{the number of residual degrees of freedom, calculated using the weights. } \item{fitted.values}{vector of fitted values conditional on the values of the random effects.
Use \code{\link[=predict.clm2]{predict}} to get the fitted values for a random effect of zero. An observation here is taken to be each of the scalar elements of the multinomial table and not a multinomial vector. } \item{convergence}{\code{TRUE} if the optimizer terminates without error and \code{FALSE} otherwise. } \item{gradient}{vector of gradients for the unit-variance random effects at their conditional modes. } \item{optRes}{list with results from the optimizer. The contents of the list depend on the choice of optimizer. } \item{logLik}{the log likelihood of the model at optimizer termination. } \item{Hessian}{if the model was fitted with \code{Hess = TRUE}, this is the Hessian matrix of the parameters at the optimum. } \item{scale}{\code{model.frame} for the scale model. } \item{location}{\code{model.frame} for the location model. } \item{nominal}{\code{model.frame} for the nominal model. } \item{edf}{the (effective) number of degrees of freedom used by the model. } \item{start}{the starting values. } \item{method}{character, the optimizer. } \item{y}{the response variable. } \item{lev}{the names of the levels of the response variable. } \item{nobs}{the (effective) number of observations, calculated as the sum of the weights. } \item{threshold}{character, the threshold function used in the model. } \item{estimLambda}{\code{1} if lambda is estimated in one of the flexible link functions and \code{0} otherwise. } \item{link}{character, the link function used in the model. } \item{call}{the matched call. } \item{contrasts}{contrasts applied to terms in location and scale models. } \item{na.action}{the function used to filter missing data. } } \author{Rune Haubo B Christensen} \references{ Agresti, A. (2002) \emph{Categorical Data Analysis.} Second edition. Wiley. } \examples{ options(contrasts = c("contr.treatment", "contr.poly")) ## More manageable data set: dat <- subset(soup, as.numeric(as.character(RESP)) <= 24) dat$RESP <- dat$RESP[drop=TRUE] m1 <- clmm2(SURENESS ~ PROD, random = RESP, data = dat, link="probit", Hess = TRUE, method="ucminf", threshold = "symmetric") m1 summary(m1) logLik(m1) vcov(m1) extractAIC(m1) anova(m1, update(m1, location = SURENESS ~ 1, Hess = FALSE)) anova(m1, update(m1, random = NULL)) ## Use adaptive Gauss-Hermite quadrature rather than the Laplace ## approximation: update(m1, Hess = FALSE, nAGQ = 3) ## Use standard Gauss-Hermite quadrature: update(m1, Hess = FALSE, nAGQ = -7) ################################################################## ## Binomial example with the cbpp data from the lme4-package: if(require(lme4)) { cbpp2 <- rbind(cbpp[,-(2:3)], cbpp[,-(2:3)]) cbpp2 <- within(cbpp2, { incidence <- as.factor(rep(0:1, each=nrow(cbpp))) freq <- with(cbpp, c(incidence, size - incidence)) }) ## Fit with Laplace approximation: fm1 <- clmm2(incidence ~ period, random = herd, weights = freq, data = cbpp2, Hess = 1) summary(fm1) ## Fit with the adaptive Gauss-Hermite quadrature approximation: fm2 <- clmm2(incidence ~ period, random = herd, weights = freq, data = cbpp2, Hess = 1, nAGQ = 7) summary(fm2) } } \keyword{models} ordinal/man/anovaOld.Rd0000644000176200001440000000432513331072710014510 0ustar liggesusers\name{anova.clm2} %%\alias{anova} \alias{anova.clm2} \alias{anova.clmm2} \title{Likelihood ratio test of cumulative link models} \description{ Comparison of cumulative link models in likelihood ratio tests. The models may differ by terms in location, scale and nominal formulae, in link, threshold function and random effect structure.
} \usage{ \method{anova}{clm2}(object, ..., test = c("Chisq", "none")) \method{anova}{clmm2}(object, ..., test = c("Chisq", "none")) } \arguments{ \item{object}{a \code{\link{clm2}} object. } \item{\dots}{one or more additional \code{\link{clm2}} objects. } \item{test}{if \code{test = "none"} the p-value for the likelihood ratio test is suppressed. } } \value{ The method returns an object of class \code{Anova} (for printing) and \code{data.frame} with the following elements \item{Model}{character description of the cumulative link models being compared. Location, scale and nominal formulae are separated by "|"s in this order. } \item{Resid.df}{the residual degrees of freedom } \item{-2logLik}{twice the negative log likelihood (proportional to the deviance)} \item{Test}{indication of which models are being compared. } \item{DF}{the difference in the degrees of freedom in the models being compared, i.e. the degrees of freedom for the chi-squared test. } \item{LR stat.}{the likelihood ratio statistic. } \item{Pr(Chi)}{the p-value from the likelihood ratio test. Absent if \code{test = "none"}. } } \author{Rune Haubo B Christensen} \seealso{ \code{\link[ordinal]{clm2}}, \code{\link[=addterm.clm2]{addterm}}, \code{\link[ordinal:addtermOld]{dropterm}} and \code{\link[=anova]{anova.default}} } \examples{ options(contrasts = c("contr.treatment", "contr.poly")) m1 <- clm2(SURENESS ~ PROD, scale = ~PROD, data = soup, link = "logistic") ## anova anova(m1, update(m1, scale = ~.-PROD)) mN1 <- clm2(SURENESS ~ 1, nominal = ~PROD, data = soup, link = "logistic") anova(m1, mN1) anova(m1, update(m1, scale = ~.-PROD), mN1) ## Fit model from polr example: if(require(MASS)) { fm1 <- clm2(Sat ~ Infl + Type + Cont, weights = Freq, data = housing) anova(fm1, update(fm1, scale =~ Cont)) } } \keyword{internal} ordinal/DESCRIPTION0000644000176200001440000000304113575527472013427 0ustar liggesusersPackage: ordinal Type: Package Title: Regression Models for Ordinal Data Version: 2019.12-10 Date: 2019-12-10 Authors@R: person(given="Rune Haubo Bojesen", family="Christensen", email="rune.haubo@gmail.com", role=c("aut", "cre")) LazyData: true ByteCompile: yes Depends: R (>= 2.13.0), stats, methods Imports: ucminf, MASS, Matrix, numDeriv Suggests: lme4, nnet, xtable, testthat (>= 0.8), tools Description: Implementation of cumulative link (mixed) models also known as ordered regression models, proportional odds models, proportional hazards models for grouped survival times and ordered logit/probit/... models. Estimation is via maximum likelihood and mixed models are fitted with the Laplace approximation and adaptive Gauss-Hermite quadrature. Multiple random effect terms are allowed and they may be nested, crossed or partially nested/crossed. Restrictions of symmetry and equidistance can be imposed on the thresholds (cut-points/intercepts). Standard model methods are available (summary, anova, drop-methods, step, confint, predict etc.) in addition to profile methods and slice methods for visualizing the likelihood function and checking convergence. 
License: GPL (>= 2) NeedsCompilation: yes URL: https://github.com/runehaubo/ordinal BugReports: https://github.com/runehaubo/ordinal/issues Packaged: 2019-12-15 20:40:57 UTC; rhbc Author: Rune Haubo Bojesen Christensen [aut, cre] Maintainer: Rune Haubo Bojesen Christensen Repository: CRAN Date/Publication: 2019-12-15 22:10:02 UTC ordinal/build/0000755000176200001440000000000013575515125013012 5ustar liggesusersordinal/build/vignette.rds0000644000176200001440000000041613575515125015352 0ustar liggesusersON0 hؤ~ ?@.@ݦqQDLIJƗZCg?y3e!Z $O0^d T!]Km(uJ^ݔEEPyV5Ońxp5|z|)"))) stopifnot(all.equal(coef(summary(m1)), csm1)) ## link functions: m2 <- update(m1, link = "probit") m3 <- update(m1, link = "cloglog") m4 <- update(m1, link = "loglog") m5 <- update(m1, link = "cauchit", start = coef(m1)) ## m6 <- update(m1, link = "Aranda-Ordaz", lambda = 1) ## m7 <- update(m1, link = "Aranda-Ordaz") ## m8 <- update(m1, link = "log-gamma", lambda = 1) ## m9 <- update(m1, link = "log-gamma") ## nominal effects: mN1 <- clm(sureness ~ 1, nominal = ~ prod, data = dat26, weights = wghts) anova(m1, mN1) ## optimizer / method: update(m1, scale = ~ 1, method = "Newton") update(m1, scale = ~ 1, method = "ucminf") update(m1, scale = ~ 1, method = "nlminb") update(m1, scale = ~ 1, method = "optim") update(m1, scale = ~ 1, method = "model.frame") update(m1, ~.-prod, scale = ~ 1, nominal = ~ prod, method = "model.frame") ## threshold functions mT1 <- update(m1, threshold = "symmetric") mT2 <- update(m1, threshold = "equidistant") anova(m1, mT1, mT2) ## Extend example from polr in package MASS: ## Fit model from polr example: if(require(MASS)) { fm1 <- clm(Sat ~ Infl + Type + Cont, weights = Freq, data = housing) fm1 summary(fm1) ## With probit link: summary(update(fm1, link = "probit")) ## Allow scale to depend on Cont-variable summary(fm2 <- update(fm1, scale =~ Cont)) summary(fm3 <- update(fm1, location =~.-Cont, nominal =~ Cont)) summary(fm4 <- update(fm2, location =~.-Cont, nominal =~ Cont)) anova(fm1, fm2, fm3, fm4) ## which seems to improve the fit } ################################# ## Better handling of ill-defined variance-covariance matrix of the ## parameters in summary methods for clm and clmm objects: dat26.2 <- data.frame(sureness = as.factor(1:12), prod = rep(c("One", "Two", "Three"),each=4)) fm1 <- clm(sureness ~ prod, ~prod, data = dat26.2) fm1 summary(fm1) summary(fm1, corr = 1) ## fm1$Hessian ## sl1 <- slice(fm1, 13) ## fitted(fm1) ## convergence(fm1) ## eigen(fm1$Hessian)$values ## sqrt(diag(solve(fm1$Hessian))) ## sqrt(diag(ginv(fm1$Hessian))) ################################# ## Missing values: ## Bug-report from Jonathan Williams ## , 18 March 2010 12:42 data(soup, package = "ordinal") soup$SURENESS[10] <- NA c1a <- clm(ordered(SURENESS)~PROD, data=soup); summary(c1a) c2a <- clm(ordered(SURENESS)~PROD, scale = ~PROD, data=soup) summary(c2a) c3a <- clm(ordered(SURENESS)~1, scale = ~PROD, data=soup) summary(c3a) data(soup, package = "ordinal") soup$PROD[1] <- NA c1a <- clm(ordered(SURENESS)~PROD, data=soup) summary(c1a) c2a <- clm(ordered(SURENESS)~PROD, scale = ~PROD, data=soup) summary(c2a) c3a <- clm(ordered(SURENESS)~1, scale = ~PROD, data=soup) summary(c3a) soup$SURENESS[10] <- NA c1a <- clm(ordered(SURENESS)~PROD, data=soup) summary(c1a) c2a <- clm(ordered(SURENESS)~PROD, scale = ~PROD, data=soup) summary(c2a) c3a <- clm(ordered(SURENESS)~1, scale = ~PROD, data=soup) summary(c3a) ## na.actions: c4a <- clm(ordered(SURENESS)~PROD, scale = ~PROD, data=soup, na.action=na.omit) summary(c4a) tC1 <- 
try(clm(ordered(SURENESS)~PROD, scale = ~PROD, data=soup, na.action=na.fail), silent = TRUE) stopifnot(inherits(tC1, "try-error")) c4a <- clm(ordered(SURENESS)~PROD, scale = ~PROD, data=soup, na.action=na.exclude) summary(c4a) tC2 <- try(clm(ordered(SURENESS)~PROD, scale = ~PROD, data=soup, na.action=na.pass), silent = TRUE) stopifnot(inherits(tC2, "try-error")) ## Subset: data(soup, package="ordinal") c4a <- clm(ordered(SURENESS)~PROD, scale = ~PROD, data=soup, subset = 1:100) c4a <- clm(ordered(SURENESS)~1, scale = ~PROD, data=soup, subset = 1:100) c4a <- clm(ordered(SURENESS)~PROD, data=soup, subset = 1:100) c4a <- clm(ordered(SURENESS)~1, data=soup, subset = 1:100) ## Offset: data(soup, package = "ordinal") set.seed(290980) offs <- runif(nrow(soup)) c4a <- clm(ordered(SURENESS)~PROD + offset(offs), scale = ~PROD, data=soup, subset = 1:100) summary(c4a) c4a <- clm(ordered(SURENESS)~PROD + offset(offs), scale = ~PROD + offset(offs), data=soup, subset = 1:100) summary(c4a) off2 <- offs c4a <- clm(ordered(SURENESS)~PROD + offset(offs), scale = ~PROD + offset(off2), data=soup, subset = 1:100) summary(c4a) c4a <- clm(ordered(SURENESS)~PROD, scale = ~PROD + offset(offs), data=soup, subset = 1:100) summary(c4a) ## data as matrix: dat26M <- as.matrix(dat26) m1 <- clm(sureness ~ prod, scale = ~prod, data = dat26, weights = wghts, link = "logit") summary(m1) ## data in enclosing environment: attach(soup) m1 <- clm(SURENESS ~ PROD, scale = ~PROD) summary(m1) detach(soup) ################################################################## ### Parameter estimates were not correct with large scale effects due ### to end cut-points being \pm 100. This is not enough for ### location-scale models, but seems to be for location-only models. ### Bug report from Ioannis Kosmidis : ### A 2x3 contingency table that will give a large estimated value of ### zeta x <- rep(0:1, each = 3) response <- factor(rep(c(1, 2, 3), times = 2)) freq <- c(1, 11, 1, 13, 1, 14) totals <- rep(tapply(freq, x, sum), each = 3) Dat <- data.frame(response, x, freq) ### Fitting a cumulative link model with dispersion effects modClm <- clm(response ~ x, scale = ~ x, weights = freq, data = Dat, control = clm.control(grtol = 1e-10, convTol = 1e-10)) summary(modClm) ### The maximized log-likelihood for this saturated model should be sum(freq*log(freq/totals)) # > sum(freq*log(freq/totals)) # [1] -29.97808 ### but apparently clm fails to maximize the log-likelihood modClm$logLik # > modClm$logLik # [1] -30.44452 stopifnot(isTRUE(all.equal(sum(freq*log(freq/totals)), modClm$logLik))) ### The estimates reported by clm are coef(modClm) coef.res <- structure(c(-2.48490664104217, 2.48490665578163, 2.48490659188594, 3.54758796387530), .Names = c("1|2", "2|3", "x", "x")) stopifnot(isTRUE(all.equal(coef.res, coef(modClm)))) # > modClm$coefficients # 1|2 2|3 x x # -2.297718 2.297038 1.239023 2.834285 ### while they should be (from my own software) # 1|2 2|3 x disp.x #-2.484907 2.484907 2.484907 3.547588 convergence(modClm) ################################################################## ordinal/tests/confint.R0000755000176200001440000000344011617035245014637 0ustar liggesusers################################# ## test profile and confint methods: library(ordinal) data(wine) fm1 <- clm(rating ~ contact + temp, data = wine) summary(fm1) ## profile.clm and confint.clm: pr1 <- profile(fm1) confint(pr1) pr1 <- profile(fm1, which.beta = 1:2) confint(pr1) pr1 <- profile(fm1, which.beta = 2:1) confint(pr1) pr1 <- profile(fm1, which.beta = 1) confint(pr1)
pr1 <- profile(fm1, which.beta = 2) confint(pr1) pr1 <- try(profile(fm1, which.beta = 0), silent = TRUE) ## error pr1 <- try(profile(fm1, which.beta = "no.par"), silent = TRUE) ## error pr1 <- try(profile(fm1, which.beta = -1), silent = TRUE) ## error pr1 <- profile(fm1, which.beta = "tempwarm") confint(pr1) pr1 <- profile(fm1, alpha = 0.1) confint(pr1) ## should give NA in this case? pr1 <- profile(fm1, max.steps = 9) pr1 <- profile(fm1, step.warn = 7) pr1 <- profile(fm1, nsteps = 6) pr1 <- profile(fm1, trace = 1) pr1 <- profile(fm1, control = list(gradTol = .1)) confint(pr1) ## not at all unreliable... ## single regression coef setting: fm2 <- clm(rating ~ contact, data = wine) summary(fm2) pr2 <- profile(fm2) confint(pr2) ## confint.clm: confint(fm1) confint(fm1, 2) confint(fm1, 1) confint(fm1, "tempwarm") confint(fm1, type = "profile") confint(fm1, type = "Wald") confint(fm1, 2, type = "Wald") confint(fm1, level = 0.5) confint(fm1, level = 1 - 1e-6) confint(fm1, level = 1 - 1e-10) ## extreme, but it works confint(fm1, trace = 1) ## plot.profile: pr1 <- profile(fm1, which.beta=1:2, alpha = 1e-3) par(mfrow = c(1,2)) plot(pr1) plot(pr1, 1) plot(pr1, "contactyes") plot(pr1, level = .97) plot(pr1, Log = TRUE) plot(pr1, relative = FALSE) plot(pr1, root = TRUE) plot(pr1, approx = TRUE) plot(pr1, n=10) plot(pr1, ylim = c(0,2)) plot(pr1, las = 1) plot(pr2) ordinal/tests/test-all.R0000644000176200001440000000011112447563614014721 0ustar liggesusers if(require(testthat) && require(ordinal)) { test_check("ordinal") } ordinal/tests/nominal.test.R0000644000176200001440000000413412175707057015617 0ustar liggesuserslibrary(ordinal) if(require(MASS)) { fm1 <- clm(Sat ~ Infl + Type + Cont, data=housing, weights=Freq) scale_test(fm1) nominal_test(fm1) fm2 <- update(fm1, scale=~Cont) scale_test(fm2) nominal_test(fm2) fm3 <- update(fm1, nominal=~ Cont) fm3$Theta anova(fm2, fm3) fm3$alpha.mat summary(fm3) } ################################# ### Testing nominal_test and scale_test: fm1 <- clm(rating ~ temp * contact, data=wine) ## names(fm1) fm2 <- clm(rating ~ temp * contact, data=wine, nominal=~contact) (an <- anova(fm1, fm2)) (nm <- nominal_test(fm1)) stopifnot(isTRUE(all.equal(an[2, 6], nm["contact", 5]))) fm2 <- clm(rating ~ temp * contact, data=wine, scale=~contact) (an <- anova(fm1, fm2)) (sc <- scale_test(fm1)) stopifnot(isTRUE(all.equal(an[2, 6], sc["contact", "Pr(>Chi)"]))) fm1 <- clm(rating ~ temp + contact, nominal=~temp + contact, data=wine) fm1 try(nominal_test(fm1), silent=TRUE)[1] ## gives error OK scale_test(fm1) fm1 <- clm(rating ~ temp + contact, scale=~temp + contact, data=wine) fm1 try(scale_test(fm1), silent=TRUE)[1] ## gives error OK nominal_test(fm1) ## Using weights: set.seed(123454321) wt <- runif(nrow(wine)) fm1 <- clm(rating ~ temp * contact, data=wine, weights=wt) nominal_test(fm1) scale_test(fm1) ## No nominal test for judge since that model is not identifiable: fm1 <- clm(rating ~ judge + temp + contact, data=wine) nominal_test(fm1) scale_test(fm1) fm1 <- clm(rating ~ judge + temp, nominal=~contact, data=wine) nominal_test(fm1) summary(fm1) ## A continuous variable: set.seed(123454321) x <- rnorm(nrow(wine), sd=1) fm <- clm(rating ~ temp, nominal=~contact * x, data=wine) nominal_test(fm) scale_test(fm) fm <- clm(rating ~ temp + x, nominal=~contact, data=wine) nominal_test(fm) scale_test(fm) ## poly: fm <- clm(rating ~ temp + poly(x, 2), nominal=~contact, data=wine) nominal_test(fm) scale_test(fm) ## another combination: fm1 <- clm(SURENESS ~ PRODID + DAY + SOUPTYPE + SOUPFREQ,
scale=~PROD, nominal=~ DAY*GENDER, data=soup) fm1 nominal_test(fm1) scale_test(fm1) ################################# ordinal/tests/test.clm.profile.R0000644000176200001440000000264512431442003016363 0ustar liggesuserslibrary(ordinal) ## Testing that the profile remains the same - that the model object ## is not 'distorted' by update(object/fitted, doFit=FALSE) set.seed(1234) wts <- runif(nrow(wine), 0, 2) fm3 <- clm(rating ~ temp + contact, data=wine, weights=wts) pr <- profile(fm3) set.seed(1234) fm3 <- clm(rating ~ temp + contact, data=wine, weights=runif(nrow(wine), 0, 2)) pr3 <- profile(fm3) ## > set.seed(1234) ## > fm3 <- clm(rating ~ temp + contact, data=wine, ## + weights=runif(nrow(wine), 0, 2)) ## > pr3 <- profile(fm3) ## Warning messages: ## 1: In profile.clm.beta(fitted, which.beta, alpha, max.steps, nsteps, : ## profile may be unreliable for tempwarm because only 1 ## steps were taken down ## 2: In profile.clm.beta(fitted, which.beta, alpha, max.steps, nsteps, : ## profile may be unreliable for tempwarm because only 1 ## steps were taken up ## 3: In profile.clm.beta(fitted, which.beta, alpha, max.steps, nsteps, : ## profile may be unreliable for contactyes because only 1 ## steps were taken down ## 4: In profile.clm.beta(fitted, which.beta, alpha, max.steps, nsteps, : ## profile may be unreliable for contactyes because only 1 ## steps were taken up ## stopifnot(isTRUE(all.equal(pr, pr3, check.attributes=FALSE))) stopifnot( isTRUE(all.equal(pr$tempwarm[, "lroot"], pr3$tempwarm[, "lroot"])), isTRUE(all.equal(pr$contactyes[, "lroot"], pr3$contactyes[, "lroot"]))) ordinal/tests/testthat/0000755000176200001440000000000013575515131014712 5ustar liggesusersordinal/tests/testthat/test-clm-predict.R0000644000176200001440000000065612450062502020212 0ustar liggesuserscontext("Test that clm.predict gives warnings if predvars is absent") fm1 <- clm(rating ~ temp + contact, data=wine) newData <- expand.grid(temp=levels(wine$temp), contact=levels(wine$contact)) expect_false(givesWarnings( predict(fm1, newdata=newData) )) attr(fm1$terms, "predvars") <- NULL expect_warning( predict(fm1, newdata=newData) , "terms object does not have a predvars attribute") ordinal/tests/testthat/test-contrasts.R0000644000176200001440000000504313440713655020034 0ustar liggesuserscontext("Contrast specification") test_that("clm gives contrast warnings when it should", { ## No warnings: ## Different combinations of terms in various formulae. Note that the ## contrasts apply to e.g.
'contact' in both 'formula' and 'scale': contr <- c(temp="contr.sum", contact="contr.sum") expect_false(givesWarnings( fm1 <- clm(rating ~ temp + contact, scale=~contact, data=wine) ## OK )) # expect_false(givesWarnings( # fm1 <- clm(rating ~ temp + contact, scale=~contact, data=wine, # contrasts=contr) ## OK # )) # expect_false(givesWarnings( # fm1 <- clm(rating ~ temp, scale=~contact, data=wine, # contrasts=contr) ## OK # )) # expect_false(givesWarnings( # fm1 <- clm(rating ~ temp, nominal=~contact, data=wine, # contrasts=contr) ## OK # )) # expect_false(givesWarnings( # fm1 <- clm(rating~1, scale=~temp, nominal=~contact, data=wine, # contrasts=contr) ## OK # )) ## These should give warnings: ## A warning is given if a variable is not present in any of the ## formulae: expect_warning( fm <- clm(rating ~ temp, contrasts=c(contact="contr.sum"), data=wine) , "variable 'contact' is absent: its contrasts will be ignored") expect_warning( fm <- clm(rating ~ temp, contrasts=contr, data=wine) , "variable 'contact' is absent: its contrasts will be ignored") expect_warning( fm <- clm(rating ~ 1, scale=~contact, contrasts=c(temp="contr.sum"), data=wine) , "variable 'temp' is absent: its contrasts will be ignored") expect_warning( fm <- clm(rating ~ 1, scale=~contact, contrasts=list(temp="contr.sum"), data=wine) , "variable 'temp' is absent: its contrasts will be ignored") }) test_that("checkContrasts gives warnings when it should", { ## No warnings: fm0 <- clm(rating ~ temp + contact, scale=~contact, data=wine) expect_false( givesWarnings(checkContrasts(fm0$S.terms, fm0$S.contrasts)) ) expect_false( givesWarnings(checkContrasts(fm0$terms, fm0$contrasts)) ) expect_false( givesWarnings(checkContrasts(fm0$terms, fm0$S.contrasts)) ) expect_false( givesWarnings(checkContrasts(fm0$terms, fm0$S.contrasts)) ) ## Warning: expect_warning( checkContrasts(fm0$S.terms, fm0$contrasts) , "variable 'temp' is absent: its contrasts will be ignored") }) ordinal/tests/testthat/test-clm.R0000644000176200001440000000537212447563614016576 0ustar liggesusers context("Appropriate error and warning messages from clm()") test_that("formula is specified in clm", { expect_error(clm(nominal=~contact, data=wine), "Model needs a formula") expect_error(clm(scale=~contact, data=wine), "Model needs a formula") expect_error(clm(), "Model needs a formula") }) test_that("response is not in scale or nominal", { ## No response in formula: expect_error( fm <- clm(~ temp + contact, data=wine) , "'formula' needs a response") ## response in scale: expect_error( fm <- clm(rating ~ temp, scale=rating ~ contact, data=wine) , "response not allowed in 'scale'") expect_error( fm <- clm(rating ~ temp, nominal=rating ~ contact, data=wine) , "response not allowed in 'nominal'") wine2 <- wine wine2$rate <- as.numeric(as.character(wine2$rating)) expect_error( fm <- clm(rate ~ temp + contact, data=wine2) , "response in 'formula' needs to be a factor") }) test_that("offset is allowed in formula, but not in scale and nominal", { wine2 <- wine set.seed(1) wine2$off <- runif(nrow(wine)) ## offset in formula is fine: expect_is( clm(rating ~ temp + contact + offset(off), data=wine2) , "clm") expect_is( clm(rating ~ offset(off), nominal=~contact, data=wine2) , "clm") ## no other terms in formula. ## offset in scale is also fine: expect_is( clm(rating ~ temp, scale=~contact + offset(off), data=wine2) , "clm") expect_is( clm(rating ~ contact + temp, scale=~offset(off), data=wine2) , "clm") ## no other terms in scale.
## offset as argument is not allowed: expect_error( clm(rating ~ temp + contact, offset=off, data=wine2) , "offset argument not allowed: specify 'offset' in formula or scale arguments instead") ## offset in nominal is not allowed: expect_error( clm(rating ~ temp, nominal=~contact + offset(off), data=wine2) , "offset not allowed in 'nominal'") expect_error( clm(rating ~ temp, nominal=~1 + offset(off), data=wine2) , "offset not allowed in 'nominal'") }) test_that("Intercept is needed and assumed", { expect_is( fm <- clm(rating ~ 1, data=wine) , "clm") expect_warning( fm <- clm(rating ~ -1 + temp, data=wine) , "an intercept is needed and assumed in 'formula'") expect_warning( fm <- clm(rating ~ 0 + temp, data=wine) , "an intercept is needed and assumed in 'formula'") expect_warning( fm <- clm(rating ~ 0, data=wine) , "an intercept is needed and assumed in 'formula'") ## and similar with scale (+nominal) }) ## test_that("", { ## ## }) ordinal/tests/testthat/test-clm-formula.R0000644000176200001440000001510212447563614020235 0ustar liggesuserscontext("Appropriate evaluation of formulae in clm()") ## These fail and give appropriate error messages: test_that("standard formulae are interpreted correctly/give right error messages", { expect_error( fm1 <- clm(rating ~ contact, scale=temp, data=wine) , "object 'temp' not found") expect_error( fm1 <- clm(rating ~ contact, scale=~Temp, data=wine) , "object 'Temp' not found") expect_error( fm1 <- clm(rating ~ contact, scale="temp", data=wine) , "unable to interpret 'formula', 'scale' or 'nominal'") sca <- "temp" expect_error( fm1 <- clm(rating ~ contact, scale=sca, data=wine) , "unable to interpret 'formula', 'scale' or 'nominal'") ## sca <- as.formula(sca) ## sca <- as.formula(temp) ## sca <- with(wine, as.formula(temp)) ## These all work as intended with no warnings or errors: fm1 <- clm(rating ~ contact, scale="~temp", data=wine) fm2 <- clm(rating ~ contact, scale=~temp, data=wine) sca <- "~temp" fm3 <- clm(rating ~ contact, scale=sca, data=wine) sca <- as.formula("~temp") fm4 <- clm(rating ~ contact, scale=sca, data=wine) fm5 <- clm(rating ~ contact, scale=as.formula(~temp), data=wine) fm6 <- clm(rating ~ contact, scale=as.formula("~temp"), data=wine) ## Test that they are all clm objects: for(txt in paste0("fm", 1:6)) expect_is(eval(parse(text=txt)), "clm") ################################# ## can evaluate if 'formula' is a character: f <- "rating ~ contact + temp" expect_is(clm(f, data=wine), "clm") expect_is(clm(as.formula(f), data=wine), "clm") ################################# }) test_that("variables are found in the right environments", { ## finding variables in the environment of the formula: makeform <- function() { f1 <- as.formula(rating ~ temp + contact) rating <- wine$rating temp <- wine$temp contact <- wine$contact f1 } ## 'makeform' makes a formula object in the environment of the ## function makeform: f1 <- makeform() f1 # print expect_is(f1, "formula") ## If we give the data, we can evaluate the model: expect_is(fm1 <- clm(f1, data=wine), "clm") ## We can also evaluate the model because the data are available in ## the environment associated with the formula: expect_is(fm1 <- clm(f1), "clm") ## For instance, the 'rating' variable is not found in the Global ## environment; we have to evaluate the 'name' of 'rating' in the ## appropriate environment: (try(rating, silent=TRUE)) expect_error( rating , "'rating' not found") expect_is( eval(as.name("rating"), envir=environment(f1)) , "factor") ## If instead we generate the formula in the
Global environment where ## the variables are not found, we cannot evaluate the model: f2 <- as.formula(rating ~ temp + contact) expect_error( fm2 <- clm(f2) ) ## Setting the appropriate environment of the formula restores the ## ability to evaluate the model: environment(f2) <- environment(f1) expect_is( fm2 <- clm(f2) , "clm") ################################# ## Use of formula-objects in location, scale and nominal: ## Bug-report from Lluís Marco Almagro ## 5 May 2010 17:58 f <- formula(rating ~ temp) fs <- formula( ~ contact) expect_is( m2 <- clm(f, scale = fs, data = wine) , "clm") }) test_that("data indexing works in formulae", { ################################# ## Other ways to construct formulas: set.seed(12345) y <- factor(sample(1:4,20,replace=TRUE)) x <- rnorm(20) data <- data.frame(y=y,x=x) rm(x, y) expect_is( fit <- clm(data$y ~ data$x) , "clm") expect_is( fit <- clm(data[,1] ~ data[,2]) , "clm") ## This previously failed, but now works: expect_is( fit <- clm(data$y ~ data$x, ~data$x) , "clm") }) test_that("clm may be invoked within functions", { ################################# ## Evaluation within other functions: ## date: January 18th 2012. ## ## The problem was raised by Stefan Herzog (stefan.herzog@unibas.ch) ## January 12th 2012 in trying to make clm work with glmulti. fun.clm <- function(formula, data) ### This only works because clm via eclm.model.frame is careful to ### evaluate the 'formula' in the parent environment such that it is not the ### character "formula" that is attempted evaluated. clm(formula, data = data) fun2.clm <- function(formula, data, weights, subset) { ### This should be the safe way to ensure evaluation of clm in the ### right environment. mc <- match.call() mc[[1]] <- as.name("clm") eval.parent(mc) } expect_is( fun.clm(rating ~ temp + contact, data=wine) ## works , "clm") expect_is( fun2.clm(rating ~ temp + contact, data=wine) ## works , "clm") form1 <- "rating ~ temp + contact" expect_is( fun.clm(form1, data=wine) ## works , "clm") expect_is( fun2.clm(form1, data=wine) ## works , "clm") form2 <- formula(rating ~ temp + contact) expect_is( fm1 <- fun.clm(form2, data=wine) ## works , "clm") expect_is( fm2 <- fun2.clm(form2, data=wine) ## works , "clm") ## Notice that clm is not able to get the name of the data (wine) ## correct when using fun.clm: expect_true(deparse(fm1$call$data) == "data") expect_true(deparse(fm2$call$data) == "wine") }) test_that("no line breaking in long formulae", { ################################# ## Evaluation of long formulas: no line breaking in getFullForm: rhs <- paste(names(soup)[c(3, 5:12)], collapse=" + ") Location <- as.formula(paste("SURENESS ~ ", rhs, sep=" ")) Scale <- as.formula("~ PROD") expect_is( fm5 <- clm(Location, scale=Scale, data=soup) , "clm") }) test_that("'.'-notation works in formula", { ################################# ## Check that "."-notation works in formula: ## December 25th 2014, RHBC data(wine) wine2 <- wine[c("rating", "contact", "temp")] ## str(wine2) fm0 <- clm(rating ~ ., data=wine2) fm1 <- clm(rating ~ contact + temp, data=wine2) keep <- c("coefficients", "logLik", "info") fun <- function(x, y) stopifnot(isTRUE(all.equal(x, y))) mapply(fun, fm0[keep], fm1[keep]) fun <- function(x, y) {expect_equal(x, y); invisible()} mapply(fun, fm0[keep], fm1[keep]) ################################# }) ordinal/tests/testthat/test-clm-profile.R0000644000176200001440000000046512450322423020217 0ustar liggesuserscontext("Testing error message from profile.clm") expect_warning( fm2 <- clm(rating ~
contact, scale=~contact, nominal=~contact, data=wine) , "\\(1\\) Hessian is numerically singular") expect_error(profile(fm2) , "Cannot get profile when vcov\\(fitted\\) contains NAs") ordinal/tests/testthat/test-clmm-checkRanef.R0000644000176200001440000000214713277541507021002 0ustar liggesuserscontext("Testing error-warning-message from clmm via checkRanef") ## Make example with more random effects than observations: wine$fake <- factor(c(1:65, 1:65)[1:nrow(wine)]) wine$fakeToo <- factor(1:nrow(wine)) ## Check warning, error and 'message' messages: expect_warning( fmm2 <- clmm(rating ~ temp + contact + (1|judge) + (1|fake), data=wine) , "no. random effects") expect_warning( fmm2 <- clmm(rating ~ temp + contact + (1|judge) + (1|fake), data=wine, checkRanef="warn") , "no. random effects") expect_error( fmm2 <- clmm(rating ~ temp + contact + (1|judge) + (1|fake), data=wine, checkRanef="error") , "no. random effects") expect_message( fmm2 <- clmm(rating ~ temp + contact + (1|judge) + (1|fake), data=wine, checkRanef="message") , "no. random effects") expect_error( fmm2 <- clmm(rating ~ temp + contact + (1|fakeToo), data=wine, checkRanef="error") , "no. random effects") expect_error( fmm2 <- clmm(rating ~ temp + contact + (1|judge) + (1|fakeToo), data=wine, checkRanef="error") , "no. random effects") ordinal/tests/testthat/test-utils.R0000644000176200001440000000105312450062502017137 0ustar liggesusers context("testing namedList") a <- 1 b <- 2 c <- 3 d <- list(e=2, f=factor(letters[rep(1:2, 2)])) g <- matrix(runif(9), 3) h <- NULL test_that("namedList returns a named list", { res <- namedList(a, b, c) expect_equal(names(res), c("a", "b", "c")) expect_equivalent(res, list(a, b, c)) res <- namedList(a, b, c, d, g) expect_equal(names(res), c("a", "b", "c", "d", "g")) expect_equivalent(res, list(a, b, c, d, g)) res <- namedList(a, h) expect_equal(names(res), c("a", "h")) expect_equivalent(res, list(a, h)) }) ordinal/tests/testthat/test-misc.R0000644000176200001440000000024112457750665016755 0ustar liggesuserscontext("Test of general functionality") test_that("citation reports year", { txt <- citation("ordinal") expect_true(as.logical(grep("year", txt))) }) ordinal/tests/clmm.R0000755000176200001440000000163711624463661014142 0ustar liggesuserslibrary(ordinal) data(wine) ################################# ## Estimation with a single simple RE term: ## Laplace: fmm1 <- clmm(rating ~ contact + temp + (1|judge), data=wine) summary(fmm1) ## GHQ: fmm.ghq <- clmm(rating ~ contact + temp + (1|judge), data=wine, nAGQ=-10) summary(fmm.ghq) ## AGQ: fmm.agq <- clmm(rating ~ contact + temp + (1|judge), data=wine, nAGQ=10) summary(fmm.agq) ## tests: ## Notice warning about Laplace with multiple REs when nAGQ != 1: fmm1 <- try(clmm(rating ~ contact + temp + (1|judge) + (1|bottle), data=wine, nAGQ=10)) stopifnot(inherits(fmm1, "try-error")) ################################# ## Estimation with several RE terms: data(soup, package="ordinal") fmm <- clmm(SURENESS ~ PROD + (1|RESP) + (1|PROD:RESP), data=soup, threshold="equidistant") summary(fmm) ################################# ordinal/tests/testAnova.clm2.R0000644000176200001440000000272412176227351016002 0ustar liggesuserslibrary(ordinal) options(contrasts = c("contr.treatment", "contr.poly")) ## More manageable data set: (tab26 <- with(soup, table("Product" = PROD, "Response" = SURENESS))) dimnames(tab26)[[2]] <- c("Sure", "Not Sure", "Guess", "Guess", "Not Sure", "Sure") dat26 <- expand.grid(sureness = as.factor(1:6), prod = c("Ref", "Test")) dat26$wghts <- 
c(t(tab26)) m1 <- clm(sureness ~ prod, scale = ~prod, data = dat26, weights = wghts, link = "logit") ## anova m2 <- update(m1, scale = ~1) anova(m1, m2) mN1 <- clm(sureness ~ 1, nominal = ~prod, data = dat26, link = "logit") anova(m1, mN1) anova(m1, m2, mN1) ## dropterm if(require(MASS)) { dropterm(m1, test = "Chi") mB1 <- clm(SURENESS ~ PROD + GENDER + SOUPTYPE, scale = ~ COLD, data = soup, link = "probit") dropterm(mB1, test = "Chi") # or ## addterm addterm(mB1, scope = ~.^2, test = "Chi") ## addterm(mB1, scope = ~ . + AGEGROUP + SOUPFREQ, ## test = "Chi", which = "location") ## addterm(mB1, scope = ~ . + GENDER + SOUPTYPE, ## test = "Chi", which = "scale") ## Fit model from polr example: ## data(housing, package = "MASS") fm1 <- clm(Sat ~ Infl + Type + Cont, weights = Freq, data = housing) ## addterm(fm1, ~ Infl + Type + Cont, test= "Chisq", which = "scale") dropterm(fm1, test = "Chisq") fm2 <- update(fm1, scale =~ Cont) fm3 <- update(fm1, formula =~.-Cont, nominal =~ Cont) anova(fm1, fm2, fm3) } ordinal/tests/test.clm.Theta.R0000644000176200001440000001201012447075623015770 0ustar liggesuserslibrary(ordinal) ################################# ## 1 categorical variable in nominal: fm <- clm(rating ~ temp, nominal=~contact, data=wine) fm$Theta fm$alpha.mat ## Threshold effects: fm <- clm(rating ~ temp, nominal=~contact, data=wine, threshold="symmetric") fm$Theta fm$alpha.mat fm <- clm(rating ~ temp, nominal=~contact, data=wine, threshold="equidistant") fm$Theta fm$alpha.mat ## Singular fit is still ok (with a warning, though) fm <- clm(rating ~ contact, nominal=~temp, data=wine) fm$alpha.mat fm$Theta ################################# ## 1 continuous variable: set.seed(123) x <- rnorm(nrow(wine), sd=1) fm <- clm(rating ~ temp, nominal=~ x, data=wine) fm$alpha.mat fm$Theta fm <- clm(rating ~ temp, nominal=~ poly(x, 2), data=wine) fm$alpha.mat fm$Theta ################################# ## 1 categorical + 1 continuous variable: set.seed(123) x <- rnorm(nrow(wine), sd=1) fm <- clm(rating ~ temp, nominal=~contact + x, data=wine) fm$alpha.mat fm$Theta fm <- clm(rating ~ temp, nominal=~contact + x, data=wine, threshold="symmetric") fm$alpha.mat fm$Theta ################################# ### NOTE: To get the by-threshold nominal effects of continuous terms ## use: with(fm, t(apply(alpha.mat, 1, function(th) tJac %*% th))) ################################# ## Interactions: fm <- clm(rating ~ temp, nominal=~contact:x, data=wine) fm$alpha.mat fm$Theta fm <- clm(rating ~ temp, nominal=~contact+x+contact:x, data=wine) fm$alpha.mat fm$Theta fm <- clm(rating ~ temp, nominal=~contact*x, data=wine) fm$alpha.mat fm$Theta ## polynomial terms: fm <- clm(rating ~ temp, nominal=~contact + poly(x, 2), data=wine) fm$alpha.mat fm$Theta ## logical variables: (treated like numeric variables) wine$Con <- as.character(wine$contact) == "yes" fm <- clm(rating ~ temp, nominal=~Con, data=wine) fm$Theta fm$alpha.mat wine$Con.num <- 1 * wine$Con fm <- clm(rating ~ temp, nominal=~Con.num, data=wine) fm$Theta fm$alpha.mat ################################# ## Two continuous variables: set.seed(321) y <- rnorm(nrow(wine), sd=1) fm1 <- clm(rating ~ temp, nominal=~y + x, data=wine) fm1$alpha.mat fm1$Theta ## summary(fm1) ################################# ## 1 categorical + 2 continuous variables: fm1 <- clm(rating ~ temp, nominal=~y + contact + x, data=wine) fm1$alpha.mat fm1$Theta fm1 <- clm(rating ~ temp, nominal=~contact + x + contact:x + y, data=wine) summary(fm1) fm1$Theta fm1$alpha.mat fm1 <- clm(rating ~ temp, 
nominal=~contact*x + y, data=wine) fm1$Theta fm1$alpha.mat t(fm1$alpha.mat) fm1 ################################# ## ordered factors (behave like numerical variables): data(soup, package="ordinal") fm2 <- clm(SURENESS ~ 1, nominal=~PRODID + DAY, data=soup) fm2$Theta fm2$alpha.mat prodid <- factor(soup$PRODID, ordered=TRUE) fm2 <- clm(SURENESS ~ 1, nominal=~prodid + DAY, data=soup) fm2$alpha.mat fm2$Theta fm2 <- clm(SURENESS ~ 1, nominal=~prodid, data=soup) fm2$alpha.mat fm2$Theta ################################# ## Aliased Coefficients: ## ## Example where the interaction in the nominal effects is aliased (by ## design). Here the two Theta matrices coincide. The alpha.mat ## matrices are similar except one has an extra row with NAs: soup2 <- soup levels(soup2$DAY) levels(soup2$GENDER) xx <- with(soup2, DAY == "2" & GENDER == "Female") ## Model with additive nominal effects: fm8 <- clm(SURENESS ~ PRODID, nominal= ~ DAY + GENDER, data=soup2, subset=!xx) fm8$alpha.mat fm8$Theta ## Model with non-additive, but aliased nominal effects: fm9 <- clm(SURENESS ~ PRODID, nominal= ~ DAY * GENDER, data=soup2, subset=!xx) fm9$alpha.mat fm9$Theta stopEqual <- function(x, y, ca=FALSE) stopifnot(isTRUE(all.equal(x, y, check.attributes=ca))) stopEqual(fm8$alpha.mat, fm9$alpha.mat[1:3, ]) stopEqual(fm8$Theta, fm9$Theta) stopEqual(logLik(fm8), logLik(fm9)) ################################# ## Weights: set.seed(12345) wts <- runif(nrow(soup)) fm2 <- clm(SURENESS ~ 1, nominal=~SOUPTYPE + DAY, data=soup, weights=wts) fm2$Theta ## Offset (correctly gives an error) fm2 <- try(clm(SURENESS ~ 1, nominal=~SOUPTYPE + DAY + offset(wts), data=soup), silent=TRUE) stopifnot(inherits(fm2, "try-error")) ################################# ### Other (misc) examples: fm2 <- clm(SURENESS ~ 1, nominal=~SOUPTYPE + DAY, data=soup) fm2$Theta fm2 fm2 <- clm(SURENESS ~ 1, nominal=~SOUPTYPE * DAY, data=soup) fm2$Theta fm2 fm2$alpha.mat fm2 <- clm(SURENESS ~ 1, nominal=~SOUPTYPE * DAY, data=soup, threshold="symmetric") fm2$Theta fm2$alpha.mat ################################# ### Check correctness of Theta matrix when intercept is removed in ### nominal formula: ### December 25th 2014, RHBC fm1 <- clm(rating ~ temp, nominal=~contact-1, data=wine) fm2 <- clm(rating ~ temp, nominal=~contact, data=wine) stopifnot(isTRUE(all.equal(fm1$Theta, fm2$Theta))) stopifnot(isTRUE(all.equal(fm1$logLik, fm2$logLik))) wine2 <- wine wine2$contact <- relevel(wine2$contact, "yes") fm3 <- clm(rating ~ temp, nominal=~contact, data=wine2) stopifnot(isTRUE(all.equal(coef(fm1, na.rm=TRUE), coef(fm3)))) ################################# ordinal/tests/test.clm.convergence.R0000644000176200001440000000343312431426564017227 0ustar liggesuserslibrary(ordinal) ## Testing that errors in chol() are caught soon enough: cy <- with(wine, which(temp == "cold" & contact == "yes")) wine2 <- subset(wine, subset=(!1:nrow(wine) %in% cy)) wine2[c(9, 15, 46), "rating"] <- NA fm1 <- clm(rating ~ temp, scale=~contact, nominal=~contact, data=wine2) fm1 <- try(clm(rating ~ temp, scale=~contact, nominal=~contact, data=wine2, control=list(gradTol=1e-12)), silent=TRUE) fm2 <- try(clm(rating ~ temp, scale=~contact, nominal=~contact, data=wine2, control=list(gradTol=1e-15)), silent=TRUE) ## These gave errors in version 2014.11-12.
stopifnot(!inherits(fm1, "try-error")) stopifnot(!inherits(fm2, "try-error")) summary(fm1) summary(fm2) ## Error in convergence.clm() due to bad evaluation of model ## environment with update(object, doFit=FALSE): wine3 <- wine set.seed(1234) wts <- runif(nrow(wine3), 0, 2) fm3 <- clm(rating ~ temp + contact, data=wine3, weights=wts) c0 <- convergence(fm3) set.seed(1234) fm3 <- clm(rating ~ temp + contact, data=wine3, weights=runif(nrow(wine3), 0, 2)) c1 <- convergence(fm3) c0$info$logLik.Error c1$info$logLik.Error all.equal(c0$info$logLik.Error, c1$info$logLik.Error) ## In version 2014.11-14: ## > wine3 <- wine ## > set.seed(1234) ## > wts <- runif(nrow(wine3), 0, 2) ## > fm3 <- clm(rating ~ temp + contact, data=wine3, ## + weights=wts) ## > c0 <- convergence(fm3) ## > set.seed(1234) ## > fm3 <- clm(rating ~ temp + contact, data=wine3, ## + weights=runif(nrow(wine3), 0, 2)) ## > c1 <- convergence(fm3) ## > c0$info$logLik.Error ## [1] "<1e-10" ## > c1$info$logLik.Error ## [1] "4.80e+00" ## > all.equal(c0$info$logLik.Error, c1$info$logLik.Error) ## [1] "1 string mismatch" stopifnot(c0$info$logLik.Error == c1$info$logLik.Error) ordinal/tests/test.clm.model.matrix.R0000644000176200001440000001104213332050747017324 0ustar liggesuserslibrary(ordinal) ## source("test.clm.model.matrix.R") ## library(devtools) ## r2path <- "/Users/rhbc/Documents/Rpackages/ordinal/pkg/ordinal" ## clean_dll(pkg = r2path) ## load_all(r2path) ## Check that get_clmDesign works in standard setting: fm1 <- clm(rating ~ temp, scale=~contact, nominal=~contact, data=wine) contr <- c(fm1$contrasts, fm1$S.contrasts, fm1$nom.contrasts) XX <- ordinal:::get_clmDesign(fm1$model, terms(fm1, "all"), contrasts=contr) XX2 <- update(fm1, method="design") (keep <- intersect(names(XX), names(XX2))) (test <- mapply(function(x, y) isTRUE(all.equal(x, y)), XX[keep], XX2[keep])) stopifnot(all(test)) ## Check that get_clmDesign works with singular fit and NAs: cy <- with(wine, which(temp == "cold" & contact == "yes")) wine2 <- subset(wine, subset=(!1:nrow(wine) %in% cy)) wine2[c(9, 15, 46), "rating"] <- NA fm1 <- clm(rating ~ temp, scale=~contact, nominal=~contact, data=wine2) contr <- c(fm1$contrasts, fm1$S.contrasts, fm1$nom.contrasts) XX <- ordinal:::get_clmDesign(fm1$model, terms(fm1, "all"), contrasts=contr) XX2 <- update(fm1, method="design") (keep <- intersect(names(XX), names(XX2))) (test <- mapply(function(x, y) isTRUE(all.equal(x, y)), XX[keep], XX2[keep])) stopifnot(all(test)) ## In this situation update and get_clmRho give the same results: wine2 <- wine fm1 <- clm(rating ~ temp + contact, data=wine2) ## OK rho1 <- ordinal:::get_clmRho.clm(fm1) l1 <- as.list(rho1) l2 <- as.list(update(fm1, doFit=FALSE)) (test <- mapply(function(x, y) isTRUE(all.equal(x, y)), l1, l2[names(l1)])) stopifnot(all(test)) ## If we modify the data (or other subset, weights, formulae, etc.) ## used in the model call, the results from update no longer correspond ## to the elements of the fitted model object. get_clmRho gets it ## right on the other hand: wine2[10:13, "rating"] <- NA l3 <- as.list(ordinal:::get_clmRho.clm(fm1)) l4 <- as.list(update(fm1, doFit=FALSE)) (test <- mapply(function(x, y) isTRUE(all.equal(x, y)), l1, l3)) stopifnot(all(test)) ## same (test <- mapply(function(x, y) isTRUE(all.equal(x, y)), l3, l4[names(l3)])) stopifnot(sum(!test) == 8) ## not all the same anymore! ## In conclusion l1, l2, and l3 are identical. l4 is different. 
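## A hedged sketch of the same point at the level of coefficients
## ('fm1.refit' is our name, not part of the package): update()
## re-evaluates the original call on the now-modified wine2, so the
## refit need not reproduce the stored fit, whereas get_clmRho above
## works from the stored model environment:
fm1.refit <- update(fm1) # refits on wine2 with the NAs dropped
isTRUE(all.equal(coef(fm1), coef(fm1.refit))) # expected FALSE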
################################# ## Test that checkContrasts gives appropriate warnings: contr <- c(temp="contr.sum", contact="contr.sum") fm1 <- clm(rating ~ temp + contact, scale=~contact, data=wine) ## OK fm1 <- clm(rating ~ temp + contact, scale=~contact, data=wine, contrasts=contr) ## OK fm1 <- clm(rating ~ temp, scale=~contact, data=wine, contrasts=contr) ## OK ## These should give warnings: fm1 <- clm(rating ~ temp, contrasts=c(contact="contr.sum"), data=wine) fm1 <- clm(rating ~ temp, contrasts=contr, data=wine) fm1 <- clm(rating ~ 1, scale=~contact, contrasts=c(temp="contr.sum"), data=wine) fm1 <- clm(rating ~ 1, scale=~contact, contrasts=list(temp="contr.sum"), data=wine) fm0 <- clm(rating ~ temp + contact, scale=~contact, data=wine) ordinal:::checkContrasts(fm0$S.terms, fm0$contrasts) ordinal:::checkContrasts(fm0$S.terms, fm0$S.contrasts) ordinal:::checkContrasts(fm0$terms, fm0$contrasts) ordinal:::checkContrasts(fm0$terms, fm0$S.contrasts) ################################# ## Check that clm and model.matrix respects contrast settings: options("contrasts" = c("contr.treatment", "contr.poly")) fm0 <- clm(rating ~ temp + contact, data=wine) options("contrasts" = c("contr.sum", "contr.poly")) fm1 <- clm(rating ~ temp + contact, data=wine) stopifnot(all(model.matrix(fm0)$X[, 2] %in% c(0, 1))) stopifnot(all(model.matrix(fm1)$X[, 2] %in% c(1, -1))) ################################# ## Check that model.matrix results do not depend on global contrast ## setting: options("contrasts" = c("contr.sum", "contr.poly")) fm0 <- clm(rating ~ temp + contact, scale=~contact, data=wine) MM <- model.matrix(fm0) options("contrasts" = c("contr.treatment", "contr.poly")) MM2 <- model.matrix(fm0) for(x in MM) print(head(x)) for(x in MM2) print(head(x)) stopifnot(all(mapply(all.equal, MM, MM2))) ################################# ## This gave a warning before getContrasts was implemented: fm0 <- clm(rating ~ temp + contact, scale=~contact, data=wine) MM <- model.matrix(fm0) ## > fm0 <- clm(rating ~ temp + contact, scale=~contact, data=wine) ## > MM <- model.matrix(fm0) ## Warning message: ## In model.matrix.default(res$S.terms, data = fullmf, contrasts.arg = getContrasts(res$S.terms, : ## variable 'temp' is absent, its contrast will be ignored for(x in MM) print(head(x)) ordinal/tests/clmm.formula.R0000755000176200001440000001231111752454332015572 0ustar liggesuserslibrary(ordinal) data(wine) ################################# ## Appropriate evaluation of formulas: ## These all work as intended with no warnings or errors: fm1 <- clmm(rating ~ contact + (1|judge), data=wine) fm1 fm1 <- clmm("rating ~ contact + (1|judge)", data=wine) fm1 fm1 <- clmm(as.formula("rating ~ contact + (1|judge)"), data=wine) fm1 fm1 <- clmm(as.formula(rating ~ contact + (1|judge)), data=wine) fm1 ################################# ### finding variables in the environment of the formula: makeform <- function() { f1 <- as.formula(rating ~ temp + contact + (1|judge)) rating <- wine$rating temp <- wine$temp contact <- wine$contact judge <- wine$judge f1 } ## 'makeform' makes a formula object in the environment of the ## function makeform: f1 <- makeform() f1 # print class(f1) ## If we give the data, we can evaluate the model: fm1 <- clmm(f1, data=wine) ## We can also evaluate the model because the data are available in ## the environment associated with the formula: fm1 <- clmm(f1) ## For instance, the 'rating' variable is not found in the Global ## environment; we have to evaluate the 'name' of 'rating' in the ## appropriate
environment: (try(rating, silent=TRUE)) eval(as.name("rating"), envir=environment(f1)) ## If instead we generate the formula in the Global environment where ## the variables are not found, we cannot evaluate the model: f2 <- as.formula(rating ~ temp + contact + (1|judge)) (try(fm2 <- clmm(f2), silent=TRUE)) environment(f2) <- environment(f1) fm2 <- clmm(f2) ################################# ## Use of formula-objects f <- formula(rating ~ temp + contact + (1|judge)) m2 <- clmm(f, data = wine) summary(m2) ################################# ## Other ways to construct formulas: set.seed(12345) y <- factor(sample(1:4,20,replace=TRUE)) x <- rnorm(20) b <- gl(5, 4, labels=letters[1:5]) data <- data.frame(y=y, x=x, b=b) rm(x, y, b) clmm(y ~ x + (1|b), data=data) fit <- clmm(data$y ~ data$x + (1|data$b)) fit fit <- clmm(data[, 1] ~ data[, 2] + (1|data[, 3])) fit ################################# ## Evaluation within other functions: ## date: January 18th 2012. ## ## The problem was raised by Stefan Herzog (stefan.herzog@unibas.ch) ## January 12th 2012 in trying to make clmm work with glmulti. fun.clmm <- function(formula, data) ### This only works because clmm via eclmm.model.frame is careful to ### evaluate the 'formula' in the parent environment such that it is not the ### character "formula" that is attempted evaluated. clmm(formula, data = data) fun2.clmm <- function(formula, data, weights, subset) { ### This should be the safe way to ensure evaluation of clmm in the ### right environment. mc <- match.call() mc[[1]] <- as.name("clmm") eval.parent(mc) } fun.clmm(rating ~ temp + contact + (1|judge), data=wine) ## works fun2.clmm(rating ~ temp + contact + (1|judge), data=wine) ## works form1 <- "rating ~ temp + contact + (1|judge)" fun.clmm(form1, data=wine) ## works fun2.clmm(form1, data=wine) ## works form2 <- formula(rating ~ temp + contact + (1|judge)) fun.clmm(form2, data=wine) ## works fun2.clmm(form2, data=wine) ## works ## Notice that clmm is not able to get the name of the data (wine) ## correct when using fun.clmm. ################################# ## ## Example 2: using clmm function ## # ## ## Now I want to consider judge as a random effect to account for ## ## grouping structure of data ## mod2 <- clmm(rating ~ temp + contact + (1|judge), data=wine) ## ## ##Again, I started by using my own code to run all potential models: ## ## put names of all your variables in this vector: ## vl2 <- c("temp", "contact") ## ## generate list of possible combinations of variables: ## combos2 <- NULL ## for(i in 1:length(vl2)) { ## combos2 <- c(combos2, combn(vl2, i, simplify = F)) ## } ## ## create formulae and run models one by one, saving them as model1, ## ## model2 etc...
## for (i in 1:length(combos2)) { ## vs2 <- paste(combos2[[i]], collapse=" + ") ## f2 <- formula(paste("rating ~ ", vs2, "+(1|judge)", sep="")) ## print(f2) ## assign(paste("model", i, sep=""), clmm(f2, data=wine)) ## } ## summary(model1) # etc ## summary(model2) # etc ## summary(model3) # etc ## ## models <- vector("list", length(combos2)) ## for(i in 1:length(combos2)) { ## vs2 <- paste(combos2[[i]], collapse=" + ") ## f2 <- formula(paste("rating ~ ", vs2, "+(1|judge)", sep="")) ## print(f2) ## models[[i]] <- clmm(f2, data=wine) ## ## assign(paste("model", i, sep=""), clmm(f2, data=wine)) ## } ## ## ## Coefficients, AIC and BIC: ## lapply(models, function(m) coef(summary(m))) ## lapply(models, AIC) ## lapply(models, BIC) ## ## ## library(MuMIn) ## ## dd2 <- dredge(mod2) ## does not work ## ## ?dredge ## ## traceback() ## ## mod2$formula ## ## terms(as.formula(formula(mod2))) ## ## ## ## library(lme4) ## ## fmm1 <- lmer(response ~ temp + contact + (1|judge), data=wine) ## ## fmm1 ## ## terms(as.formula(lme4:::formula(fmm1))) ## ## terms(as.formula(formula(fmm1))) ordinal/tests/anova.R0000755000176200001440000000111213277532341014300 0ustar liggesuserslibrary(ordinal) data(wine) fm1 <- clm(rating ~ temp, data=wine) fmm1 <- clmm(rating ~ temp + (1|judge), data=wine) ## These now give identical printed results: ## Previously the printed model names were messed up when anova.clmm ## was called. anova(fm1, fmm1) anova(fmm1, fm1) ## Testing if 'test' and 'type' arguments are ignored properly: fm1 <- clm(rating ~ temp + contact, data=wine) fm2 <- clm(rating ~ temp, data=wine) anova(fm1, fm2, test="Chi") anova(fm1, fm2, type="Chi") anova(fm1, fm2) ## calling anova.clmm anova(fmm1, fm1, test="Chi") anova(fmm1, fm1, type="Chi") ordinal/tests/test.sign.R0000644000176200001440000000610313315421516015110 0ustar liggesusers# test.sign.R # Test the use of sign.location and sign.nominal in clm.control(): library(ordinal) fm1 <- clm(rating ~ temp + contact, data=wine) fm2 <- clm(rating ~ temp + contact, data=wine, sign.location="positive") # dput(names(fm1)) keep <- c("aliased", "alpha", "cond.H", "contrasts", "convergence", "df.residual", "edf", "fitted.values", "formula", "formulas", "gradient", "info", "link", "logLik", "maxGradient", "message", "model", "n", "niter", "nobs", "start", "terms", "Theta", "threshold", "tJac", "xlevels", "y", "y.levels") check <- mapply(function(x, y) isTRUE(all.equal(x, y)), fm1[keep], fm2[keep]) stopifnot(all(check)) stopifnot(isTRUE(all.equal( fm1$beta, - fm2$beta ))) fm1 <- clm(rating ~ temp, nominal=~ contact, data=wine) fm2 <- clm(rating ~ temp, nominal=~ contact, data=wine, sign.nominal="negative") keep <- c("aliased", "beta", "cond.H", "contrasts", "convergence", "df.residual", "edf", "fitted.values", "formula", "formulas", "gradient", "info", "link", "logLik", "maxGradient", "message", "model", "n", "niter", "nobs", "start", "terms", "Theta", "threshold", "tJac", "xlevels", "y", "y.levels") # check <- mapply(function(x, y) isTRUE(all.equal(x, y)), fm1, fm2) check <- mapply(function(x, y) isTRUE(all.equal(x, y)), fm1[keep], fm2[keep]) stopifnot(all(check)) stopifnot(isTRUE(all.equal( fm1$alpha[5:8], -fm2$alpha[5:8] ))) fm1 <- clm(rating ~ temp, nominal=~ contact, data=wine) fm2 <- clm(rating ~ temp, nominal=~ contact, data=wine, sign.nominal="negative", sign.location="positive") keep <- c("aliased", "cond.H", "contrasts", "convergence", "df.residual", "edf", "fitted.values", "formula", "formulas", "gradient", "info", "link", "logLik", "maxGradient", "message",
"model", "n", "niter", "nobs", "start", "terms", "Theta", "threshold", "tJac", "xlevels", "y", "y.levels") # check <- mapply(function(x, y) isTRUE(all.equal(x, y)), fm1, fm2) check <- mapply(function(x, y) isTRUE(all.equal(x, y)), fm1[keep], fm2[keep]) stopifnot(all(check)) stopifnot( isTRUE(all.equal(fm1$alpha[5:8], -fm2$alpha[5:8])), isTRUE(all.equal(fm1$beta, -fm2$beta)) ) # Check predict method: newData <- with(wine, expand.grid(temp=levels(temp), contact=levels(contact))) (p1 <- predict(fm1, newdata=newData)) (p2 <- predict(fm2, newdata=newData)) stopifnot(isTRUE(all.equal(p1, p2))) stopifnot(isTRUE( all.equal(predict(fm1, newdata=wine, se=TRUE, interval=TRUE), predict(fm2, newdata=wine, se=TRUE, interval=TRUE)) )) # Check profile and confint methods: confint.default(fm1) confint.default(fm2) stopifnot( isTRUE(all.equal(confint(fm1), -confint(fm2)[, 2:1, drop=FALSE], check.attributes=FALSE)) ) fm1 <- clm(rating ~ temp + contact, data=wine) fm2 <- clm(rating ~ temp + contact, data=wine, sign.location="positive") pr1 <- profile(fm1) pr2 <- profile(fm2) stopifnot( isTRUE(all.equal(confint(fm1), - confint(fm2)[, 2:1], check.attributes=FALSE)) ) ordinal/tests/clm.formula.R0000644000176200001440000001126412447062614015420 0ustar liggesuserslibrary(ordinal) ## library(devtools) ## r2path <- "/Users/rhbc/Documents/Rpackages/ordinal/pkg/ordinal" ## clean_dll(pkg = r2path) ## load_all(r2path) ################################# ## Appropriate evaluation of formulas: ## These fail and give appropriate error messages: ## fm1 <- clm(rating ~ contact, scale=temp, data=wine) ## fm1 <- clm(rating ~ contact, scale=~Temp, data=wine) ## fm1 <- clm(rating ~ contact, scale="temp", data=wine) ## sca <- "temp" ## fm1 <- clm(rating ~ contact, scale=sca, data=wine) ## sca <- as.formula(sca) ## sca <- as.formula(temp) ## sca <- with(wine, as.formula(temp)) ## These all work as intended with no warnings or errors: fm1 <- clm(rating ~ contact, scale="~temp", data=wine) fm1 <- clm(rating ~ contact, scale=~temp, data=wine) sca <- "~temp" fm1 <- clm(rating ~ contact, scale=sca, data=wine) sca <- as.formula("~temp") fm1 <- clm(rating ~ contact, scale=sca, data=wine) fm1 <- clm(rating ~ contact, scale=as.formula(~temp), data=wine) fm1 <- clm(rating ~ contact, scale=as.formula("~temp"), data=wine) ################################# ## can evaluate if 'formula' is a character: f <- "rating ~ contact + temp" clm(f, data=wine) clm(as.formula(f), data=wine) ################################# ### finding variables in the environment of the formula: makeform <- function() { f1 <- as.formula(rating ~ temp + contact) rating <- wine$rating temp <- wine$temp contact <- wine$contact f1 } ## 'makeform' makes are formula object in the environment of the ## function makeform: f1 <- makeform() f1 # print class(f1) ## If we give the data, we can evaluate the model: fm1 <- clm(f1, data=wine) ## We can also evaluate the model because the data are available in ## the environment associated with the formula: fm1 <- clm(f1) ## For instance, the 'rating' variable is not found in the Global ## environment; we have to evaluate the 'name' of 'rating' in the ## appropriate environment: (try(rating, silent=TRUE)) eval(as.name("rating"), envir=environment(f1)) ## If instead we generate the formula in the Global environment where ## the variables are not found, we cannot evaluate the model: f2 <- as.formula(rating ~ temp + contact) (try(fm2 <- clm(f2), silent=TRUE)) environment(f2) <- environment(f1) fm2 <- clm(f2) ################################# 
## Use of formula-objects in location, scale and nominal: ## Bug-report from Llus Marco Almagro ## 5 May 2010 17:58 f <- formula(rating ~ temp) fs <- formula( ~ contact) m2 <- clm(f, scale = fs, data = wine) summary(m2) ################################# ## Other ways to construct formulas: set.seed(12345) y <- factor(sample(1:4,20,replace=TRUE)) x <- rnorm(20) data <- data.frame(y=y,x=x) rm(x, y) fit <- clm(data$y ~ data$x) fit fit <- clm(data[,1] ~ data[,2]) fit ## This previously failed, but now works: fit <- clm(data$y ~ data$x, ~data$x) fit ################################# ## Evaluation within other functions: ## date: January 18th 2012. ## ## The problem was raised by Stefan Herzog (stefan.herzog@unibas.ch) ## January 12th 2012 in trying to make clm work with glmulti. fun.clm <- function(formula, data) ### This only works because clm via eclm.model.frame is careful to ### evaluate the 'formula' in the parent environment such it is not the ### character "formula" that is attempted evaluated. clm(formula, data = data) fun2.clm <- function(formula, data, weights, subset) { ### This should be the safe way to ensure evaluation of clm in the ### right environment. mc <- match.call() mc[[1]] <- as.name("clm") eval.parent(mc) } fun.clm(rating ~ temp + contact, data=wine) ## works fun2.clm(rating ~ temp + contact, data=wine) ## works form1 <- "rating ~ temp + contact" fun.clm(form1, data=wine) ## works fun2.clm(form1, data=wine) ## works form2 <- formula(rating ~ temp + contact) fun.clm(form2, data=wine) ## works fun2.clm(form2, data=wine) ## works ## Notice that clm is not able to get the name of the data (wine) ## correct when using fun.clm. ################################# ## Evaluation of long formulas: no line breaking in getFullForm: data(soup, package="ordinal") rhs <- paste(names(soup)[c(3, 5:12)], collapse=" + ") Location <- as.formula(paste("SURENESS ~ ", rhs, sep=" ")) Scale <- as.formula("~ PROD") fm5 <- clm(Location, scale=Scale, data=soup) summary(fm5) ################################# ## Check that "."-notation works in formula: ## December 25th 2014, RHBC data(wine) wine2 <- wine[c("rating", "contact", "temp")] str(wine2) fm0 <- clm(rating ~ ., data=wine2) fm1 <- clm(rating ~ contact + temp, data=wine2) keep <- c("coefficients", "logLik", "info") fun <- function(x, y) stopifnot(isTRUE(all.equal(x, y))) mapply(fun, fm0[keep], fm1[keep]) ################################# ordinal/tests/test.clm.flex.link.R0000644000176200001440000000431613332050747016621 0ustar liggesusers# test.clm.flex.link.R library(ordinal) fm <- clm(rating ~ contact + temp, data=wine, link="log-gamma") fm summary(fm) vcov(fm) logLik(fm) extractAIC(fm) fm2 <- update(fm, link="probit") anova(fm, fm2) head(model.matrix(fm)$X) head(model.frame(fm)) coef(fm) coef(summary(fm)) nobs(fm) terms(fm) # profile(fm) # not implemented confint(fm) predict(fm, se=TRUE, interval = TRUE) predict(fm, type="class") newData <- expand.grid(temp = c("cold", "warm"), contact = c("no", "yes")) ## Predicted probabilities in all five response categories for each of ## the four cases in newData: predict(fm, newdata=newData, type="prob") predict(fm, newdata=newData, type="class") predict(fm, newdata=newData, type="prob", se.fit = TRUE, interval = TRUE) ## Aranda-Ordaz link: fm <- clm(rating ~ contact + temp, data=wine, link="Aranda-Ordaz") fm summary(fm) vcov(fm) logLik(fm) extractAIC(fm) fm2 <- update(fm, link="logit") anova(fm, fm2) head(model.matrix(fm)$X) head(model.frame(fm)) coef(fm) coef(summary(fm)) nobs(fm) terms(fm) # 
profile(fm) # not implemented confint(fm) predict(fm, se=TRUE, interval = TRUE) predict(fm, type="class") newData <- expand.grid(temp = c("cold", "warm"), contact = c("no", "yes")) ## Predicted probabilities in all five response categories for each of ## the four cases in newData: predict(fm, newdata=newData, type="prob") predict(fm, newdata=newData, type="class") predict(fm, newdata=newData, type="prob", se.fit = TRUE, interval = TRUE) ######################################################################## ### clm.fit ######################################################################## ## Example with log-gamma: fm1 <- clm(rating ~ contact + temp, data=wine, link="log-gamma") summary(fm1) ## get the model frame containing y and X: mf1 <- update(fm1, method="design") names(mf1) res <- clm.fit(mf1$y, mf1$X, link="log-gamma") ## invoking the factor method coef(res) stopifnot(all.equal(coef(res), coef(fm1))) ## Example with Aranda-Ordaz: fm1 <- clm(rating ~ contact + temp, data=wine, link="Aranda-Ordaz") mf1 <- update(fm1, method="design") res <- clm.fit(mf1$y, mf1$X, link="Aranda") ## invoking the factor method stopifnot(all.equal(coef(res), coef(fm1))) ordinal/tests/test0weights.R0000644000176200001440000000431512450062502015620 0ustar liggesuserslibrary(ordinal) options(contrasts = c("contr.treatment", "contr.poly")) ## library(devtools) ## r2path <- "/Users/rhbc/Documents/Rpackages/ordinal/pkg/ordinal" ## clean_dll(pkg = r2path) ## load_all(r2path) ## one zero weight: data(wine, package="ordinal") wts <- rep(1, nrow(wine)) wine$rating wts[1] <- 0 fm1 <- clm(rating ~ contact + temp, data=wine, weights=wts) fm1 fm1$n ## 72 fm1$nobs ## 71 confint(fm1) plot(profile(fm1)) plot(slice(fm1), 5) convergence(fm1) drop1(fm1, test="Chi") add1(fm1, scope=~.^2, test="Chi") ## clm_anova(fm1) pred <- predict(fm1, newdata=wine) ## OK step.fm1 <- step(fm1, trace=0) fitted(fm1) dim(model.matrix(fm1)$X) dim(model.matrix(fm1, "B")$B1) mf <- update(fm1, method="model.frame") str(mf) wts <- mf$wts dim(model.matrix(fm1)$X[wts > 0, , drop=FALSE]) fm1b <- clm(rating ~ temp, scale=~contact, data=wine, weights=wts) summary(fm1b) pr <- profile(fm1b) confint(pr) plot(pr, 1) fm1c <- clm(rating ~ temp, nominal=~contact, data=wine, weights=wts) summary(fm1c) pr <- profile(fm1c) confint(pr) plot(pr, 1) ## nominal.test(fm1) ## scale.test(fm1) ## zero out an entire response category: wts2 <- 1 * with(wine, rating != "2") fm2 <- clm(rating ~ contact + temp, data=wine, weights=wts2) fm2 fm2$n ## 72 fm2$nobs ## 50 ## Dimension of X and B1, B2 differ: dim(model.matrix(fm2)$X) dim(model.matrix(fm2, "B")$B1) ## Cannot directly evaluate predictions on the original data: try(predict(fm2, newdata=wine), silent=TRUE)[1] confint(fm2) profile(fm2) plot(slice(fm2), 5) step.fm2 <- step(fm2, trace=0) fitted(fm2) ## Scale and nominal effects: fm2b <- clm(rating ~ temp, scale=~contact, data=wine, weights=wts2) summary(fm2b) pr <- profile(fm2b) confint(pr) plot(pr, 1) fm2c <- clm(rating ~ temp, nominal=~contact, data=wine, weights=wts2) summary(fm2c) pr <- profile(fm2c) confint(pr) plot(pr, 1) pred <- predict(fm2c, newdata=wine[!names(wine) %in% "rating"]) pred <- predict(fm2b, newdata=wine[!names(wine) %in% "rating"]) ## nominal.test(fm2) ## scale.test(fm2) ## Different data sets (error): try(anova(fm1, fm2), silent=TRUE)[1] ## OK ## Test clm.fit: wts2 <- 1 * with(wine, rating != "2") mf2 <- update(fm2, method="design") fm3 <- with(mf2, clm.fit(y, X, weights=wts)) ################################# 
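## A minimal sketch added for illustration (not an original package
## test): a zero-weight observation should give the same fit as
## dropping that row via 'subset'; the tolerance acknowledges that the
## two fits only agree up to the optimizer's convergence tolerance.
wts0 <- rep(1, nrow(wine)); wts0[1] <- 0
fmA <- clm(rating ~ contact + temp, data=wine, weights=wts0)
fmB <- clm(rating ~ contact + temp, data=wine, subset=-1)
stopifnot(isTRUE(all.equal(coef(fmA), coef(fmB), tolerance=1e-5)))
#################################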
ordinal/tests/test.general.R0000644000176200001440000000010512446243451015563 0ustar liggesusers txt <- citation("ordinal") stopifnot(as.logical(grep("year", txt))) ordinal/tests/test.clm.single.anova.R0000644000176200001440000000342213333001127017275 0ustar liggesusers# test.clm.single.anova.R library(ordinal) # WRE says "using if(requireNamespace("pkgname")) is preferred, if possible." # even in tests: assertError <- function(expr, ...) if(requireNamespace("tools")) tools::assertError(expr, ...) else invisible() assertWarning <- function(expr, ...) if(requireNamespace("tools")) tools::assertWarning(expr, ...) else invisible() fm <- clm(rating ~ temp * contact, scale=~contact, data=wine) anova(fm, type="I") anova(fm, type="II") anova(fm, type="III") anova(fm, type=1) anova(fm, type=2) anova(fm, type=3) anova(fm, type="1") anova(fm, type="2") anova(fm, type="3") anova(fm, type="marginal") # Nominal effects: fm <- clm(rating ~ temp, nominal=~contact, data=wine) anova(fm) # Flexible links: fm1 <- clm(rating ~ temp + contact, link="log-gamma", data=wine) anova(fm1, type=1) anova(fm1, type=2) anova(fm1, type=3) # Equivalence of tests irrespective of contrasts: fm1 <- clm(SURENESS ~ PRODID * SOUPFREQ, data=soup) # summary(fm1) (an1 <- anova(fm1, type=3)) fm2 <- clm(SURENESS ~ PRODID * SOUPFREQ, data=soup, contrasts = list(SOUPFREQ = "contr.sum", PRODID = "contr.SAS")) # summary(fm2) anova(fm1, fm2) (an2 <- anova(fm2, type=3)) stopifnot( isTRUE(all.equal(an1, an2, check.attributes=FALSE)) ) # Aliased coefficients: fm1 <- clm(SURENESS ~ PRODID * DAY, data=soup) anova(fm1, type=1) anova(fm1, type=2) anova(fm1, type=3) # Aliased term (due to nominal effects): fm <- clm(rating ~ temp * contact, nominal=~contact, data=wine) anova(fm, type=1) anova(fm, type=2) anova(fm, type=3) # model with all NA in vcov(object): fm <- clm(rating ~ temp * contact, nominal=~contact, scale=~contact, data=wine) assertError(anova(fm, type=1)) # error assertError(anova(fm, type=2)) # error assertError(anova(fm, type=3)) # error all(is.na(vcov(fm))) ordinal/tests/test.clm.predict.R0000644000176200001440000001376612447061401016365 0ustar liggesuserslibrary(ordinal) ## source("test.clm.predict.R") ## library(devtools) ## r2path <- "/Users/rhbc/Documents/Rpackages/ordinal/pkg/ordinal" ## clean_dll(pkg = r2path) ## load_all(r2path) cy <- with(wine, which(temp == "cold" & contact == "yes")) options("contrasts" = c("contr.treatment", "contr.poly")) getOption("contrasts") ## Example model wine1.clm <- clm(rating ~ temp*contact, subset = -cy, data = wine) summary(wine1.clm) names(wine1.clm) wine.clm <- clm(rating~temp*contact, data=wine) summary(wine.clm) names(wine.clm) ## Make sure the same elements are present with a rank deficient model ## fit: stopifnot(all(names(wine1.clm) == names(wine.clm))) ## With treatment contrasts: options("contrasts" = c("contr.treatment", "contr.poly")) wine.clm <- clm(rating~temp*contact, data=wine) coef(summary(wine.clm)) head(model.matrix(wine.clm)$X) wine.clm$contrasts head(pred1 <- predict(wine.clm)$fit) ## With sum contrasts: options("contrasts" = c("contr.sum", "contr.poly")) wine.clm <- clm(rating~temp*contact, data=wine) coef(summary(wine.clm)) head(model.matrix(wine.clm)$X) wine.clm$contrasts head(pred2 <- predict(wine.clm)$fit) ## Mixture of sum and treatment contrasts: options("contrasts" = c("contr.treatment", "contr.poly")) wine.clm <- clm(rating~temp*contact, data=wine, contrasts=list(temp="contr.sum")) coef(summary(wine.clm)) head(model.matrix(wine.clm)$X) wine.clm$contrasts 
head(pred3 <- predict(wine.clm)$fit) stopifnot(isTRUE(all.equal(pred1, pred2))) stopifnot(isTRUE(all.equal(pred1, pred3))) ################################# ### Now for a rank deficient fit: ################################# cy <- with(wine, which(temp == "cold" & contact == "yes")) options("contrasts" = c("contr.treatment", "contr.poly")) wine1.clm <- clm(rating ~ temp*contact, subset = -cy, data = wine) coef(summary(wine1.clm)) attributes(model.matrix(wine1.clm)$X)$contrasts wine1.clm$contrasts head(pred4 <- predict(wine1.clm)$fit) options("contrasts" = c("contr.sum", "contr.poly")) wine1.clm <- clm(rating ~ temp*contact, subset = -cy, data = wine) attributes(model.matrix(wine1.clm)$X)$contrasts options("contrasts" = c("contr.treatment", "contr.poly")) attributes(model.matrix(wine1.clm)$X)$contrasts ## Notice that the contrasts change in the attributes of the fit!!! coef(summary(wine1.clm)) wine1.clm$contrasts head(pred5 <- predict(wine1.clm)$fit) head(cbind(pred4, pred5)) stopifnot(isTRUE(all.equal(pred4, pred5))) options("contrasts" = c("contr.treatment", "contr.poly")) wine1.clm <- clm(rating ~ temp*contact, subset = -cy, data = wine, contrasts=list(temp="contr.sum")) coef(summary(wine1.clm)) head(model.matrix(wine1.clm)$X) attributes(model.matrix(wine1.clm)$X)$contrasts wine1.clm$contrasts head(pred6 <- predict(wine1.clm)$fit) head(cbind(pred4, pred5, pred6)) stopifnot(isTRUE(all.equal(pred4, pred6))) ################################################################## ## Compare equality of fitted values for models with different contrasts: options("contrasts" = c("contr.treatment", "contr.poly")) fm1 <- clm(rating ~ temp + contact, data=wine) fitted(fm1) options("contrasts" = c("contr.sum", "contr.poly")) fm2 <- clm(rating ~ temp + contact, data=wine) fitted(fm2) options("contrasts" = c("contr.treatment", "contr.poly")) fm3 <- clm(rating ~ temp + contact, data=wine, contrasts=list(contact="contr.sum")) fitted(fm3) stopifnot(isTRUE(all.equal(fitted(fm1), fitted(fm2)))) stopifnot(isTRUE(all.equal(fitted(fm1), fitted(fm3)))) ################################################################## ## Compare equality of fitted values for models with different ## contrasts in face of aliased coefficients: options("contrasts" = c("contr.treatment", "contr.poly")) cy <- with(wine, which(temp == "cold" & contact == "yes")) Wine <- subset(wine, subset=!(temp == "cold" & contact == "yes")) fm1 <- clm(rating ~ temp + contact, data=Wine) options("contrasts" = c("contr.sum", "contr.poly")) fm2 <- clm(rating ~ temp + contact, data=Wine) options("contrasts" = c("contr.treatment", "contr.poly")) fm3 <- clm(rating ~ temp + contact, data=Wine, contrasts=list(contact="contr.sum")) stopifnot(isTRUE(all.equal(fitted(fm1), fitted(fm2)))) stopifnot(isTRUE(all.equal(fitted(fm1), fitted(fm3)))) stopifnot(isTRUE(all.equal(predict(fm1)$fit, predict(fm2)$fit))) stopifnot(isTRUE(all.equal(predict(fm1)$fit, predict(fm3)$fit))) ################################# ## Does this also happen if the wine data has changed? options("contrasts" = c("contr.treatment", "contr.poly")) Wine <- subset(wine, subset=!(temp == "cold" & contact == "yes")) fm1 <- clm(rating ~ temp + contact, data=Wine) fit1 <- fitted(fm1) pred1 <- predict(fm1)$fit Wine <- wine pred2 <- predict(fm1)$fit stopifnot(isTRUE(all.equal(fit1, pred1))) stopifnot(isTRUE(all.equal(fit1, pred2))) ## What if weights, say, is an expression? 
## Notice that updating the model object changes it: set.seed(123) fm1 <- clm(rating ~ temp + contact, data=wine, weights=runif(nrow(wine), .5, 1.5)) fm2 <- update(fm1) stopifnot(isTRUE(all.equal(fitted(fm1), predict(fm1)$fit))) stopifnot(!isTRUE(all.equal(fitted(fm1), fitted(fm2)))) ################################# ## Test equality of fits and predictions of models with: ## 'x + I(x^2)' and 'poly(x, 2)': ## December 25th 2014, RHBC. data(wine) set.seed(1) x <- rnorm(nrow(wine), sd=2) + as.numeric(wine$rating) range(x) ## Comparison of 'x + I(x^2)' and 'poly(x, 2)': fm3 <- clm(rating ~ temp + x + I(x^2), data=wine) fm4 <- clm(rating ~ temp + poly(x, 2), data=wine) ## Same model fits, but different parameterizations: stopifnot( !isTRUE(all.equal(coef(fm3), coef(fm4), check.names=FALSE)) ) stopifnot(isTRUE(all.equal(logLik(fm3), logLik(fm4)))) newData <- expand.grid(temp = levels(wine$temp), x=seq(-1, 7, 3)) predict(fm3, newdata=newData)$fit predict(fm4, newdata=newData)$fit stopifnot(isTRUE(all.equal(fitted(fm3), fitted(fm4)))) stopifnot(isTRUE( all.equal(predict(fm3, newdata=newData)$fit, predict(fm4, newdata=newData)$fit))) ################################# ordinal/tests/clm.fit.R0000644000176200001440000000223613331062435014526 0ustar liggesuserslibrary(ordinal) data(wine) ## clm.fit with nominal and scale effects: ## get simple model: fm1 <- clm(rating ~ temp, scale=~temp, nominal=~ contact, data=wine, method="design") str(fm1, give.attr=FALSE) fm1$control$method <- "Newton" res <- clm.fit(fm1) names(res) res$Theta ## construct some weights and offsets: set.seed(1) off1 <- runif(length(fm1$y)) set.seed(1) off2 <- rnorm(length(fm1$y)) set.seed(1) wet <- runif(length(fm1$y)) ## Fit various models: fit <- clm.fit(fm1$y, fm1$X, fm1$S, fm1$NOM, weights=wet) Coef <- c(-0.905224120279548, 1.31043498891987, 3.34235590523008, 4.52389661722693, -3.03954652971192, -1.56922389038976, -1.75662549320839, -1.16845464236365, 2.52988580848393, -0.0261457032829033) stopifnot(all.equal(coef(fit), Coef, check.attributes=FALSE, tol=1e-6)) str(fit) fit <- clm.fit(fm1$y, fm1$X, fm1$S, fm1$NOM, offset=off1) str(fit) fit <- clm.fit(fm1$y, fm1$X, fm1$S, fm1$NOM, offset=off1, S.offset=off2) str(fit) fit <- clm.fit(fm1$y, fm1$X, fm1$S) str(fit) fit <- clm.fit(fm1$y, fm1$X) str(fit) fit <- clm.fit(fm1$y) coef(fit) str(fit) ## Remember: compare with corresponding .Rout file ordinal/tests/test.makeThresholds.R0000644000176200001440000000115113333002400017104 0ustar liggesusers# test.makeThresholds.R library(ordinal) # Prvious bug which is now fixed: res <- ordinal:::makeThresholds(letters[1:3], "symmetric") stopifnot(length(res$alpha.names) == res$nalpha) # length(res$alpha.names) used to be 4 # Real data example: wine <- within(wine, { rating_comb3b <- rating levels(rating_comb3b) <- c("1-2", "1-2", "3", "4-5", "4-5") }) wine$rating_comb3b[1] <- "4-5" # Need to remove the zero here to avoid inf MLE ftable(rating_comb3b ~ temp + contact, data=wine) fm.comb3_c <- clm(rating_comb3b ~ contact, #scale=~contact, threshold = "symmetric", data=wine) # no error ordinal/tests/clmm.control.R0000755000176200001440000000227111761151324015604 0ustar liggesuserslibrary(ordinal) data(wine) ### 3 options for specifying control arguments: ## 1) control is a simple list, e.g. list(trace=-1) ## 2) control is a call to clmm.control ## 3) control is an empty list; list() ## all in combination with extra control arguments. 
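## A hedged sketch (kept as comments, not run, and not an original
## test): at the clmm() call the three options should be
## interchangeable, e.g. with the model used in anova.R; maxIter=200
## is an arbitrary illustrative value:
## clmm(rating ~ temp + (1|judge), data=wine, control=list(maxIter=200))
## clmm(rating ~ temp + (1|judge), data=wine, control=clmm.control(maxIter=200))
## clmm(rating ~ temp + (1|judge), data=wine, maxIter=200)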
ordinal:::getCtrlArgs(clmm.control(), list(maxIter=200)) ordinal:::getCtrlArgs(list(), list(maxIter=200)) ordinal:::getCtrlArgs(list(), list(trace=-1)) ordinal:::getCtrlArgs(list(), list(trace=1)) ordinal:::getCtrlArgs(list(), list()) ordinal:::getCtrlArgs(list(maxIter=2), list()) ordinal:::getCtrlArgs(clmm.control(), list()) ordinal:::getCtrlArgs(clmm.control(maxIter=100), list(maxIter=200)) ordinal:::getCtrlArgs(clmm.control(maxIter=100), list(maxIter=200)) ordinal:::getCtrlArgs(clmm.control(), list(trace=1)) ordinal:::getCtrlArgs(clmm.control(), list(trace=-1)) ordinal:::getCtrlArgs(clmm.control(trace=1), list()) ordinal:::getCtrlArgs(clmm.control(trace=-1), list()) ordinal:::getCtrlArgs(clmm.control(trace=0), list()) ## Don't specify trace twice - surprising behavior might occur: ordinal:::getCtrlArgs(clmm.control(trace=1), list(trace=-1)) ordinal:::getCtrlArgs(clmm.control(trace=-1), list(trace=1)) ordinal/src/0000755000176200001440000000000013575515131012477 5ustar liggesusersordinal/src/links.c0000755000176200001440000001624513332053617013773 0ustar liggesusers///////////////////////////////////////////////////////////////////////////// // Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen // // This file is part of the ordinal package for R (*ordinal*) // // *ordinal* is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 2 of the License, or // (at your option) any later version. // // *ordinal* is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // A copy of the GNU General Public License is available at // and/or // . ///////////////////////////////////////////////////////////////////////////// #include "links.h" /* This file implements scalar distribution, density and gradient function */ /*-------------------------------------------------------*/ /* Scalar cumulative distribution functions (CDFs) */ /*-------------------------------------------------------*/ double d_pgumbel(double q, double loc, double scale, int lower_tail) // Consider implementing 'int give_log' to follow the convention from // pnorm etc. { if(ISNAN(q)) // true for NA and NaN return NA_REAL; if(q == R_PosInf) q = 1.; else if(q == R_NegInf) q = 0.; else { q = (q - loc) / scale; q = exp( -exp( -q)); } return !lower_tail ? 1 - q : q; } double d_pgumbel2(double q, double loc, double scale, int lower_tail) // this is (partly) redundant since d_pgumbel2(q) = 1 - d_pgumbel(-q) { if(ISNAN(q)) // true for NA and NaN return NA_REAL; if(q == R_PosInf) q = 1; else if(q == R_NegInf) q = 0; else { q = (-q - loc) / scale; q = exp(-exp(-q)); } return !lower_tail ? q : 1 - q; } double d_pAO(double q, double lambda, int lower_tail) { if(ISNAN(q) || ISNAN(lambda)) // true for NA and NaN return NA_REAL; if(q == R_PosInf) q = 1; else if(q == R_NegInf) q = 0; else { if(lambda < 1.0e-6) error("'lambda' has to be positive. lambda = %e was supplied\n", lambda); q = 1 - R_pow(lambda * exp(q) + 1, -1/lambda); } return !lower_tail ? 
1 - q : q; } double d_plgamma(double eta, double lambda, int lower_tail) { double v; if(ISNAN(eta) || ISNAN(lambda)) // true for NA and NaN return NA_REAL; if(eta == R_PosInf) v = 1; else if(eta == R_NegInf) v = 0; else { v = R_pow_di(lambda, -2) * exp(lambda * eta); if(lambda < 1.0e-6) v = 1 - pgamma(v, R_pow_di(lambda, -2), /*scale = */ 1, 1 /*lower_tail*/, 0 /*give_log*/); if(lambda > -1.0e-6) v = pgamma(v, R_pow_di(lambda, -2), /*scale = */ 1, 1 /*lower_tail*/, 0 /*give_log*/); if(lambda >= -1.0e-6 && lambda <= 1.0e-6) // pnorm(x, mu, sigma, lower_tail, give_log); v = pnorm(eta, 0., 1., 1, 0); } return lower_tail ? v : 1 - v; } /*-------------------------------------------------------*/ /* Scalar probability density functions (PDFs) */ /*-------------------------------------------------------*/ double d_dgumbel(double x, double loc, double scale, int give_log) { if(ISNAN(x)) // true for NA and NaN return NA_REAL; if(x == R_PosInf || x == R_NegInf) // if(x == INFINITE || x == -INFINITE) // seems to work as well. return 0; // this special case needs to be handled separately x = (x - loc) / scale; x = -exp(-x) - x - log(scale); return give_log ? x : exp(x); } double d_dgumbel2(double x, double loc, double scale, int give_log) { if(ISNAN(x)) // true for NA and NaN return NA_REAL; if(x == R_PosInf || x == R_NegInf) return 0; x = (-x - loc) / scale; x = -exp(-x) - x - log(scale); return give_log ? x : exp(x); } double d_dAO(double eta, double lambda, int give_log) { if(ISNAN(eta) || ISNAN(lambda)) // true for NA and NaN return NA_REAL; if(eta == R_PosInf || eta == R_NegInf) return 0; if(lambda < 1.0e-6) error("'lambda' has to be positive. lambda = %e was supplied\n", lambda); eta -= (1 + 1 / lambda) * log(lambda * exp(eta) + 1); return give_log ? eta : exp(eta); } double d_dlgamma(double x, double lambda, int give_log) { if(ISNAN(x) || ISNAN(lambda)) // true for NA and NaN return NA_REAL; if(x == R_PosInf || x == R_NegInf) return 0; if(lambda < 1.0e-5 && lambda > -1.0e-5) // lambda close to zero return dnorm(x, 0. , 1., give_log); double q_2 = R_pow_di(lambda, -2); x *= lambda; x = log(fabs(lambda)) + q_2 * log(q_2) - lgammafn(q_2) + q_2 * (x - exp(x)); return !give_log ? exp(x) : x; } /*-------------------------------------------------------*/ /* Scalar gradients of probability density functions */ /*-------------------------------------------------------*/ double d_glogis(double x) { // Gradient of dlogis(x) wrt. x if(ISNAN(x)) // true for NA and NaN return NA_REAL; if(x == R_PosInf || x == R_NegInf) // if(x == INFINITE || x == -INFINITE) // seems to work as well. return 0; // this special case needs to be handled separately /* Store the sign of x, compute the gradient for the absolute value and restore the sign. This is needed to avoid exp(LARGE) to blow up and the function to return NaN. */ int sign = x > 0; //could use fsign() instead... x = exp(-fabs(x)); x = 2 * x * x * R_pow_di(1 + x, -3) - x * R_pow_di(1 + x, -2); return sign ? 
x : -x; } double d_gnorm(double x) { if(ISNAN(x)) // true for NA and NaN return NA_REAL; if(x == INFINITY || x == -INFINITY) return 0; else return -x * dnorm(x, 0., 1., 0); } double d_gcauchy(double x) { if(ISNAN(x)) // true for NA and NaN return NA_REAL; if(x == R_PosInf || x == R_NegInf) return 0; return x = -2 * x / M_PI * R_pow_di(1 + x * x, -2); } double d_ggumbel(double x) { if(ISNAN(x)) // true for NA and NaN return NA_REAL; if(x == R_PosInf || x == R_NegInf) return 0; x = exp(-x); if(x == INFINITY) return 0; double eq = exp(-x); return -eq * x + eq * x * x; } double d_ggumbel2(double x) // redundant function... { return -d_ggumbel(-x); } double d_gAO(double eta, double lambda) { if(ISNAN(eta) || ISNAN(lambda)) // true for NA and NaN return NA_REAL; if(eta == R_PosInf || eta == R_NegInf) return 0; double lex = lambda * exp(eta); if(lex == R_PosInf || lex == 0) return 0.; double y = d_dAO(eta, lambda, 0/*give_log*/); return y == 0. ? 0. : y * (1 - (1 + 1/lambda) * lex / (1 + lex)); } double d_glgamma(double x, double lambda) { if(ISNAN(x) || ISNAN(lambda)) // true for NA and NaN return NA_REAL; if(x == R_PosInf || x == R_NegInf) return 0.; if(lambda < 1.0e-5 && lambda > -1.0e-5) // lambda close to zero return -x * dnorm(x, 0., 1., 0/*give_log*/); double z = exp(lambda * x); if(z == R_PosInf || z == 0.) return 0.; double y = d_dlgamma(x, lambda, 0/*give_log*/); return y <= 0. ? 0.0 : y * (1 - exp(lambda * x)) / lambda; // Equivalent to: /* if(y <= 0) return 0.0; else return y * (1 - exp(lambda * x)) / lambda; */ } ordinal/src/init.c0000644000176200001440000001130313332053617013601 0ustar liggesusers///////////////////////////////////////////////////////////////////////////// // Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen // // This file is part of the ordinal package for R (*ordinal*) // // *ordinal* is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 2 of the License, or // (at your option) any later version. // // *ordinal* is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // A copy of the GNU General Public License is available at // and/or // . 
/////////////////////////////////////////////////////////////////////////////
#include <R.h>
#include <Rinternals.h>
#include <stdlib.h> // for NULL
#include <R_ext/Rdynload.h>

/* .C calls */
extern void dAO_C(void *, void *, void *, void *);
extern void dgumbel_C(void *, void *, void *, void *, void *);
extern void dgumbel2_C(void *, void *, void *, void *, void *);
extern void dlgamma_C(void *, void *, void *, void *);
extern void gAO_C(void *, void *, void *);
extern void gcauchy_C(void *, void *);
extern void getNAGQ(void *, void *, void *, void *, void *, void *, void *,
                    void *, void *, void *, void *, void *, void *, void *,
                    void *, void *, void *, void *, void *);
extern void getNGHQ_C(void *, void *, void *, void *, void *, void *, void *,
                      void *, void *, void *, void *, void *, void *, void *,
                      void *, void *, void *);
extern void ggumbel_C(void *, void *);
extern void ggumbel2_C(void *, void *);
extern void glgamma_C(void *, void *, void *);
extern void glogis_C(void *, void *);
extern void gnorm_C(void *, void *);
extern void grad_C(void *, void *, void *, void *, void *, void *, void *,
                   void *, void *, void *, void *, void *, void *, void *,
                   void *, void *);
extern void gradC(void *, void *, void *, void *, void *, void *, void *, void *);
extern void grFacSum_C(void *, void *, void *, void *, void *);
extern void hess(void *, void *, void *, void *, void *, void *, void *,
                 void *, void *, void *, void *, void *, void *);
extern void hessC(void *, void *, void *, void *, void *, void *, void *,
                  void *, void *, void *, void *);
extern void nll(void *, void *, void *, void *, void *, void *, void *,
                void *, void *, void *, void *, void *, void *, void *,
                void *, void *, void *);
extern void NRalg(void *, void *, void *, void *, void *, void *, void *,
                  void *, void *, void *, void *, void *, void *, void *,
                  void *, void *, void *, void *, void *, void *, void *,
                  void *, void *, void *, void *, void *, void *, void *,
                  void *);
extern void NRalgv3(void *, void *, void *, void *, void *, void *, void *,
                    void *, void *, void *, void *, void *, void *, void *,
                    void *, void *, void *, void *, void *, void *, void *,
                    void *, void *, void *);
extern void pAO_C(void *, void *, void *, void *);
extern void pgumbel_C(void *, void *, void *, void *, void *);
extern void pgumbel2_C(void *, void *, void *, void *, void *);
extern void plgamma_C(void *, void *, void *, void *);

/* .Call calls */
extern SEXP get_fitted(SEXP, SEXP, SEXP, SEXP);

static const R_CMethodDef CEntries[] = {
    {"dAO_C",      (DL_FUNC) &dAO_C,       4},
    {"dgumbel_C",  (DL_FUNC) &dgumbel_C,   5},
    {"dgumbel2_C", (DL_FUNC) &dgumbel2_C,  5},
    {"dlgamma_C",  (DL_FUNC) &dlgamma_C,   4},
    {"gAO_C",      (DL_FUNC) &gAO_C,       3},
    {"gcauchy_C",  (DL_FUNC) &gcauchy_C,   2},
    {"getNAGQ",    (DL_FUNC) &getNAGQ,    19},
    {"getNGHQ_C",  (DL_FUNC) &getNGHQ_C,  17},
    {"ggumbel_C",  (DL_FUNC) &ggumbel_C,   2},
    {"ggumbel2_C", (DL_FUNC) &ggumbel2_C,  2},
    {"glgamma_C",  (DL_FUNC) &glgamma_C,   3},
    {"glogis_C",   (DL_FUNC) &glogis_C,    2},
    {"gnorm_C",    (DL_FUNC) &gnorm_C,     2},
    {"grad_C",     (DL_FUNC) &grad_C,     16},
    {"gradC",      (DL_FUNC) &gradC,       8},
    {"grFacSum_C", (DL_FUNC) &grFacSum_C,  5},
    {"hess",       (DL_FUNC) &hess,       13},
    {"hessC",      (DL_FUNC) &hessC,      11},
    {"nll",        (DL_FUNC) &nll,        17},
    {"NRalg",      (DL_FUNC) &NRalg,      29},
    {"NRalgv3",    (DL_FUNC) &NRalgv3,    24},
    {"pAO_C",      (DL_FUNC) &pAO_C,       4},
    {"pgumbel_C",  (DL_FUNC) &pgumbel_C,   5},
    {"pgumbel2_C", (DL_FUNC) &pgumbel2_C,  5},
    {"plgamma_C",  (DL_FUNC) &plgamma_C,   4},
    {NULL, NULL, 0}
};

static const R_CallMethodDef CallEntries[] = {
    {"get_fitted", (DL_FUNC) &get_fitted, 4},
    {NULL, NULL, 0}
};

void R_init_ordinal(DllInfo
*dll) { R_registerRoutines(dll, CEntries, CallEntries, NULL, NULL); R_useDynamicSymbols(dll, FALSE); } ordinal/src/get_fitted.c0000755000176200001440000001150213332053617014760 0ustar liggesusers///////////////////////////////////////////////////////////////////////////// // Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen // // This file is part of the ordinal package for R (*ordinal*) // // *ordinal* is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 2 of the License, or // (at your option) any later version. // // *ordinal* is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // A copy of the GNU General Public License is available at // and/or // . ///////////////////////////////////////////////////////////////////////////// #include #include #include #include "links.h" SEXP get_fitted(SEXP, SEXP, SEXP, SEXP); // ------------------------------------------------------- SEXP get_fitted(SEXP eta1p, SEXP eta2p, SEXP linkp, SEXP lambdap) { /* Compute fitted values (probabilities) from vectors of linear predictors (eta1 and eta2) given the link function (linkp) and an optional lambda parameter. eta1 and eta2 are required to be equal length numeric vectors, linkp a character vector and lambdap a numeric scalar. return: vector of fittec values of same length as eta1 and eta2. */ SEXP ans = PROTECT(duplicate(coerceVector(eta1p, REALSXP))); eta2p = PROTECT(coerceVector(eta2p, REALSXP)); linkp = PROTECT(coerceVector(linkp, STRSXP)); const char *linkc = CHAR(asChar(linkp)); double *eta1 = REAL(ans), *eta2 = REAL(eta2p), lambda = asReal(lambdap); int i, nans = LENGTH(ans); if(LENGTH(eta2p) != nans) { // ".. don't have to UNPROTECT before calling into "error"; it is not a bug to do so, but it is not needed either, error will result in a long jump that will UNPROTECT automatically." Email from Tomas Kalibra 19Apr2018. 
; UNPROTECT(3); error("'eta1' and 'eta2' should have the same length"); } if(strcmp(linkc, "probit") == 0) { for(i = 0; i < nans; i++) { if(eta2[i] <= 0) // pnorm(x, mu, sigma, lower_tail, give_log); eta1[i] = pnorm(eta1[i], 0.0, 1.0, 1, 0) - pnorm(eta2[i], 0.0, 1.0, 1, 0); else eta1[i] = pnorm(eta2[i], 0.0, 1.0, 0, 0) - pnorm(eta1[i], 0.0, 1.0, 0, 0); } } else if(strcmp(linkc, "logit") == 0) { for(i = 0; i < nans; i++) { if(eta2[i] <= 0) // plogis(x, mu, sigma, lower_tail, give_log); eta1[i] = plogis(eta1[i], 0.0, 1.0, 1, 0) - plogis(eta2[i], 0.0, 1.0, 1, 0); else eta1[i] = plogis(eta2[i], 0.0, 1.0, 0, 0) - plogis(eta1[i], 0.0, 1.0, 0, 0); } } else if(strcmp(linkc, "loglog") == 0) { for(i = 0; i < nans; i++) { if(eta2[i] <= 0) // d_pgumbel(double q, double loc, double scale, int lower_tail) eta1[i] = d_pgumbel(eta1[i], 0., 1., 1) - d_pgumbel(eta2[i], 0., 1., 1); else eta1[i] = d_pgumbel(eta2[i], 0., 1., 0) - d_pgumbel(eta1[i], 0., 1., 0); } } else if(strcmp(linkc, "cloglog") == 0) { for(i = 0; i < nans; i++) { if(eta2[i] <= 0) // d_pgumbel2(double q, double loc, double scale, int lower_tail) eta1[i] = d_pgumbel2(eta1[i], 0., 1., 1) - d_pgumbel2(eta2[i], 0., 1., 1); else eta1[i] = d_pgumbel2(eta2[i], 0., 1., 0) - d_pgumbel2(eta1[i], 0., 1., 0); } } else if(strcmp(linkc, "cauchit") == 0) { for(i = 0; i < nans; i++) { if(eta2[i] <= 0) // pcauchy(q, loc, scale, lower_tail, give_log) eta1[i] = pcauchy(eta1[i], 0., 1., 1, 0) - pcauchy(eta2[i], 0., 1., 1, 0); else eta1[i] = pcauchy(eta2[i], 0., 1., 0, 0) - pcauchy(eta1[i], 0., 1., 0, 0); } } else if(strcmp(linkc, "Aranda-Ordaz") == 0) { for(i = 0; i < nans; i++) { if(eta2[i] <= 0) // d_pAO(q, lambda, lower_tail) eta1[i] = d_pAO(eta1[i], lambda, 1) - d_pAO(eta2[i], lambda, 1); else eta1[i] = d_pAO(eta2[i], lambda, 0) - d_pAO(eta1[i], lambda, 0); } } else if(strcmp(linkc, "log-gamma") == 0) { for(i = 0; i < nans; i++) { if(eta2[i] <= 0) // d_plgamma(double eta, double lambda, int lower_tail) eta1[i] = d_plgamma(eta1[i], lambda, 1) - d_plgamma(eta2[i], lambda, 1); else eta1[i] = d_plgamma(eta2[i], lambda, 0) - d_plgamma(eta1[i], lambda, 0); } } else { // ".. don't have to UNPROTECT before calling into "error"; it is not a bug to do so, but it is not needed either, error will result in a long jump that will UNPROTECT automatically." Email from Tomas Kalibra 19Apr2018. ; UNPROTECT(3); // unprotecting before exiting with an error error("link not recognized"); } UNPROTECT(3); return ans; } ordinal/src/utilityFuns.c0000755000176200001440000005614013332053617015210 0ustar liggesusers///////////////////////////////////////////////////////////////////////////// // Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen // // This file is part of the ordinal package for R (*ordinal*) // // *ordinal* is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 2 of the License, or // (at your option) any later version. // // *ordinal* is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // A copy of the GNU General Public License is available at // and/or // . 
///////////////////////////////////////////////////////////////////////////// #include #include #include "links.h" double mu = 0, sigma = 1; int give_log = 0, lower_tail = 1; //--------------------------------- double d_pfun(); double d_pfun2(); // with lower_tail arg double d_dfun(); double d_gfun(); double d_gAO(); //--- negative log-likelihood: double d_nll(); //--- Utilities: double mmax(); double maxAbs(); void Trace(); //--------------------------------- //------------------------------------------------------------------ // CDFs: void pgumbel_C(double *q, int *nq, double *loc, double *scale, int *lower_tail) { // pgumbel() int i; // How can I handle if loc and scale are not of unit length? for(i = 0; i < *nq; i++) q[i] = d_pgumbel(q[i], *loc, *scale, *lower_tail); } void pgumbel2_C(double *q, int *nq, double *loc, double *scale, int *lower_tail) { int i; for(i = 0; i < *nq; i++) q[i] = 1 - d_pgumbel(-q[i], *loc, *scale, *lower_tail); } void pAO_C(double *q, int *nq, double *lambda, int *lower_tail) { int i; for(i = 0; i < *nq; i++) q[i] = d_pAO(q[i], *lambda, *lower_tail); } void plgamma_C(double *q, int *nq, double *lambda, int *lower_tail) { int i; for(i = 0; i < *nq; i++) q[i] = d_plgamma(q[i], *lambda, *lower_tail); } //------------------------------------------------------------------ // PDFs: void dgumbel_C(double *x, int *nx, double *loc, double *scale, int *give_log) { int i; for(i = 0; i < *nx; i++) x[i] = d_dgumbel(x[i], *loc, *scale, *give_log); } void dgumbel2_C(double *x, int *nx, double *loc, double *scale, int *give_log) { int i; for(i = 0; i < *nx; i++) x[i] = d_dgumbel2(x[i], *loc, *scale, *give_log); } void dAO_C(double *x, int *nx, double *lambda, int *give_log) { int i; for(i = 0; i < *nx; i++) x[i] = d_dAO(x[i], *lambda, *give_log); } void dlgamma_C(double *x, int *nx, double *lambda, int *give_log) { int i; for(i = 0; i < *nx; i++) x[i] = d_dlgamma(x[i], *lambda, *give_log); } //------------------------------------------------------------------ // gradients of PDFs: void glogis_C(double *x, int *nx) { int i; for(i = 0; i < *nx; i++) x[i] = d_glogis(x[i]); } void gnorm_C(double *x, int *nx) { // Gradient of dnorm(x) wrt. x int i; for(i = 0; i < *nx; i++) x[i] = d_gnorm(x[i]); } void gcauchy_C(double *x, int *n) { // Gradient of dcauchy(x) wrt. 
x int i; for(i = 0; i < *n; i++) x[i] = d_gcauchy(x[i]); } void ggumbel_C(double *x, int *nx) { int i; for(i = 0; i < *nx; i++) x[i] = d_ggumbel(x[i]); } void ggumbel2_C(double *x, int *nx) { int i; for(i = 0; i < *nx; i++) x[i] = -d_ggumbel(-x[i]); // or x[i] = d_ggumbel2(x[i]); } void gAO_C(double *x, int *nx, double *lambda) { int i; for(i = 0; i < *nx; i++) x[i] = d_gAO(x[i], *lambda); } void glgamma_C(double *x, int *nx, double *lambda) { int i; for(i = 0; i < *nx; i++) x[i] = d_glgamma(x[i], *lambda); } //------------------------------------------------------------------ // link utility functions: /* Link functions:: 1: logistic 2: probit 3: cloglog 4: loglog 5: cauchit 6: Aranda-Ordaz 7: log-gamma */ double d_pfun(double x, double lambda, int link) { switch(link) { case 1: // logistic return plogis(x, mu, sigma, lower_tail, give_log); case 2: // probit return pnorm(x, mu, sigma, lower_tail, give_log); case 3: // cloglog return d_pgumbel(x, mu, sigma, lower_tail); case 4: // loglog return d_pgumbel2(x, mu, sigma, lower_tail); case 5: // cauchit return pcauchy(x, mu, sigma, lower_tail, give_log); case 6: // Aranda-Ordaz return d_pAO(x, lambda, lower_tail); case 7: // log-gamma return d_plgamma(x, lambda, lower_tail); default : // all other // if(link == 6) // error("the Aranda-Ordaz link is not available"); // if(link == 7) // error("the log-gamma link is not available"); // else error("link not recognized\n"); return NA_REAL; } } double d_pfun2(double x, double lambda, int link, int lower_tail) // 2nd version of d_pfun with a lower_tail arg { switch(link) { case 1: // logistic return plogis(x, mu, sigma, lower_tail, give_log); case 2: // probit return pnorm(x, mu, sigma, lower_tail, give_log); case 3: // cloglog return d_pgumbel(x, mu, sigma, lower_tail); case 4: // loglog return d_pgumbel2(x, mu, sigma, lower_tail); case 5: // cauchit return pcauchy(x, mu, sigma, lower_tail, give_log); case 6: // Aranda-Ordaz return d_pAO(x, lambda, lower_tail); case 7: // log-gamma return d_plgamma(x, lambda, lower_tail); default : // all other // if(link == 6) // error("the Aranda-Ordaz link is not available"); // if(link == 7) // error("the log-gamma link is not available"); // else error("link not recognized\n"); return NA_REAL; } } void pfun(double *x, int *nx, double *lambda, int *link) { int i; for(i = 0; i < *nx; i++) x[i] = d_pfun(x[i], *lambda, *link); } double d_dfun(double x, double lambda, int link) { switch(link) { case 1: // logistic return dlogis(x, mu, sigma, give_log); case 2: // probit return dnorm(x, mu, sigma, give_log); case 3: // cloglog return d_dgumbel(x, mu, sigma, give_log); case 4: // loglog return d_dgumbel2(x, mu, sigma, give_log); case 5: // cauchit return dcauchy(x, mu, sigma, give_log); case 6: return d_dAO(x, lambda, give_log); case 7: return d_dlgamma(x, lambda, give_log); default : // all other error("link not recognized\n"); return NA_REAL; } } void dfun(double *x, int *nx, double *lambda, int *link) { int i; for(i = 0; i < *nx; i++) x[i] = d_dfun(x[i], *lambda, *link); } double d_gfun(double x, double lambda, int link) { switch(link) { case 1: // logistic return d_glogis(x); case 2: // probit return d_gnorm(x); case 3: // cloglog return d_ggumbel(x); case 4: // loglog return d_ggumbel2(x); case 5: // cauchit return d_gcauchy(x); case 6: return d_gAO(x, lambda); case 7: return d_glgamma(x, lambda); default : // all other error("link not recognized\n"); return NA_REAL; } } void gfun(double *x, int *nx, double *lambda, int *link) { int i; for(i = 0; i < *nx; i++) 
x[i] = d_gfun(x[i], *lambda, *link); } //------------------------------------------------------------------ void getFitted(double *eta1, double *eta2, int *neta) { // adjust for NA and NaN values? int i; for(i = 0; i < *neta; i++) { if(eta2[i] <= 0) // pnorm(x, mu, sigma, lower_tail, give_log); eta1[i] = pnorm(eta1[i], 0.0, 1.0, 1, 0) - pnorm(eta2[i], 0.0, 1.0, 1, 0); else eta1[i] = pnorm(eta2[i], 0.0, 1.0, 0, 0) - pnorm(eta1[i], 0.0, 1.0, 0, 0); } } void getFitted2(double *eta1, double *eta2, int *neta, double *lambda, int *link) // 2nd version now including a link arg { // adjust for NA and NaN values? int i; for(i = 0; i < *neta; i++) { if(eta2[i] <= 0) // d_pfun2(x, lambda, link, lower_tail) eta1[i] = d_pfun2(eta1[i], *lambda, *link, 1) - d_pfun2(eta2[i], *lambda, *link, 1); else eta1[i] = d_pfun2(eta2[i], *lambda, *link, 0) - d_pfun2(eta1[i], *lambda, *link, 0); } } //------------------------------------------------------------------ // Gradients and Hessians for update.b in clmm2(): void grFacSum_C(double *x, int *grFac, int *nx, double *u, int *nu) // compute tapply(x, grFac, sum) + u { int i, j; double z = 0; for(i = 0; i < *nu; i++) { for (j = 0; j < *nx; j++) { if(grFac[j] == i + 1) z = z + x[j]; } u[i] = u[i] + z; z = 0; } } // FIXME: grFacSum_C such that it can be used by gradC and hessC - this // should simplify the code double d_nll(double *u, int nu, int *grFac, double stDev, double *o1, double *o2, int no, double *eta1, double *eta2, double *eta1Fix, double *eta2Fix, double *sigma, double *pr, double *weights, double lambda, int *link) /* Returns: nll Updates: eta1, eta2, pr given the new value of u Leaves unchanged: u, grFac, stDev, o1, o2, eta1Fix, eta2Fix, sigma, weights */ { int i, j; double o, nll = 0.0; for(i = 0; i < no; i++) { o = u[grFac[i] - 1] * stDev; eta1[i] = (eta1Fix[i] + o1[i] - o) / sigma[i]; eta2[i] = (eta2Fix[i] + o2[i] - o) / sigma[i]; /* Accurate evaluation of pr (fitted values) even if eta1 and eta2 are both large: */ if(eta2[i] <= 0) pr[i] = d_pfun2(eta1[i], lambda, *link, 1) - d_pfun2(eta2[i], lambda, *link, 1); else pr[i] = d_pfun2(eta2[i], lambda, *link, 0) - d_pfun2(eta1[i], lambda, *link, 0); if(!R_FINITE(pr[i]) || pr[i] <= 0.) 
{ return INFINITY; } nll -= weights[i] * log(pr[i]); } for(j = 0; j < nu; j++) nll -= dnorm(u[j], 0., 1., 1); return nll; } void nll(double *u, int *nu, int *grFac, double *stDev, double *o1, double *o2, int *no, double *eta1, double *eta2, double *eta1Fix, double *eta2Fix, double *sigma, double *pr, double *weights, double *lambda, int *link, double *nll) { *nll = d_nll(u, *nu, grFac, *stDev, o1, o2, *no, eta1, eta2, eta1Fix, eta2Fix, sigma, pr, weights, *lambda, link); } void grad_C(double *stDev, double *p1, double *p2, double *pr, double *weights, double *sigma, double *wtprSig, double *eta1, double *eta2, double *gradValues, double *u, int *grFac, int *nx, int *ngv, double *lambda, int *link) /* Returns: void Updates: gradValues, p1, p2, wtprSig given the new values of eta1, eta2 Leaves unchanged: grFac, stDev, eta1, eta2, pr, sigma, weights, link, nx, ngv Assumes: nx: length of grFac, p1, p2, pr, weights, sigma, wtprSig, eta1, eta2 ngv: length of gradValues */ { int i, j; // double tmp[*nx], z = 0; // update p1, p2, wtprSig: for(i = 0; i < *nx; i++) { p1[i] = d_dfun(eta1[i], *lambda, *link); p2[i] = d_dfun(eta2[i], *lambda, *link); wtprSig[i] = weights[i] / pr[i] / sigma[i]; } // sum for each level of the grouping factor: for(i = 0; i < *ngv; i++) { gradValues[i] = 0; // Could set these to for (j = 0; j < *nx; j++) { if(grFac[j] == i + 1) gradValues[i] += *stDev * wtprSig[j] * (p1[j] - p2[j]); } gradValues[i] += u[i]; } } void gradC(double *stDev, double *p1, double *p2, double *wtprSig, int *grFac, int *nx, double *u, int *nu) { // gradient for update.b int i, j; double z = 0; for(i = 0; i < *nx; i++) { wtprSig[i] = *stDev * wtprSig[i] * (p1[i] - p2[i]); } for(i = 0; i < *nu; i++) { for (j = 0; j < *nx; j++) { if(grFac[j] == i + 1) z += wtprSig[j]; } u[i] += z; z = 0; } } void hess(double *stDev, double *p1, double *p2, double *pr, double *wtprSig, double *eta1, double *eta2, int *link, int *grFac, int *nx, double *hessValues, double *lambda, int *nhv) /* Returns: void Updates: hessValues given the new values of eta1, eta2 Leaves unchanged: grFac, stDev, eta1, eta2, p1, p2, pr, sigma, weights, link, nx, ngv Assumes: nx: length of grFac, p1, p2, pr, weights, sigma, wtprSig, eta1, eta2 nhv: length of hessValues */ { int i, j; // sum for each level of the grouping factor: for(i = 0; i < *nhv; i++) { hessValues[i] = 0; for (j = 0; j < *nx; j++) { if(grFac[j] == i + 1) hessValues[i] += (R_pow_di(p1[j] - p2[j], 2) / pr[j] - (d_gfun(eta1[j], *lambda, *link) - d_gfun(eta2[j], *lambda, *link))) * wtprSig[j]; } hessValues[i] = (hessValues[i] * *stDev * *stDev) + 1; } } void hessC(double *stDev, double *p1, double *p2, double *pr, double *g1, double *g2, double *wtprSig, int *grFac, int *nx, double *z, int *nz) { // hessian for update.b int i, j; double sigma2; sigma2 = R_pow_di(*stDev, 2); for(i = 0; i < *nx; i++) pr[i] = (R_pow_di(p1[i] - p2[i], 2) / pr[i] - (g1[i] - g2[i])) * wtprSig[i]; for(i = 0; i < *nz; i++) { for (j = 0; j < *nx; j++) { if(grFac[j] == i + 1) z[i] = z[i] + pr[j]; } z[i] = z[i] * sigma2 + 1; } } //------------------------------------------------------------------ // Trace function: void Trace(int iter, double stepFactor, double val, double maxGrad, double *par, int npar, int first) { int i; if(first) Rprintf("iter: step factor: Value: max|grad|: Parameters:\n"); Rprintf(" %3d: %1.3e: %.3f: %1.3e: ", iter, stepFactor, val, maxGrad); for(i = 0; i < npar; i++) Rprintf(" %.4f", par[i]); Rprintf("\n"); } //------------------------------------------------------------------ 
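/* A self-contained sketch (not part of the package's compiled code;
 * the NEWTON_SKETCH guard keeps it out of any build) of the damped
 * Newton iteration with step-halving line search that NRalg() and
 * NRalgv3() below apply to the random-effect modes. The toy convex
 * objective f() and the names f, df, ddf are illustrative
 * assumptions. Compile stand-alone with: cc -DNEWTON_SKETCH file.c -lm
 */
#ifdef NEWTON_SKETCH
#include <math.h>
#include <stdio.h>

static double f(double x) { return log1p(exp(x)) - 0.3 * x; } // objective
static double df(double x) { return 1. / (1. + exp(-x)) - 0.3; } // gradient
static double ddf(double x) { double p = 1. / (1. + exp(-x)); return p * (1. - p); } // Hessian

int main(void)
{
  double x = 5., gradTol = 1e-8, stepFactor = 1.;
  int iter;
  for(iter = 0; iter < 50 && fabs(df(x)) > gradTol; iter++) {
    double step = df(x) / ddf(x); // Newton step: gradient / Hessian
    double xtry = x - stepFactor * step;
    while(f(xtry) > f(x)) { // step halving, as in NRalg() below
      stepFactor *= 0.5;
      xtry = x - stepFactor * step;
    }
    x = xtry;
    stepFactor = fmin(1., stepFactor * 2.); // cf. fmin2() in NRalg()
    printf("iter %2d: x = % .8f |grad| = %.2e\n", iter + 1, x, fabs(df(x)));
  }
  return 0;
}
#endif // NEWTON_SKETCH
//------------------------------------------------------------------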
void NRalg(int *trace, int *maxIter, double *gradTol, int *maxLineIter, int *grFac, double *stDev, double *o1, double *o2, double *eta1Fix, double *eta2Fix, double *eta1, double *eta2, double *sigma, int *link, double *weights, double *u, double *pr, double *funValue, double *gradValues, double *hessValues, int *nx, int *nu, double *maxGrad, int *conv, double *p1, double *p2, double *wtprSig, double *lambda, int *Niter) { /* nx: length(pr) r: length(start) = length(u) updates: u, funValue, gradValues, hessValues, maxGrad, correct vector input: eta1, eta2, pr, funValue (grad is called before d_nll), u = 0, grFac, o1, o2, eta1Fix, eta2Fix, sigma, weights arbitrary input: p1, p2, wtprSig, gradValues, hessValues, needed output: u, funValue, gradValues, hessValues, conv, Niter, */ int lineIter, innerIter = 0, i, j; double stepFactor = 1, funValueTry, step[*nu]; *funValue = d_nll(u, *nu, grFac, *stDev, o1, o2, *nx, eta1, eta2, eta1Fix, eta2Fix, sigma, pr, weights, *lambda, link); if(!R_FINITE(*funValue)) { *conv = 0; return ; } grad_C(stDev, p1, p2, pr, weights, sigma, wtprSig, eta1, eta2, gradValues, u, grFac, nx, nu, lambda, link); *maxGrad = maxAbs(gradValues, *nu); *conv = -1; // Convergence flag if(*trace) Trace(0, stepFactor, *funValue, *maxGrad, u, *nu, 1); // Newton-Raphson algorithm: for(i = 0; i < *maxIter; i++) { if(*maxGrad < *gradTol) { *conv = 1; return ; } hess(stDev, p1, p2, pr, wtprSig, eta1, eta2, link, grFac, nx, hessValues, lambda, nu); for(j = 0; j < *nu; j++) { step[j] = gradValues[j] / hessValues[j]; u[j] -= stepFactor * step[j]; } funValueTry = d_nll(u, *nu, grFac, *stDev, o1, o2, *nx, eta1, eta2, eta1Fix, eta2Fix, sigma, pr, weights, *lambda, link); lineIter = 0; // simple line search, i.e. step halfing: while(funValueTry > *funValue) { stepFactor *= 0.5; for(j = 0; j < *nu; j++) u[j] += stepFactor * step[j]; funValueTry = d_nll(u, *nu, grFac, *stDev, o1, o2, *nx, eta1, eta2, eta1Fix, eta2Fix, sigma, pr, weights, *lambda, link); lineIter++; if(*trace) Trace(i+1+innerIter, stepFactor, *funValue, *maxGrad, u, *nu, 0); if(lineIter > *maxLineIter){ *conv = -2; return ; } innerIter++; } *funValue = funValueTry; grad_C(stDev, p1, p2, pr, weights, sigma, wtprSig, eta1, eta2, gradValues, u, grFac, nx, nu, lambda, link); *maxGrad = maxAbs(gradValues, *nu); if(*trace) Trace(i+1+innerIter, stepFactor, *funValue, *maxGrad, u, *nu, 0); stepFactor = fmin2(1., stepFactor * 2.); (*Niter)++; } } void NRalgv3(int *trace, int *maxIter, double *gradTol, int *maxLineIter, int *grFac, double *stDev, double *o1, double *o2, double *eta1Fix, double *eta2Fix, double *sigma, int *link, double *weights, double *u, double *pr, double *funValue, double *gradValues, double *hessValues, int *nx, int *nu, double *maxGrad, int *conv, double *lambda, int *Niter) // Less input and slightly faster than NRalg(). 
{ /* control arguments from clmm - see ?clmm.control: trace, maxIter, gradTol, maxLineIter all of length 1 length = nx: grFac, o1, o2, eta1Fix, eta2Fix, sigma, weights length = 1: stDev, funValue, nx, nu, maxGrad, conv, lambda, Niter length = nu: gradValues, hessValues, u updates: u, funValue, gradValues, hessValues, maxGrad, conv, Niter, pr, correct vector input: eta1, eta2, pr, u = 0, grFac, o1, o2, eta1Fix, eta2Fix, sigma, weights arbitrary input: gradValues, hessValues, needed output: u, funValue, gradValues, hessValues, conv, Niter, */ int lineIter, innerIter = 0, i, j; double stepFactor = 1, funValueTry, step[*nu]; double eta1[*nx], eta2[*nx], p1[*nx], p2[*nx], wtprSig[*nx]; *funValue = d_nll(u, *nu, grFac, *stDev, o1, o2, *nx, eta1, eta2, eta1Fix, eta2Fix, sigma, pr, weights, *lambda, link); if(!R_FINITE(*funValue)) { *conv = 0; return ; } grad_C(stDev, p1, p2, pr, weights, sigma, wtprSig, eta1, eta2, gradValues, u, grFac, nx, nu, lambda, link); *maxGrad = maxAbs(gradValues, *nu); *conv = -1; // Convergence flag if(*trace) Trace(0, stepFactor, *funValue, *maxGrad, u, *nu, 1); // Newton-Raphson algorithm: for(i = 0; i < *maxIter; i++) { if(*maxGrad < *gradTol) { *conv = 1; return ; } hess(stDev, p1, p2, pr, wtprSig, eta1, eta2, link, grFac, nx, hessValues, lambda, nu); for(j = 0; j < *nu; j++) { /* Actually there is no need to store 'step' since 'gradValues' could hold the step values (maintained here for code clarity) */ step[j] = gradValues[j] / hessValues[j]; u[j] -= stepFactor * step[j]; } funValueTry = d_nll(u, *nu, grFac, *stDev, o1, o2, *nx, eta1, eta2, eta1Fix, eta2Fix, sigma, pr, weights, *lambda, link); lineIter = 0; // simple line search, i.e. step halfing: while(funValueTry > *funValue) { stepFactor *= 0.5; for(j = 0; j < *nu; j++) u[j] += stepFactor * step[j]; funValueTry = d_nll(u, *nu, grFac, *stDev, o1, o2, *nx, eta1, eta2, eta1Fix, eta2Fix, sigma, pr, weights, *lambda, link); lineIter++; if(*trace) Trace(i+1+innerIter, stepFactor, *funValue, *maxGrad, u, *nu, 0); if(lineIter > *maxLineIter){ *conv = -2; return ; } innerIter++; } *funValue = funValueTry; grad_C(stDev, p1, p2, pr, weights, sigma, wtprSig, eta1, eta2, gradValues, u, grFac, nx, nu, lambda, link); *maxGrad = maxAbs(gradValues, *nu); if(*trace) Trace(i+1+innerIter, stepFactor, *funValue, *maxGrad, u, *nu, 0); stepFactor = fmin2(1.0, stepFactor * 2.0); (*Niter)++; } (*Niter)--; } //------------------------------------------------------------------ void getNGHQ_C(double *nll, int *grFac, double *stDev, double *eta1Fix, double *eta2Fix, double *o1, double *o2, double *Sigma, double *weights, int *nx, int *nu, double *ghqns, /* double *ghqws,*/ double *lghqws, int *nGHQ, int *link, double *ns, double *lambda) { int i, j, h; double SS = 0, SS1 = 0, SS2 = 0, eta1tmp, eta2tmp, pr_tmp; for(i = 0; i < *nu; i++) { for(h = 0; h < *nGHQ; h++) { for(j = 0; j < *nx; j++) { if(grFac[j] == i + 1) { eta1tmp = (eta1Fix[j] + o1[j] - ns[h]) / Sigma[j]; eta2tmp = (eta2Fix[j] + o2[j] - ns[h]) / Sigma[j]; /* Accurate evaluation of differences of probabilities even if eta1tmp and eta2tmp are large: */ if(eta2tmp <= 0) pr_tmp = d_pfun2(eta1tmp, *lambda, *link, 1) - d_pfun2(eta2tmp, *lambda, *link, 1); else pr_tmp = d_pfun2(eta2tmp, *lambda, *link, 0) - d_pfun2(eta1tmp, *lambda, *link, 0); // sum up contributions: SS1 += weights[j] * log(pr_tmp); } } // SS2 += exp(SS1) * ghqws[h]; // SS2 += exp(SS1 + log(ghqws[h])); SS2 += exp(SS1 + lghqws[h]); SS1 = 0; } SS += log(SS2); SS2 = 0; } *nll = -SS + *nu * log(M_PI * 2) * 0.5; } void 
getNAGQ(double *nll, int *grFac, double *stDev, double *eta1Fix, double *eta2Fix, double *o1, double *o2, double *Sigma, double *weights, int *nx, int *nu, double *ghqns, double *lghqws, /* double *lghqws, */ double *ghqns2, double *u, double *D, int *nAGQ, int *link, double *lambda) /* nll: negative log-likelihood (return value) length = nx: grFac, o1, o2, eta1Fix, eta2Fix, Sigma, weights length = 1: stDev, nll, nx, nu, nAGQ, lambda, link length = nu: D, u length = nAGQ: ghqns, lghqws (log ghqws) / ghqws */ { int i, j, h; double SS1 = 0, SS2 = 0, eta1tmp, eta2tmp, K, ranNew, pr_tmp; *nll = 0; for(i = 0; i < *nu; i++) { K = sqrt(2. / D[i]); for(h = 0; h < *nAGQ; h++) { for(j = 0; j < *nx; j++) { if(grFac[j] == i + 1) { ranNew = *stDev * (u[i] + K * ghqns[h]); eta1tmp = (eta1Fix[j] + o1[j] - ranNew) / Sigma[j]; eta2tmp = (eta2Fix[j] + o2[j] - ranNew) / Sigma[j]; /* Accurate evaluation of differences of probabilities even if eta1tmp and eta2tmp are large: */ if(eta2tmp <= 0) pr_tmp = d_pfun2(eta1tmp, *lambda, *link, 1) - d_pfun2(eta2tmp, *lambda, *link, 1); else pr_tmp = d_pfun2(eta2tmp, *lambda, *link, 0) - d_pfun2(eta1tmp, *lambda, *link, 0); // sum up contributions: SS1 += weights[j] * log(pr_tmp); } } // SS2 += exp(SS1) * K * ghqws[h] * // dnorm(u[i] + K * ghqns[h], mu, sigma, give_log); // SS2 += exp(SS1 + lghqws[h] + ghqns2[h] - //R_pow_di(ghqns[h], 2) + // 0.5 * R_pow_di(u[i] + K * ghqns[h], 2)) * K; SS2 += exp(SS1 + lghqws[h] + ghqns2[h] - //R_pow_di(ghqns[h], 2) + 0.5 * R_pow_di(u[i] + K * ghqns[h], 2)); SS1 = 0; } // *nll -= log(SS2); *nll -= log(SS2) + log(K); SS2 = 0; } *nll += *nu * log(M_PI * 2) * 0.5; } //------------------------------------------------------------------ double mmax(double *x, int nx) /* Return the maximum of the elements in x nx: length of x ( >= 1) */ { int i; double cmax; // current max cmax = x[0]; if(nx == 1) return cmax; for(i = 1; i < nx; i++) { if(x[i] > cmax) cmax = x[i]; } return cmax; } double maxAbs(double *x, int nx) /* Return max(abs(x)) nx: length of x ( >= 1 ) */ { int i; double cmax; // current max cmax = fabs(x[0]); if(nx == 1) return cmax; for(i = 1; i < nx; i++) { if(fabs(x[i]) > cmax) cmax = fabs(x[i]); } return cmax; } ordinal/src/links.h0000755000176200001440000000407413332053617013775 0ustar liggesusers///////////////////////////////////////////////////////////////////////////// // Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen // // This file is part of the ordinal package for R (*ordinal*) // // *ordinal* is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 2 of the License, or // (at your option) any later version. // // *ordinal* is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // A copy of the GNU General Public License is available at // and/or // . ///////////////////////////////////////////////////////////////////////////// #ifndef _ORDINAL_LINKS_H_ #define _ORDINAL_LINKS_H_ /* That ifndef, etc. is an idiom to prevent the body of the header * being read more than once. */ #include #include #ifdef __cplusplus extern "C" { #endif /* That stanza allows the same header file to be used by C and C++ * programs. There is a matching stanza at the end of this header * file. 
*/ /* Additional scalar cumulative probability functions */ double d_pgumbel (double,double,double,int); double d_pgumbel2 (double,double,double,int); double d_pAO (double,double,int); double d_plgamma (double,double,int); /* Additional scalar density functions */ double d_dgumbel (double,double,double,int); double d_dgumbel2 (double,double,double,int); double d_dAO (double,double,int); double d_dlgamma (double,double,int); /* Scalar density gradients */ double d_glogis (double); double d_gnorm (double); double d_gcauchy (double); double d_ggumbel (double); double d_ggumbel2 (double); double d_gAO (double,double); double d_glgamma (double,double); #ifdef __cplusplus } #endif #endif ordinal/vignettes/0000755000176200001440000000000013575515131013720 5ustar liggesusersordinal/vignettes/clm_article.Rnw0000644000176200001440000030034413440713046016666 0ustar liggesusers% \documentclass[article]{article} % \documentclass[article]{jss} \documentclass[nojss]{jss} %% -- Latex packages and custom commands --------------------------------------- %% recommended packages \usepackage{thumbpdf,lmodern,amsmath,amssymb,bm,url} \usepackage{textcomp} \usepackage[utf8]{inputenc} %% another package (only for this demo article) \usepackage{framed} %% new custom commands \newcommand{\class}[1]{`\code{#1}'} \newcommand{\fct}[1]{\code{#1()}} %% For Sweave-based articles about R packages: %% need no \usepackage{Sweave} \SweaveOpts{engine=R, eps=FALSE, keep.source = TRUE, prefix.string=clmjss} <>= options(prompt = "R> ", continue = "+ ", width = 70, useFancyQuotes = FALSE) library("ordinal") library("xtable") @ %%\VignetteIndexEntry{Cumulative Link Models for Ordinal Regression} %%\VignetteDepends{ordinal, xtable} %% -- Article metainformation (author, title, ...) ----------------------------- %% - \author{} with primary affiliation %% - \Plainauthor{} without affiliations %% - Separate authors by \And or \AND (in \author) or by comma (in \Plainauthor). %% - \AND starts a new line, \And does not. \author{Rune Haubo B Christensen\\Technical University of Denmark\\ \& \\ Christensen Statistics} \Plainauthor{Rune Haubo B Christensen} %% - \title{} in title case %% - \Plaintitle{} without LaTeX markup (if any) %% - \Shorttitle{} with LaTeX markup (if any), used as running title \title{Cumulative Link Models for Ordinal Regression with the \proglang{R} Package \pkg{ordinal}} \Plaintitle{Cumulative Link Models for Ordinal Regression with the R Package ordinal} \Shorttitle{Cumulative Link Models with the \proglang{R} package \pkg{ordinal}} %% - \Abstract{} almost as usual \Abstract{ This paper introduces the R-package \pkg{ordinal} for the analysis of ordinal data using cumulative link models. The model framework implemented in \pkg{ordinal} includes partial proportional odds, structured thresholds, scale effects and flexible link functions. The package also support cumulative link models with random effects which are covered in a future paper. A speedy and reliable regularized Newton estimation scheme using analytical derivatives provides maximum likelihood estimation of the model class. The paper describes the implementation in the package as well as how to use the functionality in the package for analysis of ordinal data including topics on model identifiability and customized modelling. The package implements methods for profile likelihood confidence intervals, predictions of various kinds as well as methods for checking the convergence of the fitted models. 
}

%% - \Keywords{} with LaTeX markup, at least one required
%% - \Plainkeywords{} without LaTeX markup (if necessary)
%% - Should be comma-separated and in sentence case.
\Keywords{ordinal, cumulative link models, proportional odds, scale effects, \proglang{R}}
\Plainkeywords{ordinal, cumulative link models, proportional odds, scale effects, R}

%% - \Address{} of at least one author
%% - May contain multiple affiliations for each author
%%   (in extra lines, separated by \emph{and}\\).
%% - May contain multiple authors for the same affiliation
%%   (in the same first line, separated by comma).
\Address{
  Rune Haubo Bojesen Christensen\\
  Section for Statistics and Data Analysis\\
  Department of Applied Mathematics and Computer Science\\
  DTU Compute\\
  Technical University of Denmark\\
  Richard Petersens Plads \\
  Building 324 \\
  DK-2800 Kgs. Lyngby, Denmark\\
  \emph{and}\\
  Christensen Statistics\\
  Bringetoften 7\\
  DK-3500 V\ae rl\o se, Denmark \\
  E-mail: \email{Rune.Haubo@gmail.com}; \email{Rune@ChristensenStatistics.dk}\\
  URL: \url{http://christensenstatistics.dk/}
}

\begin{document}
This is a copy of an article submitted for publication in Journal of Statistical Software (\url{https://www.jstatsoft.org/}).

%% -- Introduction -------------------------------------------------------------

%% - In principle "as usual".
%% - But should typically have some discussion of both _software_ and _methods_.
%% - Use \proglang{}, \pkg{}, and \code{} markup throughout the manuscript.
%% - If such markup is in (sub)section titles, a plain text version has to be
%%   added as well.
%% - All software mentioned should be properly \cite-d.
%% - All abbreviations should be introduced.
%% - Unless the expansions of abbreviations are proper names (like "Journal
%%   of Statistical Software" above) they should be in sentence case (like
%%   "generalized linear models" below).

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Introduction} \label{sec:intro}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Ordered categorical data, or simply \emph{ordinal} data, are common in a
multitude of empirical sciences and in particular in scientific disciplines
where humans are used as measurement instruments. Examples include school
grades, ratings of preference in consumer studies, degree of tumor
involvement in MR images and animal fitness in ecology. Cumulative link
models (CLMs) are a powerful model class for such data since observations
are treated correctly as categorical, the ordered nature is exploited and
the flexible regression framework allows for in-depth analyses.

This paper introduces the \pkg{ordinal} package \citep{ordinal-pkg} for
\proglang{R} \citep{R} for the analysis of ordinal data with cumulative
link models. The paper describes how \pkg{ordinal} supports the fitting of
CLMs with various model structures, model assessment and inferential
options including tests of partial proportional odds and scale effects. The
implementation and an effective fitting algorithm are also described. The
\pkg{ordinal} package also supports cumulative link \emph{mixed} models
(CLMMs); CLMs with normally distributed random effects. The support of this
model class will not be given further treatment here but remains a topic
for a future paper.
The name \emph{cumulative link models} is adopted from \citet{agresti02},
but the model class has been referred to by several other names in the
literature, such as \emph{ordinal regression models} as well as
\emph{ordered logit models} and \emph{ordered probit models}
\citep{greene10} for the logit and probit link functions. The cumulative
link model with a logit link is widely known as the \emph{proportional odds
  model} due to \citet{mccullagh80} and with a complementary log-log link,
the model is sometimes referred to as the \emph{proportional hazards model}
for grouped survival times.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% \subsection{Software review}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Cumulative link models can be fitted by all the major software packages,
and while some software packages support scale effects, partial
proportional odds (also referred to as unequal slopes, partial effects, and
nominal effects), different link functions and structured thresholds, all
model structures are usually not available in any one package or
implementation. The following brief software review is based on the
publicly available documentation at the software packages' websites
retrieved in June 2018.

\proglang{IBM SPSS} \citep{SPSS} implements McCullagh's \pkg{PLUM}
\citep{mccullagh80} procedure, allows for the five standard link functions
(cf. Table~\ref{tab:linkFunctions}) and scale effects. Estimation is via
Fisher-Scoring and a test for equal slopes is available for the
location-only model while it is not possible to estimate a partial
proportional odds model. \proglang{Stata} \citep{Stata} includes the
\code{ologit} and \code{oprobit} procedures for CLMs with logistic and
probit links but without support for scale effects, partial effects or
structured thresholds. The add-on package \pkg{oglm} allows for all five
standard link functions and scale effects. The \pkg{GLLAMM} package
\citep{gllamm} also has some support for CLMs in addition to some support
for random effects. \proglang{SAS} \citep{SAS} implements CLMs with logit
links in \code{proc logistic} and CLMs with the five standard links in
\code{proc genmod}.

In \proglang{R}, several packages on CRAN implement CLMs. \code{polr} from
\pkg{MASS} \citep{MASS} implements standard CLMs allowing for the five
standard link functions but no further extensions; the \pkg{VGAM} package
\citep{VGAM} includes CLMs via the \code{vglm} function using the
\code{cumulative} link. \code{vglm} allows for several link functions as
well as partial effects. The \code{lrm} and \code{orm} functions from the
\pkg{rms} package \citep{rms} also implement CLMs with the five standard
link functions but without scale effects, partial effects or structured
thresholds. A Bayesian alternative is implemented in the \pkg{brms} package
\citep{brms} which includes structured thresholds in addition to
random-effects.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% \subsection[ordinal package overview]{\pkg{ordinal} package overview}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The \pkg{ordinal} package implements CLMs and CLMMs along with functions
and methods to support these model classes. The two key functions in
\pkg{ordinal} are \code{clm} and \code{clmm} which fit CLMs and CLMMs,
respectively.
Additional functions in \pkg{ordinal} cover distributional support for
Gumbel and log-gamma distributions as well as gradients\footnote{gradients
  with respect to $x$, the quantile; not the parameters of the
  distributions} of normal, logistic and Cauchy probability density
functions which are used in the iterative methods implemented in \code{clm}
and \code{clmm}. An overview of key functions in \pkg{ordinal} is provided
in Table~\ref{tab:functions_in_ordinal}.

\begin{table}[t!]
\centering
\renewcommand*{\arraystretch}{1.2}
\begin{tabular}{llll}
  \hline
  \rotatebox{0}{Fitting} & \rotatebox{0}{Miscellaneous} &
  \rotatebox{0}{Former impl.} & \rotatebox{0}{Distributions} \\
  \hline
  \code{clm} & \code{convergence} & \code{clm2} & \code{[pdqrg]gumbel}$^{\textsf{c}}$ \\
  \code{clmm}$^{\textsf{c}}$ & \code{slice} & \code{clmm2}$^{\textsf{c}}$ & \code{[pdg]lgamma}$^{\textsf{c}}$ \\
  \code{clm.fit} & \code{drop.coef} & \code{clm2.control} & \code{gnorm}$^{\textsf{c}}$ \\
  \code{clm.control} & & \code{clmm2.control} & \code{glogis}$^{\textsf{c}}$ \\
  \code{clmm.control} & & & \code{gcauchy}$^{\textsf{c}}$ \\
  \hline
\end{tabular} \\
\caption{Functions in \pkg{ordinal}. Superscript "c" indicates (partial or
  full) implementation in \proglang{C}.\label{tab:functions_in_ordinal}}
\end{table}

A number of standard methods are implemented for fitted CLMs, i.e., objects
of class \code{clm} fitted with \code{ordinal::clm}, which mostly
correspond to methods also available for \code{glm} objects. Most extractor
methods will not be explicitly discussed in this paper as they behave
unsurprisingly but otherwise most methods will be discussed in the
following sections. As CLMMs are not covered by this paper, methods for
\code{clmm} objects will not be discussed.

\begin{table}[t!]
\centering
\renewcommand*{\arraystretch}{1.2}
\begin{tabular}{llll}
  \hline
  \multicolumn{2}{l}{Extractor and Print} & Inference & Checking \\[3pt]
  \hline
  \code{coef} & \code{print} & \code{anova} & \code{slice} \\
  \code{fitted} & \code{summary} & \code{drop1} & \code{convergence}\\
  \code{logLik} & \code{model.frame} & \code{add1} & \\
  \code{nobs} & \code{model.matrix} & \code{confint} & \\
  \code{vcov} & \code{update} & \code{profile} & \\
  \code{AIC}, \code{BIC} & & \code{predict} & \\
  \code{extractAIC} & & \code{step}, \code{stepAIC} & \\
  \hline
\end{tabular}
\caption{Methods for \code{clm} objects.\label{tab:clm_methods}}
\end{table}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% \subsection{Organization of the paper}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The remainder of the paper is organized as follows. The next section
establishes notation by defining CLMs and associated log-likelihood
functions, then describes the extended class of CLMs that is implemented in
\pkg{ordinal} including details about scale effects, structured thresholds,
partial proportional odds and flexible link functions. The third section
describes how maximum likelihood (ML) estimation of CLMs is implemented in
\pkg{ordinal}. The fourth section describes how CLMs are fitted and ordinal
data are analysed with \pkg{ordinal} including sections on nominal effects,
scale effects, structured thresholds, profile likelihoods, assessment of
model convergence, fitted values and predictions, issues around model
identifiability and finally how \pkg{ordinal} is prepared for customizable
fitting of models not otherwise covered by the API. We end in
section~\ref{sec:conclusions} with a brief conclusion.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Cumulative link models}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

A cumulative link model is a model for ordinal-scale observations, i.e.,
observations that fall in an ordered finite set of categories. Ordinal
observations can be represented by a random variable $Y_i$ that takes a
value $j$ if the $i$th ordinal observation falls in the $j$'th category
where $j = 1, \ldots, J$ and $J \geq 2$.\footnote{binomial models ($J = 2$)
  are also included.}%
%
A basic cumulative link model is
\begin{equation}
  \label{eq:BasicCLM}
  \gamma_{ij} = F(\eta_{ij})~, \quad \eta_{ij} = \theta_j - \bm x_i^\top
  \bm\beta~, \quad i = 1,\ldots,n~, \quad j = 1, \ldots, J-1 ~,
\end{equation}
where
\begin{equation*}
  %% \label{eq:cum}
  \gamma_{ij} = \Prob (Y_i \leq j) = \pi_{i1} + \ldots + \pi_{ij} \quad
  \mathrm{with} \quad \sum_{j=1}^J \pi_{ij} = 1
\end{equation*}
are cumulative probabilities\footnote{we have suppressed the conditioning
  on the covariate vector, $\bm x_i$, i.e., $\gamma_{ij} = \gamma_j(\bm
  x_i)$ and $P(Y_i \leq j) = P(Y \leq j | \bm x_i)$.}, $\eta_{ij}$ is the
linear predictor, $\bm x_i^\top$ is a $p$-vector of regression variables
for the parameters $\bm\beta$, without a leading column for an intercept,
and $F$ is the inverse link function.
%
The thresholds (also known as cut-points or intercepts) are strictly
ordered:
\begin{equation*}
  -\infty \equiv \theta_0 \leq \theta_1 \leq \ldots \leq \theta_{J-1} \leq
  \theta_J \equiv \infty.
\end{equation*}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{The multinomial distribution and the log-likelihood function}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The ordinal observation $Y_i$ which assumes the value $j$ can be
represented by a multinomially distributed variable $\bm Y_i^* \sim
\mathrm{multinom}(\bm\pi_i, 1)$, where $\bm Y_i^*$ is a $J$-vector with a
$1$ at the $j$'th entry and 0 otherwise, and with probability mass function
%
\begin{equation}
  \label{eq:multinom_pmf}
  \Prob(\bm Y_i^* = \bm y_i^*) = \prod_j \pi_{ij}^{y_{ij}^*} ~.
\end{equation}
%
The log-likelihood function can therefore be written as
%
\begin{equation*}
  \ell(\bm\theta, \bm\beta; \bm y^*) = \sum_i \sum_j y_{ij}^* \log \pi_{ij}
\end{equation*}
%
or equivalently
%
\begin{align*}
  \ell(\bm\theta, \bm\beta; \bm y) =~& \sum_i \sum_j \mathrm I (y_i = j)
  \log \pi_{ij} \\
  =~& \sum_i \log \tilde\pi_i
\end{align*}
%
where $\tilde\pi_i$ is the entry of $\bm \pi_i$ corresponding to the
observed response category, i.e., $\tilde\pi_i = \pi_{ij}$ when $y_i = j$,
and $\mathrm I(\cdot)$ is the indicator function. Allowing for
observation-level weights (case weights) $w_i$ finally leads to
%
\begin{equation}
  \label{eq:clm-log-likelihood}
  \ell(\bm\theta, \bm\beta; \bm y) = \sum_i w_i \log \tilde\pi_i ~.
\end{equation}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Likelihood based inference}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Confidence intervals for model parameters are obtained by appealing to the
asymptotic normal distribution of a statistic $s(\cdot)$ for a scalar
parameter of interest $\beta_a$ and defined as
\begin{equation*}
  CI:~\left\{ \beta_a; |s(\beta_a)| < z_{1 - \alpha/2} \right\}~,
\end{equation*}
where $z_{1 - \alpha/2}$ is the $(1 - \alpha/2)$ quantile of the standard
normal cumulative distribution function. Taking $s(\cdot)$ to be the Wald
statistic $s(\beta_a):~ w(\beta_a) = (\hat\beta_a -
\beta_a)/\hat{\mathrm{se}}(\hat\beta_a)$ leads to the classical symmetric
intervals.
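These symmetric Wald intervals are easy to compute by hand from a model
fit. The following sketch (not evaluated here) assumes that \code{fit} is a
model fitted with \code{clm}; \code{coef} and \code{vcov} return the
estimates and their variance-covariance matrix for thresholds and
regression parameters alike:
<<eval=FALSE>>=
## Sketch: hand-computed 95% Wald confidence intervals for all parameters;
## 'fit' is assumed to be a model fitted with clm():
est <- coef(fit)
se <- sqrt(diag(vcov(fit)))
cbind(lower = est - qnorm(0.975) * se,
      upper = est + qnorm(0.975) * se)
@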
Better confidence intervals can be obtained by choosing instead the
likelihood root statistic \citep[see e.g.,][]{pawitan01, brazzale07}:
\begin{equation*}
  s(\beta_a):~ r(\beta_a) = \mathrm{sign}(\hat\beta_a - \beta_a)
  \sqrt{-2 [ \ell(\hat{\bm\theta}, \hat{\bm\beta}; \bm y) -
    \ell_p(\beta_a; \bm y)]}
\end{equation*}
where
\begin{equation*}
  \ell_p(\beta_a; \bm y) = \max_{\bm\theta, \bm\beta_{-a}} \ell(\bm\theta,
  \bm\beta; \bm y)~,
\end{equation*}
is the profile likelihood for the scalar parameter $\beta_a$ and
$\bm\beta_{-a}$ is the vector of regression parameters without the $a$'th
one.

While the profile likelihood has to be optimized over all parameters except
$\beta_a$, we define a \emph{log-likelihood slice} as
\begin{equation}
  \label{eq:slice}
  \ell_{\mathrm{slice}}(\beta_a; \bm y) = \ell(\beta_a; \hat{\bm\theta},
  \hat{\bm\beta}_{-a}, \bm y)~,
\end{equation}
which is the log-likelihood function evaluated at $\beta_a$ while keeping
the remaining parameters fixed at their ML estimates. A quadratic
approximation to the log-likelihood slice is $(\hat\beta_a - \beta_a)^2 /
2\tau_a^2$ where the \emph{curvature unit} $\tau_a = H_{aa}^{-1/2}$ is the
inverse square root of the $a$'th diagonal element, $H_{aa}$, of the
Hessian of $-\ell(\hat{\bm\theta}, \hat{\bm\beta}; \bm y)$.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Link functions}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

A commonly used link function is the logit link which leads to
%
\begin{equation}
  \label{eq:cum_logit_model}
  \mathrm{logit}(\gamma_{ij}) = \log \frac{\Prob (Y_i \leq j)}{1 -
    \Prob(Y_i \leq j)}
\end{equation}
%
The odds ratio (OR) of the event $Y_i \leq j$ at $\bm x_1$ relative to the
same event at $\bm x_2$ is then
%
\begin{equation}
  \label{eq:odds_ratio}
  \mathrm{OR} = \frac{\gamma_j(\bm x_1) / [1 - \gamma_j(\bm x_1)]}
  {\gamma_j(\bm x_2) / [1 - \gamma_j(\bm x_2)]} =
  \frac{\exp(\theta_j - \bm x_1^\top \bm\beta)}
  {\exp(\theta_j - \bm x_2^\top \bm\beta)}
  %% =&~ \exp(\theta_j - \theta_j - \bm x_1 \bm\beta + \bm x_2 \bm\beta)
  = \exp[(\bm x_2^\top - \bm x_1^\top)\bm\beta]
\end{equation}
which is independent of $j$. Thus the log cumulative odds ratio is
proportional to the distance between $\bm x_1$ and $\bm x_2$ which
motivated \citet{mccullagh80} to denote the cumulative logit model a
\emph{proportional odds model}. If $x$ represents a treatment variable with
two levels (e.g., placebo and treatment), then $x_2 - x_1 = 1$ and the odds
ratio is $\exp(-\beta_\textup{treatment})$. Similarly the odds ratio of the
event $Y \geq j$ is $\exp(\beta_\textup{treatment})$.

The probit link has its own interpretation through a normal linear model
for a latent variable which is considered in
section~\ref{sec:latent-variable-motivation}.

The complementary log-log (clog-log) link is also sometimes used because of
its interpretation as a proportional hazards model for grouped survival
times:
\begin{equation*}
  -\log\{1 - \gamma_{j}(\bm x_i) \} = \exp( \theta_j - \bm x_i^T \bm\beta )
\end{equation*}
Here $1 - \gamma_{j}(\bm x_i)$ is the probability of survival beyond
category $j$ given $\bm x_i$. The proportional hazards model has the
property that
\begin{equation*}
  \log \{ 1 - \gamma_{j}(\bm x_1) \} = \exp[ (\bm x_2^T - \bm x_1^T)
  \bm\beta ] \log \{ 1 - \gamma_{j}(\bm x_2) \}~,
\end{equation*}
thus the hazards of events beyond category $j$ at $\bm x_1$ are
proportional to those at $\bm x_2$. If the log-log link is used on the
response categories in the reverse order, this is equivalent to using the
clog-log link on the response in the original order.
This reverses the sign of $\bm\beta$ as well as the sign and order of
$\{\theta_j\}$ while the likelihood and standard errors remain unchanged.
% % Thus, similar to the proportional odds
% model, the ratio of hazard functions beyond category $j$ at $\bm x_1$
% relative to $\bm x_2$ (the hazard ratio, $HR$) is:
% \begin{equation*}
%   HR = \frac{-\log\{1 - \gamma_{j}(\bm x_2) \}}
%   {-\log\{1 - \gamma_{j}(\bm x_1) \}} =
%   \frac{\exp( \theta_j - \bm x_1^T \bm\beta )}
%   {\exp( \theta_j - \bm x_2^T \bm\beta )} =
%   \exp[(\bm x_2 - \bm x_1)\bm\beta]
% \end{equation*}
%
Details of the most common link functions are described in
Table~\ref{tab:linkFunctions}.

\begin{table}[t!]
\begin{center}
%\footnotesize
\begin{tabular}{llll}
  \hline
  Name & logit & probit & log-log \\
  \hline
  Distribution & logistic & normal & Gumbel (max)$^b$ \\
  Shape & symmetric & symmetric & right skew\\
  Link function ($F^{-1}$) & $\log[\gamma / (1 - \gamma)]$ &
  $\Phi^{-1}(\gamma)$ & $-\log[-\log(\gamma)]$ \\
  Inverse link ($F$) & $1 / [1 + \exp(-\eta)]$ & $\Phi(\eta)$ &
  $\exp(-\exp(-\eta))$ \\
  Density ($f = F'$) & $\exp(-\eta) / [1 + \exp(-\eta)]^2$ & $\phi(\eta)$ &
  $\exp(-\eta)\exp[-\exp(-\eta)]$ \\
  \hline
  \hline
  Name & clog-log$^a$ & cauchit \\
  \hline
  Distribution & Gumbel (min)$^b$ & Cauchy$^c$ \\
  Shape & left skew & kurtotic \\
  Link function ($F^{-1}$) & $\log[ -\log(1 - \gamma)]$ &
  $\tan[\pi (\gamma - 0.5)]$ \\
  Inverse link ($F$) & $1 - \exp[-\exp(\eta)]$ & $\arctan(\eta)/\pi + 0.5$ \\
  Density ($f = F'$) & $\exp[-\exp(\eta) + \eta]$ & $1 / [\pi(1 + \eta^2)]$ \\
  \hline
\end{tabular}
\end{center}
% \footnotesize
%
% $^a$: the \emph{complementary log-log} link \\
% $^b$: the Gumbel distribution is also known as the extreme value
% (type I) distribution for extreme minima or maxima. It is also
% sometimes referred to as the Weibull (or log-Weibull) distribution
% (\url{http://en.wikipedia.org/wiki/Gumbel_distribution}). \\
% $^c$: the Cauchy distribution is a $t$-distribution with one df
\caption{Summary of the five standard link functions.
  $^a$: the \emph{complementary log-log} link;
  $^b$: the Gumbel distribution is also known as the extreme value (type I)
  distribution for extreme minima or maxima. It is also sometimes referred
  to as the Weibull (or log-Weibull) distribution
  (\url{http://en.wikipedia.org/wiki/Gumbel_distribution});
  $^c$: the Cauchy distribution is a $t$-distribution with one degree of
  freedom.
  \label{tab:linkFunctions}}
\end{table}

The \pkg{ordinal} package allows for the estimation of an extended class of
cumulative link models in which the basic model~\eqref{eq:BasicCLM} is
extended in a number of ways including structured thresholds, partial
proportional odds, scale effects and flexible link functions. The following
sections will describe these extensions of the basic CLM.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Extensions of cumulative link models}
\label{sec:extensions-of-clms}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

A general formulation of the class of models (excluding random effects)
that is implemented in \pkg{ordinal} can be written
%
\begin{equation}
  \gamma_{ij} = F_{\lambda}(\eta_{ij}), \quad \eta_{ij} =
  \frac{g_{\bm\alpha} (\theta_j) - \bm x_i^\top \bm\beta - \bm w_i^\top
    \tilde{\bm\beta}_j}{\exp(\bm z_i^\top \bm\zeta)}
\end{equation}
%
where
\begin{description}
\item[$F_{\lambda}$] is the inverse link function.
It may be parameterized by the scalar parameter $\lambda$ in which case we
  refer to $F_{\lambda}^{-1}$ as a \emph{flexible link function},
%
\item[$g_{\bm\alpha}(\theta_j)$] parameterizes the thresholds
  $\{\theta_j\}$ by the vector $\bm\alpha$ such that $g$ restricts
  $\{\theta_j\}$ to be for example symmetric or equidistant. We denote
  these \emph{structured thresholds}.
%
\item[$\bm x_i^\top\bm\beta$] are the ordinary regression effects,
%
\item[$\bm w_i^\top \tilde{\bm\beta}_j$] are regression effects which are
  allowed to depend on the response category $j$ and they are denoted
  \emph{partial} or \emph{non-proportional odds} \citep{peterson90} when
  the logit link is applied. To include other link functions in the
  terminology we denote these effects \emph{nominal effects} (in text and
  code) because these effects are not integral to the ordinal nature of the
  data.
%
\item[$\exp(\bm z_i^\top \bm\zeta)$] are \emph{scale effects} since in a
  latent variable view these effects model the scale of the underlying
  location-scale distribution.
\end{description}
With the exception of the structured thresholds, these extensions of the
basic CLM have been considered individually in a number of sources but to
the author's best knowledge not previously in a unified framework.
%
For example partial proportional odds have been considered by
\citet{peterson90} and scale effects have been considered by
\citet{mccullagh80} and \citet{cox95}. \citet{agresti02} is a good
introduction to cumulative link models in the context of categorical data
analysis and includes discussions of scale effects.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Latent variable motivation of CLMs}
\label{sec:latent-variable-motivation}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

It is natural to motivate the CLM from a linear model for a categorized
version of a latent variable. Assume the following linear model for an
unobserved latent variable:
%
\begin{equation}
  \label{eq:latent}
  S_i = \alpha^* + \bm x_i^\top \bm\beta^* + \varepsilon_i, \quad
  \varepsilon_i \sim N(0, \sigma^{*2})
\end{equation}
%
If $S_i$ falls between two thresholds, $\theta_{j-1}^* < S_i \leq
\theta_j^*$ where
%
\begin{equation}
  \label{eq:thresholds}
  -\infty \equiv \theta_0^* < \theta_1^* < \ldots < \theta^*_{J-1} <
  \theta_{J}^* \equiv \infty
\end{equation}
%
then $Y_i = j$ is observed and the cumulative probabilities are:
%
\begin{equation*}
  \gamma_{ij} = \Prob (Y_i \leq j) = \Prob(S_i \leq \theta_j^*) = \Prob
  \left( Z \leq \frac{\theta_j^* - \alpha^* - \bm x_i^\top \bm\beta^*}{%
      \sigma^*} \right) = \Phi ( \theta_j - \bm x_i^\top \bm\beta )
\end{equation*}
%
where $Z$ follows a standard normal distribution, $\Phi$ denotes the
standard normal cumulative distribution function, parameters with an
``$^*$'' exist on the latent scale, $\theta_j = (\theta_j^* - \alpha^*) /
\sigma^*$ and $\bm\beta = \bm\beta^* / \sigma^*$. Note that $\alpha^*$,
$\bm\beta^*$ and $\sigma^*$ would have been identifiable if the latent
variable $S$ was directly observed, but they are not identifiable with
ordinal observations.
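This identifiability argument is easily illustrated by simulation. In the
following sketch (with made-up parameter values; not evaluated here) a
latent normal variable with $\beta^* = 3$ and $\sigma^* = 2$ is
categorized, and a CLM with a probit link recovers $\beta = \beta^* /
\sigma^* = 1.5$ rather than $\beta^*$ itself:
<<eval=FALSE>>=
## Sketch: categorizing a simulated latent variable (hypothetical
## example); the probit CLM estimates beta*/sigma*, not beta*:
set.seed(1)
dat <- data.frame(x = runif(1000))
S <- 2 + 3 * dat$x + rnorm(1000, sd = 2) # alpha* = 2, beta* = 3, sigma* = 2
dat$y <- cut(S, breaks = c(-Inf, 2.5, 3.5, 4.5, Inf),
             ordered_result = TRUE)
fit <- clm(y ~ x, data = dat, link = "probit")
coef(fit) # the slope estimate should be close to beta*/sigma* = 1.5
@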
If we allow a log-linear model for the scale such that
%
\begin{equation*}
  \varepsilon_i \sim N(0, \sigma^{*2}_i), \quad
  \sigma_i^* = \exp(\mu + \bm z_i^\top \bm\zeta) =
  \sigma^* \exp(\bm z_i^\top \bm\zeta)
\end{equation*}
%
where $\bm z_i$ is the $i$'th row of a design matrix $\bm Z$ without a
leading column for an intercept and $\sigma^* = \exp(\mu)$, then
\begin{equation*}
  \gamma_{ij} = \Prob \left( Z \leq \frac{\theta_j^* - \alpha^* -
      \bm x_i^\top \bm\beta^*}{\sigma^*_i} \right) =
  \Phi \left( \frac{\theta_j - \bm x_i^T \bm\beta}{\sigma_i} \right)
\end{equation*}
where $\sigma_i = \sigma_i^* / \sigma^* = \exp(\bm z_i^\top \bm\zeta)$ is
the \emph{relative} scale.

The common link functions: probit, logit, log-log, clog-log and cauchit
correspond to inverse cumulative distribution functions of the normal,
logistic, Gumbel(max), Gumbel(min) and Cauchy distributions respectively.
These distributions are all members of the location-scale family with
common form $F(\mu, \sigma)$, with location $\mu$ and non-negative scale
$\sigma$, for example, the logistic distribution has mean $\mu$ and
standard deviation $\sigma \pi / \sqrt{3}$. Choosing a link function
therefore corresponds to assuming a particular distribution for the latent
variable $S$ in which $\bm x_i^\top \bm\beta$ and $\exp(\bm z_i^\top
\bm\zeta)$ models location \emph{differences} and scale \emph{ratios}
respectively of that distribution.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Structured thresholds}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Structured thresholds, $\{ g(\bm\alpha)_j \}$, make it possible to impose
restrictions on the thresholds $\bm\theta = g(\bm\alpha)$. For instance
restricting the thresholds to be equidistant means that only the location
of, say, the first threshold and the spacing between adjacent thresholds
have to be estimated, thus only two parameters are used to parameterize the
thresholds irrespective of the number of response categories.

\pkg{ordinal} takes $g(\bm\alpha)$ to be a linear function and operates
with
\begin{equation*}
  g(\bm\alpha) = \mathcal{J}^\top \bm\alpha = \bm \theta
\end{equation*}
where the Jacobian $\mathcal{J}$ defines the mapping from the parameters
$\bm\alpha$ to the thresholds $\bm\theta$. The traditional ordered but
otherwise unrestricted thresholds are denoted \emph{flexible thresholds}
and obtained by taking $\mathcal{J}$ to be an identity matrix.

Assuming $J=6$ ordered categories, the Jacobians for equidistant and
symmetric thresholds (denoted \code{equidistant} and \code{symmetric} in
\code{clm} argument \code{threshold}) are
\begin{equation*}
  \mathcal{J}_{\mathrm{equidistant}} =
  \begin{bmatrix}
    1 & 1 & 1 & 1 & 1 \\
    0 & 1 & 2 & 3 & 4 \\
  \end{bmatrix}, \quad
  \mathcal{J}_{\mathrm{symmetric}} =
  \begin{bmatrix}
    1 & 1 & 1 & 1 & 1 \\
    0 & -1 & 0 & 1 & 0 \\
    -1 & 0 & 0 & 0 & 1 \\
  \end{bmatrix}.
\end{equation*}
Another version of symmetric thresholds (denoted \code{symmetric2}) is
sometimes relevant with an odd number of response categories, here
illustrated with $J=5$ together with the \code{symmetric} thresholds:
\begin{equation*}
  \mathcal{J}_{\mathrm{symmetric2}} =
  \begin{bmatrix}
    0 & -1 & 1 & 0 \\
    -1 & 0 & 0 & 1 \\
  \end{bmatrix}, \quad
  \mathcal{J}_{\mathrm{symmetric}} =
  \begin{bmatrix}
    1 & 1 & 0 & 0 \\
    0 & 0 & 1 & 1 \\
    -1 & 0 & 0 & 1 \\
  \end{bmatrix}
\end{equation*}
The nature of $\mathcal{J}$ for a particular model can always be inspected
by printing the \code{tJac} component of the \code{clm} fit.
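For example, the following sketch (not evaluated here) fits symmetric
thresholds to the wine data analysed in
section~\ref{sec:fitting-basic-clm} and prints the transpose of the
Jacobian:
<<eval=FALSE>>=
## Sketch: inspecting the threshold structure of a fitted model:
fm.sym <- clm(rating ~ temp + contact, data = wine,
              threshold = "symmetric")
fm.sym$tJac
@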
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Partial proportional odds and nominal effects}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The nominal effects $\bm w_i^\top\tilde{\bm\beta}_j$ can be considered an
extension of the regression part of the model $\bm x_i^\top \bm\beta$ in
which the regression effects are allowed to vary with $j$.
%
The nominal effects can also be considered an extension of the thresholds
$\theta_j$ which allows them to depend on variables $\bm w_i^\top$:
$\tilde{\theta}_{ij}(\bm w_i^\top) = \theta_j - \bm w_i^\top
\tilde{\bm\beta}_j$ is the $j$'th threshold for the $i$'th observation. The
following treatment assumes the latter view.

In general let $\bm W$ denote the design matrix for the nominal effects
without a leading column for an intercept; the nominal-effects parameter
vector $\tilde{\bm\beta}_j$ is then $\mathrm{ncol}(\bm W)$ long and
$\tilde{\bm\beta}$ is $\mathrm{ncol}(\bm W) \cdot (J-1)$ long.

If $\bm W$ is the design matrix for the nominal effects containing a single
column for a continuous variable then $\tilde{\beta}_j$ is the slope
parameter corresponding to the $j$'th threshold and $\theta_j$ is the
$j$'th intercept, i.e., the threshold when the covariate is zero. Looking
at $\tilde{\theta}_{ij}(\bm w_i^\top) = \theta_j - \bm w_i^\top
\tilde{\bm\beta}_j$ as a linear model for the thresholds facilitates the
interpretation.

If, on the other hand, $\bm W$ is the design matrix for a categorical
variable (a \code{factor} in \proglang{R}) then the interpretation of
$\tilde{\bm\beta}_j$ depends on the contrast-coding of $\bm W$. If we
assume that the categorical variable has 3 levels, then
$\tilde{\bm\beta}_j$ is a 2-vector. In the default treatment
contrast-coding (\code{"contr.treatment"}) $\theta_j$ is the $j$'th
threshold for the first (base) level of the factor, $\tilde{\beta}_{1j}$ is
the difference between the thresholds for the first and second levels and
$\tilde{\beta}_{2j}$ is the difference between the thresholds for the first
and third levels.

In general we define $\bm\Theta$ as a matrix with $J-1$ columns and with 1
row for each combination of the levels of factors in $\bm W$. This matrix
can be extracted from the model fit as the \code{Theta} component.

Note that variables in $\bm X$ cannot also be part of $\bm W$ if the model
is to remain identifiable. \pkg{ordinal} detects this and automatically
removes the offending variables from $\bm X$.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Flexible link functions}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The \pkg{ordinal} package allows for two kinds of flexible link functions
due to \citet{aranda-ordaz83} and \citet{genter85}.

The link function proposed by \citet{aranda-ordaz83} reads
%
\begin{equation*}
  F^{-1}_\lambda (\gamma_{ij}) = \log \left\{
    \frac{(1 - \gamma_{ij})^{-\lambda} - 1} {\lambda} \right\}~,
\end{equation*}
which depends on the auxiliary parameter $\lambda \in ]0, \infty[$. When
$\lambda = 1$, the logistic link function arises, and when $\lambda
\rightarrow 0$,
\begin{equation*}
  \{ (1 - \gamma_{ij})^{-\lambda} - 1 \} / \lambda \rightarrow
  \log (1 - \gamma_{ij})^{-1}~,
\end{equation*}
so the complementary log-log link arises.
The inverse link function and its derivative are given by
\begin{align*}
  F(\eta) =&~ 1 - (\lambda \exp(\eta) + 1)^{-\lambda^{-1}} \\
  f(\eta) =&~ \exp(\eta) (\lambda \exp(\eta) + 1)^{-\lambda^{-1} - 1}
\end{align*}
The density implied by the inverse link function is left skewed if $0 <
\lambda < 1$, symmetric if $\lambda = 1$ and right skewed if $\lambda > 1$,
so the link function can be used to assess the evidence about possible
skewness of the latent distribution.

The log-gamma link function proposed by \citet{genter85} is based on the
log-gamma density by \citet{farewell77}. The cumulative distribution
function and hence inverse link function reads
\begin{equation*}
  F_\lambda(\eta) =
  \begin{cases}
    1 - G(q; \lambda^{-2}) & \lambda < 0 \\
    \Phi(\eta) & \lambda = 0 \\
    G(q; \lambda^{-2}) & \lambda > 0
  \end{cases}
\end{equation*}
where $q = \lambda^{-2}\exp(\lambda \eta)$ and $G(\cdot; \alpha)$ denotes
the Gamma distribution with shape parameter $\alpha$ and unit rate
parameter and $\Phi$ denotes the cumulative standard normal distribution
function. The corresponding density function reads
\begin{equation*}
  f_\lambda(\eta) =
  \begin{cases}
    |\lambda| k^k \Gamma(k)^{-1} \exp\{ k(\lambda\eta - \exp(\lambda\eta))
    \} & \lambda \neq 0 \\
    \phi(\eta) & \lambda = 0
  \end{cases}
\end{equation*}
where $k=\lambda^{-2}$, $\Gamma(\cdot)$ is the gamma function and $\phi$ is
the standard normal density function.

Note that the choice and parameterization of the predictor, $\eta_{ij}$,
e.g., the use of scale effects, can affect the evidence about the shape of
the latent distribution. There are usually several link functions which
provide essentially the same fit to the data and choosing among the good
candidates is probably best done by appealing to arguments such as ease of
interpretation rather than arguments related to fit.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section[Implementation of ML Estimation of CLMs in ordinal]{Implementation of ML Estimation of CLMs in \pkg{ordinal}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

In \pkg{ordinal} cumulative link models are (by default) estimated with a
regularized Newton-Raphson (NR) algorithm with step-halving (line search)
using analytical expressions for the gradient and Hessian of the negative
log-likelihood function. This NR algorithm with analytical derivatives is
used irrespective of whether the model contains structured thresholds,
nominal effects or scale effects, the only exception being models with
flexible link functions for which a general-purpose quasi-Newton optimiser
is used.

Due to the computationally cheap and efficient evaluation of the analytical
derivatives, the relatively well-behaved log-likelihood function (with
exceptions described below) and the speedy convergence of the
Newton-Raphson algorithm, the estimation of CLMs is virtually instant on a
modern computer even with complicated models on large datasets. This also
facilitates simulation studies. More important than speed is perhaps that
the algorithm is reliable and accurate.

The remainder of this section describes the regularized NR algorithm with
step-halving (line search). The analytical gradients are described in
detail in \citet{mythesis}.
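As a brief illustration of the exception mentioned above, a flexible link
is requested via the \code{link} argument, in which case the quasi-Newton
optimizer is used behind the scenes; a sketch using the wine data analysed
in section~\ref{sec:fitting-basic-clm} (not evaluated here):
<<eval=FALSE>>=
## Sketch: fitting a CLM with the Aranda-Ordaz flexible link:
fm.AO <- clm(rating ~ temp + contact, data = wine, link = "Aranda-Ordaz")
summary(fm.AO) # includes a coefficient table for the link parameter lambda
@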
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{A regularized Newton-Raphson algorithm with step halving}
\label{sec:algorithm}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The regularized NR algorithm is an iterative method that produces a
sequence of estimates $\bm\psi^{(0)}, \ldots, \bm\psi^{(i)}, \ldots$ where
parenthesized superscripts denote iterations. From the $i$th estimate, the
$(i+1)$'th estimate is given by
%
\begin{equation*}
  \bm\psi^{(i+1)} = \bm\psi^{(i)} - c_1 \bm h^{(i)}, \quad
  \bm h^{(i)} = \tilde{\bm H}(\bm\psi^{(i)}; \bm y)^{-1}
  \bm g(\bm\psi^{(i)}; \bm y)
\end{equation*}
where
\begin{equation*}
  \tilde{\bm H}(\bm\psi^{(i)}; \bm y) = \bm H(\bm\psi^{(i)}; \bm y) +
  c_2 (c_3 + |\min(\bm e^{(i)})|) \bm I,
\end{equation*}
$\bm H(\bm\psi^{(i)}; \bm y)$ and $\bm g(\bm\psi^{(i)}; \bm y)$ are the
Hessian and gradient of the negative log-likelihood function with respect
to the parameters evaluated at the current estimates; $\bm e^{(i)}$ is the
vector of eigenvalues of $\bm H(\bm\psi^{(i)}; \bm y)$, $\bm h^{(i)}$ is
the $i$'th step, $c_1$ is a scalar parameter which controls the step
halving, and $c_2$, $c_3$ are scalar parameters which control the
regularization of the Hessian.

Regularization is only enforced when the Hessian is not positive definite,
so $c_2 = 1$ when $\min(\bm e^{(i)}) < \tau$ and zero otherwise, where
$\tau$ is an appropriate tolerance. The choice of $c_3$ is to some extent
arbitrary (though positive) and the algorithm in \pkg{ordinal} sets $c_3 =
1$.

Step-halving is enforced when the full step $\bm h^{(i)}$ causes a decrease
in the likelihood function in which case $c_1$ is consecutively halved,
$c_1 = \frac{1}{2}, \frac{1}{4}, \frac{1}{8}, \ldots$ until the step $c_1
\bm h^{(i)}$ is small enough to cause an increase in the likelihood or
until the maximum allowed number of consecutive step-halvings has been
reached.

The algorithm in \pkg{ordinal} also deals with a couple of numerical issues
that may occur. For example, the likelihood function may be sufficiently
flat that the change in log-likelihood is smaller than what can be
represented in double precision, and so, while the new parameters may be
closer to the true ML estimates and be associated with a smaller gradient,
it is not possible to measure progress by the change in log-likelihood.

The NR algorithm in \pkg{ordinal} has two convergence criteria: (1) an
absolute criterion requesting that $\max | \bm g(\bm\psi^{(i)}; \bm y) | <
\tau_1$ and (2) a relative criterion requesting that $\max | \bm h^{(i)} |
< \tau_2$ where the default thresholds are $\tau_1 = \tau_2 = 10^{-6}$.
Here the first criterion attempts to establish closeness of
$\bm\psi^{(i)}$ to the true ML estimates in absolute terms; the second
criterion is an estimate of relative closeness to the true ML estimates.
%
Both convergence criteria are needed if both small (e.g., $\approx 0.0001$)
and large (e.g., $\approx 1000$) parameter estimates are to be determined
accurately with an appropriate number of correct decimals as well as
significant digits.

The NR algorithm in \pkg{ordinal} attempts to satisfy the absolute
criterion first and will then only attempt to satisfy the relative
criterion if it can take the full un-regularized NR step and then only for
a maximum of 5 steps.
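The following \proglang{R} sketch illustrates the update rule and
step-halving just described for a generic smooth objective. It is an
illustration of the principle only, not a transcript of the (more careful)
implementation in \pkg{ordinal}; the objective \code{nll}, gradient
\code{gr} and Hessian \code{hess} functions are assumed supplied by the
user:
<<eval=FALSE>>=
## Sketch of a regularized NR iteration with step-halving:
newton_sketch <- function(par, nll, gr, hess, gradTol = 1e-6,
                          maxIter = 100L, maxLineIter = 15L) {
  for (i in seq_len(maxIter)) {
    g <- gr(par)
    if (max(abs(g)) < gradTol) break # absolute convergence criterion
    H <- hess(par)
    e <- eigen(H, symmetric = TRUE, only.values = TRUE)$values
    if (min(e) < 1e-6) # regularize a (near) non-positive definite Hessian
      H <- H + (1 + abs(min(e))) * diag(length(par))
    h <- drop(solve(H, g))
    c1 <- 1 # step-halving: halve c1 until the step decreases the objective
    for (j in seq_len(maxLineIter)) {
      if (nll(par - c1 * h) < nll(par)) break
      c1 <- c1 / 2
    }
    par <- par - c1 * h
  }
  par
}
@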
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Convergence properties and parameter accuracy}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Convergence to a well-defined optimum is achieved when the gradient of the
negative log-likelihood function with respect to the parameters is small
and the Hessian is positive definite, i.e., having only positive
eigenvalues away from zero.
%
Identifiability problems occur when the likelihood function is flat in
directions of one or more parameters (or linear functions of the
parameters) while well-defined, i.e., pointy, in other directions. It may
happen that a parameter is exactly unidentifiable and \code{clm} is in some
cases (including rank-deficient design matrices) able to detect this and
exclude the parameter from the optimization procedure. In other cases the
likelihood is almost flat in one or more directions. These cases are not
uncommon in practice and it is not possible to reduce the parameter space
before optimizing the model.

To measure the degree of empirical identifiability \code{clm} reports the
condition number of the Hessian which is the ratio of the largest to the
smallest eigenvalue. A large condition number of the Hessian does not
necessarily mean there is a problem with the model, but it can be an
indication of a problem. A small condition number of the Hessian, say
smaller than about $10^4$ or $10^6$, on the other hand, is a good insurance
that a well-defined optimum has been reached.

A key problem for optimization methods is when to stop iterating: when have
the parameters that determine the optimum of the function been found with
sufficient accuracy? The \emph{method independent error estimate}
\citep{elden04} provides a way to approximate the error in the parameter
estimates. Sufficiently close to the optimum the Newton-Raphson step
provides this estimate:
\begin{equation*}
  |\hat{\bm\alpha}^{(i)} - \bm\alpha^*| \lesssim \bm h^{(i)}, \quad
  \bm h^{(i)} = \bm H(\bm\psi^{(i)}; \bm y)^{-1} \bm g(\bm\psi^{(i)}; \bm y)
\end{equation*}
where $\bm\alpha^*$ is the exact (but unknown) value of the ML estimate,
$\hat{\bm\alpha}^{(i)}$ is the ML estimator of $\bm\alpha$ at the $i$'th
iteration and $\bm h^{(i)}$ is the full unregularized NR step at the $i$'th
iteration.
%
Since the gradient and Hessian of the negative log-likelihood function with
respect to the parameters is already evaluated and part of the model fit at
convergence, it is essentially computationally cost-free to approximate the
error in the parameter estimates. Based on the error estimate the number of
correctly determined decimals and significant digits is determined for each
parameter.

The assessment of the number of correctly determined decimals and
significant digits is only reliable sufficiently close to the optimum and
when the NR algorithm converges without regularization and step-halving. In
practice we caution against this assessment if the algorithm did not
converge successfully.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Properties of the log-likelihood function for extended CLMs}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\citet{pratt81} and \citet{burridge81} showed (seemingly independently of
each other) that the log-likelihood function of the basic cumulative link
model~\eqref{eq:BasicCLM} is concave. This means that there is a unique
global optimum of the log-likelihood function and therefore no risk of
convergence to a local optimum.
It also means that the Hessian matrix for the negative log-likelihood is
strictly positive definite and therefore also that the Newton step is
always in the direction of higher likelihood. The genuine Newton step may
be too long to actually cause an increase in likelihood from one iteration
to the next (this is called ``overshoot''). This is easily overcome by
successively halving the length of the Newton step until an increase in
likelihood is achieved.

Exceptions to the strict concavity of the log-likelihood function include
models using the cauchit link, flexible link functions as well as models
with scale effects. Notably models with structured thresholds as well as
nominal effects do not affect the linearity of the predictor, $\eta_{ij}$,
and so are also guaranteed to have concave log-likelihoods.

The non-decreasing restriction on the threshold parameters $\{\theta_j\}$
is dealt with by defining $\ell(\bm\theta, \bm\beta; \bm y) = -\infty$ when
$\{\theta_j\}$ is not in a non-decreasing sequence. If the algorithm
attempts evaluation at such illegal values step-halving automatically
brings the algorithm back on track.

Other implementations of CLMs re-parameterize $\{\theta_j\}$ such that the
non-decreasing nature of $\{\theta_j\}$ is enforced by the
parameterization, for example, \code{MASS::polr} (package version 7.3.49)
optimizes the likelihood using
\begin{equation*}
  \tilde\theta_1 = \theta_1, ~\tilde{\theta}_2 = \exp(\theta_2 -
  \theta_1),~\ldots, ~ \tilde{\theta}_{J-1} = \exp(\theta_{J-1} -
  \theta_{J-2})
\end{equation*}
This is deliberately not used in \pkg{ordinal} because the log-likelihood
function is generally closer to quadratic in the original parameterization
in our experience which facilitates faster convergence.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Starting values}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

For the basic CLMs~\eqref{eq:BasicCLM} the threshold parameters are
initialized to an increasing sequence such that the cumulative density of
the logistic distribution between consecutive thresholds (and below the
lowest or above the highest threshold) is the same. The regression
parameters $\bm\beta$, scale parameters $\bm\zeta$ as well as nominal
effects $\tilde{\bm\beta}$ are initialized to 0.

If the model specifies a cauchit link or includes scale parameters,
estimation starts at the parameter estimates of a model using the probit
link and/or without the scale-part of the model.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Estimation problems}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

With many nominal effects it may be difficult to find a model in which the
threshold parameters are strictly increasing for all combinations of the
parameters. Upon convergence of the NR algorithm the model evaluates the
$\bm\Theta$-matrix and checks that each row of threshold estimates is
increasing.

When a continuous variable is included among the nominal effects it is
often helpful if the continuous variable is centered at an appropriate
value (at least within the observed range of the data). This is because
$\{\theta_j\}$ represent the thresholds when the continuous variable is
zero and $\{\theta_j\}$ are enforced to be a non-decreasing sequence. Since
the nominal effects represent different slopes for the continuous variable
the thresholds will necessarily be ordered differently at some other value
of the continuous variable.
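Returning briefly to the starting values described above, the default
threshold starting values are easily reproduced; the following sketch shows
the principle for $J = 5$ response categories (an illustration of the
principle only, not \code{clm}'s internal code):
<<eval=FALSE>>=
## Sketch: an increasing sequence of thresholds leaving equal logistic
## probability mass in each of the J = 5 intervals:
J <- 5
qlogis(seq_len(J - 1) / J) # approximately -1.39, -0.41, 0.41, 1.39
@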
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Convergence codes}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Irrespective of the fitting algorithm, \pkg{ordinal} reports the following
convergence codes for CLMs in which negative values indicate convergence
failure:
%
\begin{description}
\item[-3] Not all thresholds are increasing. This is only possible with
  nominal effects and constitutes an invalid fit.
\item[-2] The Hessian has at least one negative eigenvalue. This means that
  the point at which the algorithm terminated does not represent an
  optimum.
\item[-1] Absolute convergence criterion (maximum absolute gradient) was
  not satisfied. This means that the algorithm could not get close enough
  to a stationary point of the log-likelihood function.
\item[0] Successful convergence.
\item[1] The Hessian is singular (i.e., at least one eigenvalue is zero).
  This means that some parameters are not uniquely determined.
\end{description}
%
Note that with convergence code \textbf{1} the optimum of the
log-likelihood function has been found although it is not a single point
but a line (or in general a (hyper) plane), so while some parameters are
not uniquely determined the value of the likelihood is valid and can be
compared to that of other models.

In addition to these convergence codes, the NR algorithm in \pkg{ordinal}
reports the following messages:
\begin{description}
\item[0] Absolute and relative convergence criteria were met.
\item[1] Absolute convergence criterion was met, but relative criterion was
  not met.
\item[2] Iteration limit reached.
\item[3] Step factor reduced below minimum.
\item[4] Maximum number of consecutive Newton modifications reached.
\end{description}

Note that convergence is assessed irrespective of messages from the fitting
algorithm and irrespective of whether the NR algorithm described above or a
general-purpose quasi-Newton optimizer is used.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section[Fitting cumulative link models in ordinal with clm]{Fitting cumulative link models in \pkg{ordinal} with \code{clm}}
\label{sec:fitting-clms}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The \code{clm} function takes the following arguments:
%
<<echo=FALSE>>=
clm_args <- gsub("function ", "clm", deparse(args(clm)))
cat(paste(clm_args[-length(clm_args)], "\n"))
@
%
Several arguments are standard and well-known from \code{lm} and \code{glm}
and will not be described in detail; \code{formula}, \code{data},
\code{weights}, \code{subset} and \code{na.action} are all parts of the
standard model specification in \proglang{R}. \code{scale} and
\code{nominal} are interpreted as \proglang{R}-formulae with no left hand
sides and specify the scale and nominal effects of the model respectively,
see sections~\ref{sec:scale-effects} and \ref{sec:nominal-effects} for
details; \code{start} is an optional vector of starting values;
\code{doFit} can be set to \code{FALSE} to prompt \code{clm} to return a
model \emph{environment}, for details see
section~\ref{sec:customized-modelling}; \code{model} controls whether the
\code{model.frame} should be included in the returned model fit;
\code{link} specifies the link function and \code{threshold} specifies an
optional threshold structure, for details see
section~\ref{sec:threshold-effects}. Note the absence of a separate
\code{offset} argument.
Since \code{clm} allows for different offsets in \code{formula} and
\code{scale}, offsets have to be specified within each formula, e.g.,
\verb!scale = ~ x1 + offset(x2)!.

Control parameters can either be specified as a named list, among the
optional \code{...} arguments, or directly as a call to \code{clm.control}
--- in the first two cases the arguments are passed on to
\code{clm.control}. \code{clm.control} takes the following arguments:
%
<<echo=FALSE>>=
cc_args <- gsub("function ", "clm.control", deparse(args(clm.control)))
cat(paste(cc_args[-length(cc_args)], "\n"))
@
%
The \code{method} argument specifies the optimization and/or return method.
The default estimation method (\code{Newton}) is the regularized
Newton-Raphson estimation scheme described in section~\ref{sec:algorithm};
options \code{model.frame} and \code{design} prompt \code{clm} to return
respectively the \code{model.frame} and a list of objects that represent
the internal representation instead of fitting the model; options
\code{ucminf}, \code{nlminb} and \code{optim} represent different
general-purpose optimizers which may be used to fit the model (the former
from package \pkg{ucminf} \citep{ucminf}, the latter two from package
\pkg{stats}). The \code{sign.location} and \code{sign.nominal} options
allow the user to flip the signs on the location and nominal model terms.
The \code{convergence} argument instructs \code{clm} how to alert the user
of potential convergence problems; \code{...} are optional arguments passed
on to the general purpose optimizers; \code{trace} applies across all
optimizers and positive values lead to printing of progress during
iterations; the remaining arguments (\code{maxIter, gradTol, maxLineIter,
  relTol, tol}) control the behavior of the regularized NR algorithm
described in section~\ref{sec:algorithm}.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection[Fitting a basic cumulative link model with clm]{Fitting a basic cumulative link model with \code{clm}}
\label{sec:fitting-basic-clm}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

In the following examples we will use the wine data from \citet{randall89}
available in the object \code{wine} in package \pkg{ordinal}, cf.,
Table~\ref{tab:wineData}. The data represent a factorial experiment on
factors determining the bitterness of wine with 1 = ``least bitter'' and 5
= ``most bitter''. Two treatment factors (temperature and contact) each
have two levels. Temperature and contact between juice and skins can be
controlled when crushing grapes during wine production. Nine judges each
assessed wine from two bottles from each of the four treatment conditions,
hence there are 72 observations in all. The main objective is to examine
the effect of contact and temperature on the perceived bitterness of wine.

\begin{table}[t!]
\centering
\begin{tabular}{llrrrrr}
  \hline
  & & \multicolumn{5}{c}{Least---Most bitter} \\
  \cline{3-7}
<<echo=FALSE, results=tex>>=
## data(wine)
tab <- with(wine, table(temp:contact, rating))
mat <- cbind(rep(c("cold", "warm"), each = 2),
             rep(c("no", "yes"), 2), tab)
colnames(mat) <- c("Temperature", "Contact", paste("~~", 1:5, sep = ""))
xtab <- xtable(mat)
print(xtab, only.contents=TRUE, include.rownames=FALSE,
      sanitize.text.function = function(x) x)
@
\end{tabular}
\caption{The number of ratings from nine judges in bitterness categories 1 --- 5.
Wine data from \citet{randall89} aggregated over bottles and judges.%
  \label{tab:wineData}}
\end{table}%

Initially we consider the following cumulative link model for the wine
data:
\begin{equation}
  \label{eq:CLM}
  \begin{array}{c}
    \textup{logit}(P(Y_i \leq j)) = \theta_j - \beta_1 (\mathtt{temp}_i) -
    \beta_2(\mathtt{contact}_i) \\
    i = 1,\ldots, n, \quad j = 1, \ldots, J-1
  \end{array}
\end{equation}%
%
where $\beta_1(\mathtt{temp}_i)$ attains the values
$\beta_1(\mathtt{cold})$ and $\beta_1(\mathtt{warm})$, and
$\beta_2(\mathtt{contact}_i)$ attains the values $\beta_2(\mathtt{no})$ and
$\beta_2(\mathtt{yes})$. The effect of temperature in this model is
illustrated in Figure~\ref{fig:standard_clm}. This is a model for the
cumulative probability of the $i$th rating falling in the $j$th category or
below, where $i$ index all observations ($n=72$), $j = 1, \ldots, J$ index
the response categories ($J = 5$) and $\theta_j$ is the intercept or
threshold for the $j$th cumulative logit: $\textup{logit}(P(Y_i \leq j))$.

Fitting the model with \code{clm} we obtain:
<<>>=
library("ordinal")
fm1 <- clm(rating ~ temp + contact, data=wine)
summary(fm1)
@

The \code{summary} method prints basic information about the fitted model.
% most of which is self explanatory.
%
The primary result is the coefficient table with parameter estimates,
standard errors and Wald based $p$~values for tests of the parameters being
zero. If one of the flexible link functions (\code{link = "log-gamma"} or
\code{link = "Aranda-Ordaz"}) is used, a coefficient table for the link
parameter, $\lambda$, is also included.

The maximum likelihood estimates of the model coefficients are:%
%
\begin{equation}
  \label{eq:parameters}
  \begin{gathered}
    \hat\beta_1(\mathtt{warm} - \mathtt{cold})= 2.50,
    ~~\hat\beta_2(\mathtt{yes} - \mathtt{no}) = 1.53, \\
    \{\hat\theta_j\} = \{-1.34,~ 1.25,~ 3.47,~ 5.01\}.
  \end{gathered}
\end{equation}
%
The coefficients for \code{temp} and \code{contact} are positive indicating
that higher temperature and contact increase the bitterness of wine, i.e.,
rating in higher categories is more likely.
%
Because the treatment contrast coding, which is the default in
\proglang{R}, was used, $\{\hat\theta_j\}$ refers to the thresholds at the
setting with $\mathtt{temp}_i = \mathtt{cold}$ and $\mathtt{contact}_i =
\mathtt{no}$.
%
Three natural and complementing interpretations of this model are
%
\begin{enumerate}
\item The thresholds $\{ \hat\theta_j \}$ at $\mathtt{contact}_i =
  \mathtt{yes}$ conditions have been shifted a constant amount $1.53$
  relative to the thresholds $\{ \hat\theta_j \}$ at $\mathtt{contact}_i =
  \mathtt{no}$ conditions.
\item The location of the latent distribution has been shifted $+1.53
  \sigma^*$ (scale units) at $\mathtt{contact}_i = \mathtt{yes}$ relative
  to $\mathtt{contact}_i = \mathtt{no}$.
\item The odds ratio of bitterness being rated in category $j$ or above
  ($\mathrm{OR}(Y \geq j)$) is $\exp(\hat\beta_2(\mathtt{yes} -
  \mathtt{no})) = 4.61$.
\end{enumerate}
%
Note that there are no $p$~values displayed for the threshold coefficients
because it usually does not make sense to test if they equal zero.
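In addition to \code{summary}, the \code{convergence} method (cf.
Table~\ref{tab:clm_methods}) reports further detail on the accuracy of the
fit based on the method independent error estimate described in
section~\ref{sec:algorithm}; a sketch (not evaluated here):
<<eval=FALSE>>=
## Sketch: detailed convergence diagnostics for the fitted model:
convergence(fm1)
@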
\begin{figure}
  \centering
  \includegraphics[width=6cm]{./static_figs/fig-fig2}
  \caption{Illustration of the effect of temperature in the standard
    cumulative link model in Equation~\ref{eq:CLM} for the wine data in
    Table~\ref{tab:wineData} through a latent variable
    interpretation.\label{fig:standard_clm}}
\end{figure}

The number of Newton-Raphson iterations is given below \code{niter} with
the number of step-halvings in parentheses. \code{max.grad} is the maximum
absolute gradient of the log-likelihood function with respect to the
parameters.
%
The condition number of the Hessian (\code{cond.H}) is well below $10^4$
and so does not indicate a problem with the model.

The \code{anova} method produces analysis of deviance (ANODE) tables, also
based on Wald $\chi^2$-tests, and provides tables with type I, II and III
hypothesis tests using the \proglang{SAS} definitions. A type I table, the
\proglang{R} default for linear models fitted with \code{lm}, sequentially
tests terms from first to last, type II tests attempt to respect the
principle of marginality and test each term after all others while ignoring
higher order interactions, and type III tables are based on orthogonalized
contrasts such that tests of main effects or lower order terms can often be
interpreted as averaged over higher order terms. Note that in this
implementation any type of contrasts (e.g., \code{contr.treatment} or
\code{contr.SAS} as well as \code{contr.sum}) can be used to produce type
III tests. For further details on the interpretation and definition of type
I, II and III tests, please see \citet{kuznetsova17} and \citet{SAStype}.

Here we illustrate with a type III ANODE table, which in this case is
equivalent to type I and II tables since the variables are balanced:
<<>>=
anova(fm1, type="III")
@

Likelihood ratio tests, though asymptotically equivalent to the Wald tests,
usually better reflect the evidence in the data. These tests can be
obtained by comparing nested models with the \code{anova} method, for
example, the likelihood ratio test of \code{contact} is
<<>>=
fm2 <- clm(rating ~ temp, data=wine)
anova(fm2, fm1)
@
which in this case produces a slightly lower $p$~value. Equivalently we can
use \code{drop1} to obtain likelihood ratio tests of the explanatory
variables while \emph{controlling} for the remaining variables:
<<>>=
drop1(fm1, test="Chi")
@
Likelihood ratio tests of the explanatory variables while \emph{ignoring}
the remaining variables are provided by the \code{add1} method (outputs not
shown):
<<results=hide>>=
fm0 <- clm(rating ~ 1, data=wine)
add1(fm0, scope = ~ temp + contact, test="Chi")
@
%
Confidence intervals of the parameter estimates are provided by the
\code{confint} method which by default computes the so-called profile
likelihood confidence intervals:
<<>>=
confint(fm1)
@

The cumulative link model in Equation~\ref{eq:CLM} assumes that the
thresholds, $\{\theta_j\}$, are constant for all values of the remaining
explanatory variables, here \code{temp} and \code{contact}. This is
generally referred to as the \emph{proportional odds assumption} or
\emph{equal slopes assumption}. We can relax this assumption in two general
ways: with nominal effects and scale effects, examples of which we will now
present in turn.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Partial and non-proportional odds: nominal effects}
\label{sec:nominal-effects}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The CLM in \eqref{eq:CLM} specifies a structure in which the regression
parameters, $\bm\beta$, are not allowed to vary with $j$, or equivalently
that the threshold parameters $\{\theta_j\}$ are not allowed to depend on
regression variables. In the following model we relax this assumption and
allow the threshold parameters to depend on \code{contact} which leads to
the so-called partial proportional odds for \code{contact}:
%
\begin{equation}
  \label{eq:CLM_nominal}
  \begin{array}{c}
    \textup{logit}(P(Y_i \leq j)) = \theta_j + \tilde{\beta}_{j}
    (\mathtt{contact}_i) - \beta (\mathtt{temp}_i) \\
    i = 1,\ldots, n, \quad j = 1, \ldots, J-1
  \end{array}
\end{equation}
%
One way to view this model is to think of two sets of thresholds being
applied at conditions with and without contact as illustrated in
Figure~\ref{fig:clm_nominal}. The model is specified as follows with
\code{clm}:
<<>>=
fm.nom <- clm(rating ~ temp, nominal = ~ contact, data=wine)
summary(fm.nom)
@

As can be seen from the output of \code{summary} there is no regression
coefficient estimated for \code{contact}, but there are additional
threshold coefficients estimated instead.
%
The naming and meaning of the threshold coefficients depend on the contrast
coding applied to \code{contact}. Here the \proglang{R} default treatment
contrasts (\code{"contr.treatment"}) are used. The coefficients translate
to the following parameter functions:
\begin{equation}
  \label{eq:nom_parameters}
  \begin{gathered}
    \hat\beta(\mathtt{warm} - \mathtt{cold})= 2.52, \\
    \{\hat\theta_j\} = \{-1.32,~ 1.25,~ 3.55,~ 4.66\}, \\
    \{ \hat{\tilde{\beta}}_j(\mathtt{yes} - \mathtt{no}) \} =
    \{-1.62,~ -1.51,~ -1.67,~ -1.05\}.
  \end{gathered}
\end{equation}
%
Again $\{ \hat\theta_j \}$ refer to the thresholds at $\mathtt{temp}_i =
\mathtt{cold}$ and $\mathtt{contact}_i = \mathtt{no}$ settings while the
thresholds at $\mathtt{temp}_i = \mathtt{cold}$ and $\mathtt{contact}_i =
\mathtt{yes}$ are $\{ \hat\theta_j + \hat{\tilde{\beta}}_j(\mathtt{yes} -
\mathtt{no}) \}$.
%
The odds ratio of bitterness being rated in category $j$ or above
($\mathrm{OR}(Y \geq j)$) now depends on $j$:
$\{\exp(-\hat{\tilde{\beta}}_j(\mathtt{yes} - \mathtt{no}))\} = \{ 5.03,~
4.53,~ 5.34,~ 2.86\}$.
%
\begin{figure}
  \centering
  \includegraphics[width=6cm]{./static_figs/fig-figNom2}
  \caption{Illustration of nominal effects leading to different sets of
    thresholds being applied for each level of \code{contact} in a latent
    variable interpretation, cf.,
    Equation~\ref{eq:CLM_nominal}.\label{fig:clm_nominal}}
\end{figure}

The resulting thresholds for each level of \code{contact}, i.e., the
estimated $\bm\Theta$-matrix can be extracted with:
<<>>=
fm.nom$Theta
@
As part of the convergence checks, \code{clm} checks the validity of
$\bm\Theta$, i.e., that each row of the threshold matrix is non-decreasing.

We can perform a likelihood ratio test of the proportional odds assumption
for \code{contact} by comparing the likelihoods of models \eqref{eq:CLM}
and \eqref{eq:CLM_nominal} as follows:
<<>>=
anova(fm1, fm.nom)
@
There is only a small difference in the log-likelihoods of the two models
and the test is non-significant. Thus there is no evidence that the
proportional odds assumption is violated for \code{contact}.

It is not possible to estimate both $\beta_2(\mathtt{contact}_i)$ and
$\tilde{\beta}_{j}(\mathtt{contact}_i)$ in the same model.
Consequently, variables that appear in \code{nominal} cannot enter \code{formula} as well. For instance, not all parameters are identifiable in the following model:
<<>>=
fm.nom2 <- clm(rating ~ temp + contact, nominal = ~ contact, data=wine)
@
We are made aware of this when summarizing or printing the model, in which the coefficient for \code{contactyes} is \code{NA}:
<<>>=
fm.nom2
@

To test the proportional odds assumption for all variables, we can use
<<>>=
nominal_test(fm1)
@
This function moves all terms in \code{formula}, and copies all terms in \code{scale}, to \code{nominal} one by one and produces an \code{add1}-like table with likelihood ratio tests of each term.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Modelling scale effects}
\label{sec:scale-effects}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
To allow the scale of the latent variable distribution to depend on explanatory variables we could, for instance, consider the following model in which the scale is allowed to differ between cold and warm conditions. The location of the latent distribution is allowed to depend on both temperature and contact:
\begin{equation}
  \label{eq:CLM_scale_wine}
  \begin{gathered}
    \textup{logit}(P(Y_i \leq j)) = \frac{\theta_j - \beta_1 (\mathtt{temp}_i) - \beta_{2} (\mathtt{contact}_i)} {\exp( \zeta (\mathtt{temp}_i))} \\
    i = 1,\ldots, n, \quad j = 1, \ldots, J-1
  \end{gathered}
\end{equation}

This model structure is illustrated in Figure~\ref{fig:clm_scale} and can be estimated with:
<<>>=
fm.sca <- clm(rating ~ temp + contact, scale = ~ temp, data=wine)
summary(fm.sca)
@
In a latent variable interpretation the location of the latent distribution is shifted $2.63\sigma^*$ (scale units) from cold to warm conditions and $1.59\sigma^*$ from absence to presence of contact. The scale of the latent distribution is $\sigma^*$ at cold conditions but $\sigma^* \exp(\zeta(\mathtt{warm} - \mathtt{cold})) = \sigma^*\exp(0.095) = 1.10 \sigma^*$, i.e., 10\% higher, at warm conditions. However, observe that the $p$~value for the scale effect in the summary output shows that the ratio of scales is not significantly different from 1 (or equivalently that the difference on the log scale is not significantly different from 0).

Scale effects offer an alternative to nominal effects (partial proportional odds) when non-proportional odds structures are encountered in the data. Using scale effects is often a better approach because the model is well-defined for all values of the explanatory variables irrespective of translocation and scaling of covariates. Scale effects also use fewer parameters, which often leads to more sensitive tests than nominal effects.

Potential scale effects of variables already included in \code{formula} can be discovered using \code{scale_test}. This function adds each model term in \code{formula} to \code{scale} in turn and reports the likelihood ratio statistic in an \code{add1} fashion:
<<>>=
scale_test(fm1)
@
The \code{confint} and \code{anova} methods apply with no change to models with scale and nominal parts, but the \code{drop1}, \code{add1} and \code{step} methods will only drop or add terms to the (location) \code{formula}.
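The scale effect of temperature can also be tested directly by comparing the nested fits \code{fm1} and \code{fm.sca} with \code{anova}; this likelihood ratio test matches the corresponding line of the \code{scale_test} output above, and the estimated ratio of latent scales is available in the \code{zeta} component of the fit. A minimal sketch (not evaluated here):
<<eval=FALSE>>=
## Likelihood ratio test of the scale effect of temperature:
anova(fm1, fm.sca)
## Estimated ratio of latent scales at warm relative to cold conditions:
exp(fm.sca$zeta)
@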
\begin{figure}
  \centering
  \includegraphics[width=6cm]{./static_figs/fig-figSca}
  \caption{Illustration of scale effects leading to different scales of the latent variable, cf., Equation~\ref{eq:CLM_scale_wine}.\label{fig:clm_scale}}
\end{figure}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Structured thresholds}
\label{sec:threshold-effects}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

In section~\ref{sec:nominal-effects} we described nominal effects, where the assumption that regression parameters have the same effect across all thresholds was relaxed. In this section we will instead impose additional restrictions on the thresholds. In the following model we require that the thresholds, $\{ \theta_j \}$, are equidistant or equally spaced. This allows us to assess the assumption that judges use the response scale in such a way that there is the same distance between adjacent response categories, i.e., that $\theta_j - \theta_{j-1} = \textup{constant}$ for $j = 2, \ldots, J-1$. The effect of equidistant thresholds is illustrated in Figure~\ref{fig:clm_structured_thresholds}, and such a model can be fitted with:
<<>>=
fm.equi <- clm(rating ~ temp + contact, data=wine,
               threshold="equidistant")
summary(fm.equi)
@
The parameters determining the thresholds are now the first threshold (\code{threshold.1}) and the spacing between consecutive thresholds (\code{spacing}). The mapping to this parameterization is stored in the transpose of the Jacobian matrix (\code{tJac}) component of the model fit. This makes it possible to extract the thresholds imposed by the equidistance structure with
<<>>=
drop(fm.equi$tJac %*% coef(fm.equi)[c("threshold.1", "spacing")])
@
These thresholds are in fact already stored in the \code{Theta} component of the model fit.
%
The following shows that the average distance between consecutive thresholds in \code{fm1}, which did not restrict the thresholds, is very close to the \code{spacing} parameter from \code{fm.equi}:
<<>>=
mean(diff(coef(fm1)[1:4]))
@

One advantage of imposing additional restrictions on the thresholds is the use of fewer parameters. Whether the restrictions are warranted by the data can be assessed in a likelihood ratio test:
<<>>=
anova(fm1, fm.equi)
@
In this case the test is non-significant, so there is no considerable loss of fit from saving two parameters; hence we may retain the model with equally spaced thresholds.

Note that the shape of the latent distribution (determined by the choice of link function) also affects the distances between the thresholds. If thresholds are equidistant under a normal distribution (i.e., with the probit link) they will in general\footnote{The exception is perfect fits such as CLMs with flexible thresholds and no predictors where models have the same likelihood irrespective of link function.} not be equidistant under a differently shaped latent distribution such as a skew latent distribution (e.g., with the log-log or clog-log link).
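Equidistance is not the only threshold structure available: the \code{threshold} argument also accepts \code{"symmetric"}, which restricts the thresholds to be symmetric around their midpoint. As a sketch of how such a model could be fitted and compared to the flexible fit (not evaluated here):
<<eval=FALSE>>=
## Thresholds restricted to be symmetric around their midpoint:
fm.sym <- clm(rating ~ temp + contact, data=wine,
              threshold="symmetric")
## Compare to the flexible-threshold fit:
anova(fm.sym, fm1)
@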
\begin{figure}
  \centering
  \includegraphics[width=6cm]{./static_figs/fig-figFlex}
  \includegraphics[width=6cm]{./static_figs/fig-figEqui}
  \caption{Illustration of flexible (left) and equidistant (right) thresholds being applied in a cumulative link model in a latent variable interpretation.\label{fig:clm_structured_thresholds}}
\end{figure}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Profile likelihood}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

In addition to facilitating the generally quite accurate profile likelihood confidence intervals, which were illustrated in section~\ref{sec:fitting-basic-clm}, the profile likelihood function can also be used to illustrate the relative importance of parameter values. As an example, the profile likelihood of the model coefficients for \code{temp} and \code{contact} in \code{fm1} can be obtained with
%
<<results=hide>>=
pr1 <- profile(fm1, alpha=1e-4)
plot(pr1)
@
The resulting plots are provided in Figure~\ref{fig:ProfileLikelihood}. The \code{alpha} argument controls how far from the maximum likelihood estimate the likelihood function should be profiled: the likelihood is profiled out to the limits of a (\code{1 - alpha})-level profile likelihood confidence interval and no further. From the relative profile likelihood in Figure~\ref{fig:ProfileLikelihood} for \code{tempwarm} we see that parameter values between 1 and 4 are reasonably well supported by the data, while values outside this range have little likelihood. Values between 2 and 3 are very well supported by the data and have high likelihood.

\setkeys{Gin}{width=.45\textwidth}
\begin{figure}
  \centering
<<echo=FALSE, fig=TRUE>>=
plot(pr1, which.par=1)
@
<<echo=FALSE, fig=TRUE>>=
plot(pr1, which.par=2)
@
\caption{Relative profile likelihoods for the regression parameters in \code{fm1} for the wine data. Horizontal lines indicate 95\% and 99\% confidence bounds.}
\label{fig:ProfileLikelihood}
\end{figure}

Profiling is implemented for regression ($\beta$) and scale ($\zeta$) parameters but not available for threshold, nominal and flexible link parameters.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Assessment of model convergence}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Likelihood slices}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The maximum likelihood estimates of the parameters in cumulative link models do not have closed form expressions, so iterative methods have to be applied to fit the models. Further, CLMs are non-linear models and in general the likelihood function is not guaranteed to be well-behaved or even unimodal. In addition, the special role of the threshold parameters and the restriction that they be ordered can affect the appearance of the likelihood function.

To confirm that an unequivocal optimum has been reached and that the likelihood function is reasonably well-behaved, we can inspect the likelihood function in a neighborhood around the reported optimum. For these purposes we can display slices of the likelihood function.

The following code produces the slices shown in Figure~\ref{fig:slice1}, which displays the shape of the log-likelihood function in a fairly wide neighborhood around the reported MLE (here we use $\lambda=5$ curvature units) together with its quadratic approximation.
<<>>=
slice.fm1 <- slice(fm1, lambda = 5)
par(mfrow = c(2, 3))
plot(slice.fm1)
@

Figure~\ref{fig:slice1} shows that the log-likelihood function is fairly well behaved and close to quadratic for most parameters.
\setkeys{Gin}{width=.32\textwidth}
\begin{figure}
  \centering
<<echo=FALSE, fig=TRUE>>=
plot(slice.fm1, parm = 1)
@
<<echo=FALSE, fig=TRUE>>=
plot(slice.fm1, parm = 2)
@
<<echo=FALSE, fig=TRUE>>=
plot(slice.fm1, parm = 3)
@
<<echo=FALSE, fig=TRUE>>=
plot(slice.fm1, parm = 4)
@
<<echo=FALSE, fig=TRUE>>=
plot(slice.fm1, parm = 5)
@
<<echo=FALSE, fig=TRUE>>=
plot(slice.fm1, parm = 6)
@
\caption{Slices of the (negative) log-likelihood function (solid) for parameters in \code{fm1} for the wine data. Dashed lines indicate quadratic approximations to the log-likelihood function and vertical bars indicate maximum likelihood estimates.}
\label{fig:slice1}
\end{figure}

Looking at the log-likelihood function much closer to the reported optimum (using $\lambda = 10^{-5}$) we can probe how accurately the parameter estimates are determined. The likelihood slices in Figure~\ref{fig:slice2}, which are produced with the following code, show that the parameters are determined accurately with at least 5 correct decimals. Slices are shown for two parameters; the slices for the remaining four parameters are very similar.
<<results=hide>>=
slice2.fm1 <- slice(fm1, parm = 4:5, lambda = 1e-5)
par(mfrow = c(1, 2))
plot(slice2.fm1)
@

\setkeys{Gin}{width=.45\textwidth}
\begin{figure}
  \centering
<<echo=FALSE, fig=TRUE>>=
plot(slice2.fm1, parm = 1)
@
<<echo=FALSE, fig=TRUE>>=
plot(slice2.fm1, parm = 2)
@
\caption{Slices of the (negative) log-likelihood function (solid) for parameters in \code{fm1} for the wine data very close to the MLEs. Dashed lines (indistinguishable from the solid lines) indicate quadratic approximations to the log-likelihood function and vertical bars indicate the maximum likelihood estimates.}
\label{fig:slice2}
\end{figure}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Parameter accuracy}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

As discussed in section~\ref{sec:algorithm}, the method-independent error estimate provides an assessment of the accuracy with which the ML estimates of the parameters have been determined by the fitting algorithm. This error estimate is implemented in the \code{convergence} method which we now illustrate on a model fit:
<<>>=
convergence(fm1)
@
The most important information is the number of correct decimals (\code{Cor.Dec}) and the number of significant digits (\code{Sig.Dig}) with which the parameters are determined. In this case all parameters are very accurately determined, so there is no reason to lower the convergence tolerance. The \code{logLik.error} shows that the error in the reported value of the log-likelihood is below $10^{-10}$, which is easily small enough that likelihood ratio tests based on this model are accurate. Note that the assessment of the number of correctly determined decimals and significant digits is only reliable sufficiently close to the optimum, so in practice we caution against this assessment if the algorithm did not converge successfully.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Fitted values and predictions}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Several types of fitted values and predictions can be extracted from a CLM depending on how it is viewed. By \emph{fitted values} we denote the values ($i=1, \ldots, n$)
\begin{equation*}
  \hat{\tilde\pi}_i = \tilde\pi_i(\hat{\bm\psi})
\end{equation*}
that is, the value of $\tilde\pi_i$, cf., Equation~\ref{eq:clm-log-likelihood}, evaluated at the ML estimates $\hat{\bm\psi}$. These are the values returned by the \code{fitted} and \code{fitted.values} extractor methods and stored in the \code{fitted.values} component of the model fit.
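Since the log-likelihood in Equation~\ref{eq:clm-log-likelihood} is, for unit weights, the sum of the logs of these fitted probabilities, the connection can be verified numerically. A small sketch (not evaluated here, assuming all weights equal 1 as in the wine data):
<<eval=FALSE>>=
## With unit weights the log-likelihood equals the sum of the logs
## of the fitted probabilities of the observed responses:
all.equal(sum(log(fitted(fm1))), as.numeric(logLik(fm1)))
@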
The values of $\pi_{ij}$ (cf., Equation~\ref{eq:multinom_pmf}) evaluated at the ML estimates of the parameters (i.e., $\hat\pi_{ij}$) can also be thought of as fitted values for the multinomially distributed variable $\bm Y_i^*$. These values can be obtained from the model fit by use of the \code{predict} method:
<<>>=
head(pred <- predict(fm1, newdata = subset(wine, select = -rating))$fit)
@
Note that the original data set should be supplied in the \code{newdata} argument \emph{without} the response variable (here \code{rating}). If the response variable is \emph{present} in \code{newdata}, predictions are produced only for the observed rating categories and we get back the fitted values:
<<>>=
stopifnot(isTRUE(all.equal(fitted(fm1), t(pred)[t(col(pred) == wine$rating)])),
          isTRUE(all.equal(fitted(fm1), predict(fm1, newdata=wine)$fit)))
@

Class predictions are also available and are defined here as the response class with the highest probability, that is, for the $i$'th observation the class prediction is the mode of $\bm\pi_{i}$. To obtain class predictions use \code{type = "class"} as illustrated in the following small table:
<<>>=
newData <- expand.grid(temp = levels(wine$temp),
                       contact = levels(wine$contact))
cbind(newData, round(predict(fm1, newdata=newData)$fit, 3),
      "class"=predict(fm1, newdata=newData, type="class")$fit)
@

Other definitions of class predictions can be applied, e.g., nearest mean predictions:
<<>>=
head(apply(pred, 1, function(x) round(weighted.mean(1:5, x))))
@
which in this case happen to be identical to the default class predictions.
<<echo=FALSE>>=
p1 <- apply(predict(fm1, newdata = subset(wine, select=-rating))$fit, 1,
            function(x) round(weighted.mean(1:5, x)))
p2 <- as.numeric(as.character(predict(fm1, type="class")$fit))
stopifnot(isTRUE(all.equal(p1, p2, check.attributes = FALSE)))
@

Standard errors and confidence intervals of predictions are also available, for example:
<<>>=
predictions <- predict(fm1, se.fit=TRUE, interval=TRUE)
head(do.call("cbind", predictions))
@
where the default 95\% confidence level can be changed with the \code{level} argument. Here the standard errors of fitted values or predictions, $\hat{\tilde{\pi}} = \tilde{\pi}(\hat{\bm\psi})$, are obtained by application of the delta method:
\begin{equation*}
  \mathsf{Var}(\hat{\tilde{\bm\pi}}) = \bm C \mathsf{Var}(\hat{\bm\psi}) \bm C^\top, \quad
  \bm C = \frac{\partial \tilde{\bm\pi}(\bm\psi)}{\partial \bm\psi} \Big|_{\bm\psi = \hat{\bm\psi}}
\end{equation*}
where $\mathsf{Var}(\hat{\bm\psi})$ is the estimated variance-covariance matrix of the parameters $\bm\psi$ evaluated at the ML estimates $\hat{\bm\psi}$ as given by the observed Fisher information matrix; finally the standard errors are extracted as the square root of the diagonal elements of $\mathsf{Var}(\hat{\tilde{\bm\pi}})$. Since symmetric confidence intervals for probabilities are not appropriate unless the probabilities are close to one half, a more generally applicable approach is to form symmetric Wald intervals on the logit scale and subsequently transform the confidence bounds to the probability scale.
\code{predict.clm} takes this approach and computes the standard error of $\hat\kappa_i = \mathrm{logit}(\hat{\tilde{\pi}}_i)$ by another application of the delta method:
\begin{equation*}
  \mathrm{se}(\hat\kappa_i) = \frac{\partial g(\hat{\tilde{\pi}}_i)}{\partial \hat{\tilde{\pi}}_i} \mathrm{se}(\hat{\tilde{\pi}}_i) = \frac{\mathrm{se}(\hat{\tilde{\pi}}_i)}{\hat{\tilde{\pi}}_i(1 - \hat{\tilde{\pi}}_i)}, \quad
  g(\hat{\tilde{\pi}}_i) = \log \frac{\hat{\tilde{\pi}}_i}{1 - \hat{\tilde{\pi}}_i}.
\end{equation*}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Model identifiability}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Unidentifiable models or unidentifiable parameters may occur in CLMs for several reasons, some of which are special to the model class. In this section we describe issues around model identifiability and how these are handled by \code{ordinal::clm}.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Complete separation}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

In binary logistic regression the issue of \emph{complete separation} is well known. This may happen, for example, if only ``success'' or only ``failure'' is observed for a level of a treatment factor. In CLMs the issue may appear even when outcomes are observed in more than one response category. This can be illustrated using the \code{wine} dataset if we combine the three central categories:
<<>>=
wine <- within(wine, {
  rating_comb3 <- rating
  levels(rating_comb3) <- c("1", "2-4", "2-4", "2-4", "5")
})
ftable(rating_comb3 ~ temp, data=wine)
fm.comb3 <- clm(rating_comb3 ~ temp, data=wine)
summary(fm.comb3)
@
Here the true ML estimates of the coefficients for \code{temp} and the second threshold are at infinity, but the algorithm in \code{clm} terminates when the likelihood function is sufficiently flat. This means that the reported values of the coefficients for \code{temp} and the second threshold are arbitrary and will change if the convergence criteria are changed or a different optimization method is used. The standard errors of the coefficients are not available because the Hessian is effectively singular and so cannot be inverted to produce the variance-covariance matrix of the parameters. The ill-determined nature of the Hessian is seen from the very large condition number of the Hessian, \code{cond.H}.

Note, however, that while the model parameters cannot be uniquely determined, the likelihood of the model is well defined and as such it can be compared to the likelihood of other models. For example, we could compare it to a model that excludes \code{temp}
<<>>=
fm.comb3_b <- clm(rating_comb3 ~ 1, data=wine)
anova(fm.comb3, fm.comb3_b)
@
The difference in log-likelihood is substantial; however, the criteria for the validity of the likelihood ratio test are not fulfilled, so the $p$~value should not be taken at face value.

The complete-separation issue may also appear in less obvious situations. If, for example, we consider the following model allowing for nominal effects of \code{temp}, the issue shows up:
<<>>=
fm.nom2 <- clm(rating ~ contact, nominal = ~ temp, data=wine)
summary(fm.nom2)
@
Analytical detection of which coefficients suffer from unidentifiability due to \emph{complete separation} is a topic for future research and is therefore unavailable in current versions of \pkg{ordinal}.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Aliased coefficients}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Aliased coefficients can occur in all kinds of models that build on a design matrix, including linear models as well as generalized linear models.
\code{lm} and \code{glm} determine the rank deficiency of the design matrix using the rank-revealing implementation of the QR-decomposition in \code{LINPACK} and display the aliased coefficients as \code{NA}s\footnote{If \code{singular.ok = TRUE}, which is the default.}. Though the QR decomposition is not used during iterations in \code{clm}, it is used initially to determine aliased coefficients. An example is provided using the \code{soup} data available in the \pkg{ordinal} package:
<<>>=
fm.soup <- clm(SURENESS ~ PRODID * DAY, data=soup)
summary(fm.soup)
@
The source of the singularity is revealed in the following table:
<<>>=
with(soup, table(DAY, PRODID))
@
which shows that the third \code{PRODID} was not presented on the third day.

The issue of aliased coefficients extends in CLMs to nominal effects since the joint design matrix for location and nominal effects will be singular if the same variables are included in both location and nominal formulae. \code{clm} handles this by not estimating the offending coefficients in the location formula, as illustrated with the \code{fm.nom2} model fit in section~\ref{sec:nominal-effects}.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Over parameterization}
\label{sec:over-parameterization}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The scope of model structures allowed in \code{clm} makes it possible to specify models which are over parameterized in ways that do not lead to rank-deficient design matrices and are therefore not easily detected before fitting the model. An example is given here which includes both additive (location) and multiplicative (scale) effects of \code{contact} for a binary response variable, but the issue can also occur with more than two response categories:
<<>>=
wine <- within(wine, {
  rating_comb2 <- rating
  levels(rating_comb2) <- c("1-2", "1-2", "3-5", "3-5", "3-5")
})
ftable(rating_comb2 ~ contact, data=wine)
fm.comb2 <- clm(rating_comb2 ~ contact, scale = ~ contact, data=wine)
summary(fm.comb2)
@
<<echo=FALSE, results=hide>>=
## Example with unidentified parameters with 3 response categories
## not shown in paper:
wine <- within(wine, {
  rating_comb3b <- rating
  levels(rating_comb3b) <- c("1-2", "1-2", "3", "4-5", "4-5")
})
wine$rating_comb3b[1] <- "4-5" # Need to remove the zero here to avoid inf MLE
ftable(rating_comb3b ~ temp + contact, data=wine)
fm.comb3_c <- clm(rating_comb3b ~ contact * temp, scale=~contact * temp,
                  nominal=~contact, data=wine)
summary(fm.comb3_c)
convergence(fm.comb3_c)
@

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Customized modelling}
\label{sec:customized-modelling}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Using the \code{doFit} argument, \code{clm} can be instructed to return a \emph{model environment} that we denote \code{rho}:
<<>>=
rho <- update(fm1, doFit=FALSE)
names(rho)
@
This environment holds a complete specification of the cumulative link model, including the design matrices \code{B1}, \code{B2}, \code{S} and other components. The environment also contains the cumulative distribution function that defines the inverse link function (\code{pfun}) and its first and second derivatives, i.e., the corresponding density function (\code{dfun}) and its gradient (\code{gfun}). Of direct interest here are the parameter vector \code{par} and functions that readily evaluate the negative log-likelihood (\code{clm.nll}), its gradient with respect to the parameters (\code{clm.grad}) and the Hessian (\code{clm.hess}).
The negative log-likelihood and its gradient at the starting values are therefore
<<>>=
rho$clm.nll(rho)
c(rho$clm.grad(rho))
@
Similarly, at the MLE they are:
<<>>=
rho$clm.nll(rho, par=coef(fm1))
print(c(rho$clm.grad(rho)), digits=3)
@
Note that the gradient function \code{clm.grad} assumes that \code{clm.nll} has been evaluated at the current parameter values; similarly, \code{clm.hess} assumes that \code{clm.grad} has been evaluated at the current parameter values. The NR algorithm in \pkg{ordinal} takes advantage of this so as to minimize the computational load.

If interest is in fitting a \emph{custom} CLM with, say, restrictions on the parameter space, this can be achieved by a combination of a general-purpose optimizer and the functions \code{clm.nll} and optionally \code{clm.grad}. Assume, for instance, that we know the regression parameters can be no larger than 2; then the model can be fitted with the following code:
<<>>=
nll <- function(par, envir) {
  envir$par <- par
  envir$clm.nll(envir)
}
grad <- function(par, envir) {
  envir$par <- par
  envir$clm.nll(envir)
  envir$clm.grad(envir)
}
nlminb(rho$par, nll, grad, upper=c(rep(Inf, 4), 2, 2), envir=rho)$par
@

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Constrained partial proportional odds}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

A type of model which is not implemented in full generality in \pkg{ordinal} is the so-called \emph{constrained} partial proportional odds model proposed by \citet{peterson90}. These models impose restrictions on the nominal effects considered in section~\ref{sec:nominal-effects} and are well suited to illustrate the customizable modelling options available in the \pkg{ordinal} package.

We consider an example from \citet{peterson90} in which disease status is tabulated by smoking status:
<<>>=
artery <- data.frame(disease = factor(rep(0:4, 2), ordered = TRUE),
                     smoker = factor(rep(c("no", "yes"), each=5)),
                     freq = c(334, 99, 117, 159, 30,
                              350, 307, 345, 481, 67))
addmargins(xtabs(freq ~ smoker + disease, data = artery), margin = 2)
@
The overall odds-ratio of smoking is
<<>>=
fm <- clm(disease ~ smoker, weights=freq, data=artery)
exp(fm$beta)
@
showing that, overall, the odds of a worse disease rating are twice as high for smokers as for non-smokers.

Allowing for nominal effects we see that the log odds-ratio for smoking clearly changes with disease status, and that it does so in an almost linearly decreasing manner:
<<>>=
fm.nom <- clm(disease ~ 1, nominal = ~ smoker, weights=freq, data=artery,
              sign.nominal = "negative")
coef(fm.nom)[5:8]
@

\citet{peterson90} suggested a model which restricts the log odds-ratios to be linearly decreasing with disease status, modelling only the intercept (at the first threshold) and the slope of the log odds-ratios:
<<>>=
coef(fm.lm <- lm(I(coef(fm.nom)[5:8]) ~ I(0:3)))
@

We can implement the log-likelihood of this model as follows. As starting values we combine the parameter estimates from \code{fm.nom} and the linear model \code{fm.lm}, and finally we optimize the log-likelihood utilising the \code{fm.nom} model environment:
<<>>=
nll2 <- function(par, envir) {
  envir$par <- c(par[1:4], par[5] + par[6] * (0:3))
  envir$clm.nll(envir)
}
start <- unname(c(coef(fm.nom)[1:4], coef(fm.lm)))
fit <- nlminb(start, nll2, envir=update(fm.nom, doFit=FALSE))
round(fit$par[5:6], 2)
@
Thus the log odds-ratio starts at 1.02 at the first threshold and decreases linearly by 0.30 per disease category.
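Whether the linearity restriction on the log odds-ratios is supported by the data can be assessed by comparing the constrained fit to the unconstrained nominal effects model in a likelihood ratio test. A sketch (not evaluated here), using that the \code{objective} component of \code{fit} holds the minimized negative log-likelihood and that the restriction saves two parameters:
<<eval=FALSE>>=
## Likelihood ratio test of the constrained against the
## unconstrained partial proportional odds model:
LR <- 2 * (as.numeric(logLik(fm.nom)) + fit$objective)
pchisq(LR, df = 2, lower.tail = FALSE)
@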
%% -- Illustrations ------------------------------------------------------------

%% - Virtually all JSS manuscripts list source code along with the generated
%%   output. The style files provide dedicated environments for this.
%% - In R, the environments {Sinput} and {Soutput} - as produced by Sweave() or
%%   or knitr using the render_sweave() hook - are used (without the need to
%%   load Sweave.sty).
%% - Equivalently, {CodeInput} and {CodeOutput} can be used.
%% - The code input should use "the usual" command prompt in the respective
%%   software system.
%% - For R code, the prompt "R> " should be used with "+ " as the
%%   continuation prompt.
%% - Comments within the code chunks should be avoided - these should be made
%%   within the regular LaTeX text.

%% -- Summary/conclusions/discussion -------------------------------------------

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Conclusions}
\label{sec:conclusions}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Cumulative link models form a very rich model class positioned, in a sense, between perhaps the two most important model classes in statistics: linear models and logistic regression models. The greater flexibility of CLMs relative to binary logistic regression models also facilitates checking assumptions such as the proportional odds assumption. Non-linear structures such as scale effects arise naturally in a latent variable interpretation. In addition to nominal effects and the non-linear scale effects, the ordered nature of the thresholds gives rise to computational challenges which we have described and addressed in the \pkg{ordinal} package.

In addition to computational challenges, practical data analysis with CLMs can also be challenging. In our experience a top-down approach in which a ``full'' model is fitted and gradually simplified is often problematic, not only because this easily leads to unidentifiable models but also because there are many different ways in which models can be reduced or expanded. A more pragmatic approach is often preferred; understanding the data through plots, tables, and even linear models can aid in finding a suitable intermediate ordinal starting model.

Attempts to identify a ``correct'' model will also often lead to frustration; the greater the model framework, the greater the risk that there are multiple models which fit the data (almost) equally well. It is well-known statistical wisdom that with enough data many goodness-of-fit tests become sensitive to even minor deviations of little practical relevance. This is particularly true for tests of the proportional odds assumption; in the author's experience almost all CLMs on real data show some evidence of non-proportional odds for one or more variables, while the most useful representation of the data is often a model that simply assumes proportional odds.

%% -- Optional special unnumbered sections -------------------------------------

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section*{Computational details}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% \begin{leftbar}
% If necessary or useful, information about certain computational details
% such as version numbers, operating systems, or compilers could be included
% in an unnumbered section. Also, auxiliary packages (say, for visualizations,
% maps, tables, \dots) that are not cited in the main text can be credited here.
% \end{leftbar} The results in this paper were obtained using \proglang{R}~\Sexpr{paste(R.Version()[6:7], collapse = ".")} with \pkg{ordinal}, version~\Sexpr{packageVersion("ordinal")}. \proglang{R} itself and all packages used are available from the Comprehensive \proglang{R} Archive Network (CRAN) at \url{https://CRAN.R-project.org/}. % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % \section*{Acknowledgments} % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % \begin{leftbar} % % All acknowledgments (note the AE spelling) should be collected in this % unnumbered section before the references. It may contain the usual information % about funding and feedback from colleagues/reviewers/etc. Furthermore, % information such as relative contributions of the authors may be added here % (if any). % \end{leftbar} %% -- Bibliography ------------------------------------------------------------- %% - References need to be provided in a .bib BibTeX database. %% - All references should be made with \cite, \citet, \citep, \citealp etc. %% (and never hard-coded). See the FAQ for details. %% - JSS-specific markup (\proglang, \pkg, \code) should be used in the .bib. %% - Titles in the .bib should be in title case. %% - DOIs should be included where available. \bibliography{clm_article_refs} %% -- Appendix (if any) -------------------------------------------------------- %% - After the bibliography with page break. %% - With proper section titles and _not_ just "Appendix". % \newpage % \begin{appendix} % % \section{More technical details} \label{app:technical} % % \begin{leftbar} % Appendices can be included after the bibliography (with a page break). Each % section within the appendix should have a proper section title (rather than % just \emph{Appendix}). % % For more technical style details, please check out JSS's style FAQ at % \url{https://www.jstatsoft.org/pages/view/style#frequently-asked-questions} % which includes the following topics: % \begin{itemize} % \item Title vs.\ sentence case. % \item Graphics formatting. % \item Naming conventions. % \item Turning JSS manuscripts into \proglang{R} package vignettes. % \item Trouble shooting. % \item Many other potentially helpful details\dots % \end{itemize} % \end{leftbar} % % % \section[Using BibTeX]{Using \textsc{Bib}{\TeX}} \label{app:bibtex} % % \begin{leftbar} % References need to be provided in a \textsc{Bib}{\TeX} file (\code{.bib}). All % references should be made with \verb|\cite|, \verb|\citet|, \verb|\citep|, % \verb|\citealp| etc.\ (and never hard-coded). This commands yield different % formats of author-year citations and allow to include additional details (e.g., % pages, chapters, \dots) in brackets. In case you are not familiar with these % commands see the JSS style FAQ for details. % % Cleaning up \textsc{Bib}{\TeX} files is a somewhat tedious task -- especially % when acquiring the entries automatically from mixed online sources. However, % it is important that informations are complete and presented in a consistent % style to avoid confusions. JSS requires the following format. % \begin{itemize} % \item JSS-specific markup (\verb|\proglang|, \verb|\pkg|, \verb|\code|) should % be used in the references. % \item Titles should be in title case. % \item Journal titles should not be abbreviated and in title case. % \item DOIs should be included where available. % \item Software should be properly cited as well. For \proglang{R} packages % \code{citation("pkgname")} typically provides a good starting point. 
% \end{itemize}
% \end{leftbar}
%
% \end{appendix}

%% -----------------------------------------------------------------------------

\end{document}
ordinal/vignettes/ordinal.bib0000644000176200001440000002472112431434103016021 0ustar liggesusers@Book{brazzale07,
  author = {A R Brazzale and A C Davison and N Reid},
  title = {Applied Asymptotics---case studies in small-sample statistics},
  publisher = {Cambridge University Press},
  year = 2007}

@Book{pawitan01,
  author = {Yudi Pawitan},
  title = {{In All Likelihood---Statistical Modelling and Inference Using Likelihood}},
  publisher = {Oxford University Press},
  year = 2001
}

@Manual{R11,
  title = {R: A Language and Environment for Statistical Computing},
  author = {{R Development Core Team}},
  organization = {R Foundation for Statistical Computing},
  address = {Vienna, Austria},
  year = {2011},
  note = {{ISBN} 3-900051-07-0},
  url = {http://www.R-project.org/},
}

@Article{tutz96,
  author = {Gerhard Tutz and Wolfgang Hennevogl},
  title = {Random effects in ordinal regression models},
  journal = {Computational Statistics \& Data Analysis},
  year = 1996,
  volume = 22,
  pages = {537-557}
}

@Article{efron78,
  author = {Bradley Efron and David V Hinkley},
  title = {{Assessing the accuracy of the maximum likelihood estimator: Observed versus expected Fisher information}},
  journal = {Biometrika},
  year = 1978,
  volume = 65,
  number = 3,
  pages = {457-487}}

@article{bauer09,
  author = {Bauer, Daniel},
  affiliation = {University of North Carolina Department of Psychology Chapel Hill NC 27599-3270 USA},
  title = {A Note on Comparing the Estimates of Models for Cluster-Correlated or Longitudinal Data with Binary or Ordinal Outcomes},
  journal = {Psychometrika},
  publisher = {Springer New York},
  issn = {0033-3123},
  keyword = {Humanities, Social Sciences and Law},
  pages = {97-105},
  volume = {74},
  issue = {1},
  url = {http://dx.doi.org/10.1007/s11336-008-9080-1},
  year = {2009}
}

@article{fielding04,
  author = {Fielding, Antony},
  title = {Scaling for Residual Variance Components of Ordered Category Responses in Generalised Linear Mixed Multilevel Models},
  journal = {Quality \& Quantity},
  publisher = {Springer Netherlands},
  issn = {0033-5177},
  keyword = {Humanities, Social Sciences and Law},
  pages = {425-433},
  volume = {38},
  issue = {4},
  url = {http://dx.doi.org/10.1023/B:QUQU.0000043118.19835.6c},
  year = {2004}
}

@article{winship84,
  jstor_articletype = {research-article},
  title = {Regression Models with Ordinal Variables},
  author = {Winship, Christopher and Mare, Robert D.},
  journal = {American Sociological Review},
  jstor_issuetitle = {},
  volume = {49},
  number = {4},
  jstor_formatteddate = {Aug., 1984},
  pages = {512-525},
  url = {http://www.jstor.org/stable/2095465},
  ISSN = {00031224},
  abstract = {Most discussions of ordinal variables in the sociological literature debate the suitability of linear regression and structural equation methods when some variables are ordinal. Largely ignored in these discussions are methods for ordinal variables that are natural extensions of probit and logit models for dichotomous variables. If ordinal variables are discrete realizations of unmeasured continuous variables, these methods allow one to include ordinal dependent and independent variables into structural equation models in a way that (1) explicitly recognizes their ordinality, (2) avoids arbitrary assumptions about their scale, and (3) allows for analysis of continuous, dichotomous, and ordinal variables within a common statistical framework.
These models rely on assumed probability distributions of the continuous variables that underly the observed ordinal variables, but these assumptions are testable. The models can be estimated using a number of commonly used statistical programs. As is illustrated by an empirical example, ordered probit and logit models, like their dichotomous counterparts, take account of the ceiling and floor restrictions on models that include ordinal variables, whereas the linear regression model does not.}, language = {English}, year = {1984}, publisher = {American Sociological Association}, copyright = {Copyright © 1984 American Sociological Association}, } @article{thompson81, jstor_articletype = {research-article}, title = {Composite Link Functions in Generalized Linear Models}, author = {Thompson, R. and Baker, R. J.}, journal = {Journal of the Royal Statistical Society. Series C (Applied Statistics)}, jstor_issuetitle = {}, volume = {30}, number = {2}, jstor_formatteddate = {1981}, pages = {125-131}, url = {http://www.jstor.org/stable/2346381}, ISSN = {00359254}, abstract = {In generalized linear models each observation is linked with a predicted value based on a linear function of some systematic effects. We sometimes require to link each observation with a linear function of more than one predicted value. We embed such models into the generalized linear model framework using composite link functions. The computer program GLIM-3 can be used to fit these models. Illustrative examples are given including a mixed-up contingency table and grouped normal data.}, language = {English}, year = {1981}, publisher = {Blackwell Publishing for the Royal Statistical Society}, copyright = {Copyright © 1981 Royal Statistical Society}, } @article{burridge81, jstor_articletype = {research-article}, title = {A Note on Maximum Likelihood Estimation for Regression Models Using Grouped Data}, author = {Burridge, J.}, journal = {Journal of the Royal Statistical Society. Series B (Methodological)}, jstor_issuetitle = {}, volume = {43}, number = {1}, jstor_formatteddate = {1981}, pages = {41-45}, url = {http://www.jstor.org/stable/2985147}, ISSN = {00359246}, abstract = {The estimation of parameters for a class of regression models using grouped or censored data is considered. It is shown that with a simple reparameterization some commonly used distributions, such as the normal and extreme value, result in a log-likelihood which is concave with respect to the transformed parameters. Apart from its theoretical implications for the existence and uniqueness of maximum likelihood estimates, this result suggests minor changes to some commonly used algorithms for maximum likelihood estimation from grouped data. Two simple examples are given.}, language = {English}, year = {1981}, publisher = {Blackwell Publishing for the Royal Statistical Society}, copyright = {Copyright © 1981 Royal Statistical Society}, } @article{pratt81, jstor_articletype = {research-article}, title = {Concavity of the Log Likelihood}, author = {Pratt, John W.}, journal = {Journal of the American Statistical Association}, jstor_issuetitle = {}, volume = {76}, number = {373}, jstor_formatteddate = {Mar., 1981}, pages = {103-106}, url = {http://www.jstor.org/stable/2287052}, ISSN = {01621459}, abstract = {For a very general regression model with an ordinal dependent variable, the log likelihood is proved concave if the derivative of the underlying response function has concave logarithm. 
For a binary dependent variable, a weaker condition suffices, namely, that the response function and its complement each have concave logarithm. The normal, logistic, sine, and extreme-value distributions, among others, satisfy the stronger condition, the t (including Cauchy) distributions only the weaker. Some converses and generalizations are also given. The model is that which arises from an ordinary linear regression model with a continuous dependent variable that is partly unobservable, being either grouped into intervals with unknown endpoints, or censored, or, more generally, grouped in some regions, censored in others, and observed exactly elsewhere.}, language = {English}, year = {1981}, publisher = {American Statistical Association}, copyright = {Copyright © 1981 American Statistical Association}, } @Manual{christensen11, title = {Analysis of ordinal data with cumulative link models --- estimation with the \textsf{ordinal} package}, author = {Rune Haubo Bojesen Christensen}, note = {R-package version 2011.09-13}, year = 2011} @Book{agresti10, author = {Alan Agresti}, title = {Analysis of ordinal categorical data}, publisher = {Wiley}, year = 2010, edition = {2nd}} @Book{agresti02, author = {Alan Agresti}, title = {Categorical Data Analysis}, publisher = {Wiley}, year = 2002, edition = {2nd} } @Article{mccullagh80, author = {Peter McCullagh}, title = {Regression Models for Ordinal Data}, journal = {Journal of the Royal Statistical Society, Series B}, year = 1980, volume = 42, pages = {109-142} } @Article{randall89, author = {J.H. Randall}, title = {The Analysis of Sensory Data by Generalised Linear Model}, journal = {Biometrical journal}, year = 1989, volume = 7, pages = {781-793} } @Book{fahrmeir01, author = {Ludwig Fahrmeir and Gerhard Tutz}, title = {Multivariate Statistical Modelling Based on Generalized Linear Models}, publisher = {Springer-Verlag New York, Inc.}, year = 2001, series = {Springer series in statistics}, edition = {2nd} } @Book{greene10, author = {William H Greene and David A Hensher}, title = {Modeling Ordered Choices: A Primer}, publisher = {Cambridge University Press}, year = 2010} @Book{mccullagh89, author = {Peter McCullagh and John Nelder}, title = {Generalized Linear Models}, publisher = {Chapman \& Hall/CRC}, year = 1989, edition = {Second} } @Book{collett02, author = {David Collett}, title = {Modelling binary data}, publisher = {London: Chapman \& Hall/CRC}, year = 2002, edition = {2nd} } ordinal/vignettes/clm_article_refs.bib0000644000176200001440000002511713340235161017671 0ustar liggesusers@Misc{ordinal-pkg, title = {ordinal---Regression Models for Ordinal Data }, author = {R. H. B. Christensen}, year = {2018}, note = {R package version 2018.8-25}, url = {http://www.cran.r-project.org/package=ordinal/}, } @article{kuznetsova17, author = {Alexandra Kuznetsova and Per Brockhoff and Rune Christensen}, title = {lmerTest Package: Tests in Linear Mixed Effects Models}, journal = {Journal of Statistical Software, Articles}, volume = {82}, number = {13}, year = {2017}, keywords = {denominator degree of freedom, Satterthwaite's approximation, ANOVA, R, linear mixed effects models, lme4}, abstract = {One of the frequent questions by users of the mixed model function lmer of the lme4 package has been: How can I get p values for the F and t tests for objects returned by lmer? The lmerTest package extends the 'lmerMod' class of the lme4 package, by overloading the anova and summary functions by providing p values for tests for fixed effects. 
We have implemented the Satterthwaite's method for approximating degrees of freedom for the t and F tests. We have also implemented the construction of Type I - III ANOVA tables. Furthermore, one may also obtain the summary as well as the anova table using the Kenward-Roger approximation for denominator degrees of freedom (based on the KRmodcomp function from the pbkrtest package). Some other convenient mixed model analysis tools such as a step method, that performs backward elimination of nonsignificant effects - both random and fixed, calculation of population means and multiple comparison tests together with plot facilities are provided by the package as well.}, issn = {1548-7660}, pages = {1--26}, doi = {10.18637/jss.v082.i13}, url = {https://www.jstatsoft.org/v082/i13} } @Article{cox95, author = {Christopher Cox}, title = {Location-Scale Cumulative Odds Models for Ordinal Data: A Generalized Non-Linear Model Approach}, journal = {Statistics in Medicine}, year = 1995, volume = 14, doi = {10.1002/sim.4780141105}, pages = {1191-1203} } @Book{elden04, author = {Lars Eld\'en and Linde Wittmeyer-Koch and Hans Bruun Nielsen}, title = {Introduction to Numerical Computation --- Analysis and MATLAB Illustrations}, publisher = {Studentlitteratur}, ISBN = {978-9144037271}, year = 2004} @Article{farewell77, author = {Vernon T Farewell and R L Prentice}, title = {A Study of Distributional Shape in Life Testing}, journal = {{Technometrics}}, year = 1977, volume = 19, doi = {10.2307/1268257}, pages = {69-77}} @Article{genter85, author = {Frederic C Genter and Vernon T Farewell}, title = {Goodness-of-Link Testing in Ordinal Regression Models}, journal = {{The Canadian Journal of Statistics}}, year = 1985, volume = 13, number = 1, doi = {10.2307/3315165}, pages = {37-44}, } @Article{aranda-ordaz83, author = {Francisco J Aranda-Ordaz}, title = {An Extension of the Proportional-Hazards Model for Grouped Data}, journal = {Biometrics}, year = 1983, volume = 39, doi = {10.2307/2530811}, pages = {109-117}} @Article{peterson90, author = {Bercedis Peterson and Frank E {Harrell Jr.}}, title = {Partial Proportional Odds Models for Ordinal Response Variables}, journal = {Applied Statistics}, year = 1990, volume = 39, doi = {10.2307/2347760}, pages = {205-217} } @Article{peterson92, author = {Bercedis Peterson and Frank E {Harrell Jr.}}, title = {Proportional Odds Model}, journal = {Biometrics}, year = 1992, month = {March}, note = {Letters to the Editor} } @Book{brazzale07, author = {A R Brazzale and A C Davison and N Reid}, title = {Applied Asymptotics---Case Studies in Small-Sample Statistics}, ISBN = {9780521847032}, publisher = {Cambridge University Press}, year = 2007} @Book{pawitan01, author = {Yudi Pawitan}, title = {{In All Likelihood---Statistical Modelling and Inference Using Likelihood}}, publisher = {Oxford University Press}, ISBN = {978-0198507659}, year = 2001 } @Article{efron78, author = {Bradley Efron and David V Hinkley}, title = {{Assessing the Accuracy of the Maximum Likelihood Estimator: Observed versus Expected Fisher Information}}, journal = {Biometrika}, year = 1978, volume = 65, number = 3, doi = {10.1093/biomet/65.3.457}, pages = {457-487}, } @article{burridge81, title = {A Note on Maximum Likelihood Estimation for Regression Models Using Grouped Data}, author = {Burridge, J.}, journal = {Journal of the Royal Statistical Society. 
Series B (Methodological)}, volume = {43}, number = {1}, pages = {41-45}, ISSN = {00359246}, language = {English}, year = {1981}, publisher = {Blackwell Publishing for the Royal Statistical Society}, } @article{pratt81, title = {Concavity of the Log Likelihood}, author = {Pratt, John W.}, journal = {Journal of the American Statistical Association}, volume = {76}, number = {373}, pages = {103-106}, ISSN = {01621459}, language = {English}, year = {1981}, doi = {10.2307/2287052}, } @Book{agresti10, author = {Alan Agresti}, title = {Analysis of ordinal categorical data}, publisher = {Wiley}, year = 2010, edition = {2nd}, doi = {10.1002/9780470594001} } @Book{agresti02, author = {Alan Agresti}, title = {Categorical Data Analysis}, publisher = {Wiley}, year = 2002, edition = {3rd}, ISBN = {978-0470463635}, } @Article{mccullagh80, author = {Peter McCullagh}, title = {Regression Models for Ordinal Data}, journal = {Journal of the Royal Statistical Society, Series B}, year = 1980, volume = 42, pages = {109-142} } @Article{randall89, author = {J.H. Randall}, title = {The Analysis of Sensory Data by Generalised Linear Model}, journal = {Biometrical journal}, year = 1989, volume = 7, pages = {781-793}, doi = {10.1002/bimj.4710310703}, } @phdthesis{mythesis, title = "Sensometrics: Thurstonian and Statistical Models", author = "Christensen, Rune Haubo Bojesen", year = "2012", publisher = "Technical University of Denmark (DTU)", school = "Technical University of Denmark (DTU)", url = "http://orbit.dtu.dk/files/12270008/phd271_Rune_Haubo_net.pdf" } @Manual{SAStype, title = {The Four Types of Estimable Functions -- \proglang{SAS/STAT} \textregistered 9.22 User's Guide}, author = {\proglang{SAS} Institute Inc.}, organization = {\proglang{SAS} Institute Inc.}, address = {Cary, NC}, year = {2008}, url = {https://support.sas.com/documentation/cdl/en/statugestimable/61763/PDF/default/statugestimable.pdf}, } @Manual{SAS, title = {\proglang{SAS/STAT} \textregistered 9.22 User's Guide}, author = {\proglang{SAS} Institute Inc.}, organization = {\proglang{SAS} Institute Inc.}, address = {Cary, NC}, year = {2010}, url = {https://support.sas.com/documentation/}, } @Manual{ucminf, title = {ucminf: General-Purpose Unconstrained Non-Linear Optimization}, author = {Hans Bruun Nielsen and Stig Bousgaard Mortensen}, year = {2016}, note = {R package version 1.1-4}, url = {https://CRAN.R-project.org/package=ucminf}, } @Book{fahrmeir01, author = {Ludwig Fahrmeir and Gerhard Tutz}, title = {Multivariate Statistical Modelling Based on Generalized Linear Models}, publisher = {Springer-Verlag New York, Inc.}, year = 2001, series = {Springer series in statistics}, edition = {2nd} } @Book{greene10, author = {William H Greene and David A Hensher}, title = {Modeling Ordered Choices: A Primer}, publisher = {Cambridge University Press}, year = 2010} @Book{mccullagh89, author = {Peter McCullagh and John A. 
Nelder}, title = {Generalized Linear Models}, edition = {2nd}, year = {1989}, publisher = {Chapman \& Hall}, address = {London}, doi = {10.1007/978-1-4899-3242-6}, } @Manual{Stata, title = {\proglang{Stata} 15 Base Reference Manual}, author = {{StataCorp}}, publisher = "Stata Press", address = "College Station, TX", year = {2017}, url = {https://www.stata.com/}, } @article{oglm, author = "Williams, R.", title = "Fitting heterogeneous choice models with oglm", journal = "Stata Journal", publisher = "Stata Press", address = "College Station, TX", volume = "10", number = "4", year = "2010", pages = "540-567(28)", url = "http://www.stata-journal.com/article.html?article=st0208" } @Article{gllamm, author="Rabe-Hesketh, Sophia and Skrondal, Anders and Pickles, Andrew", title="Generalized multilevel structural equation modeling", journal="Psychometrika", year="2004", month="Jun", day="01", volume="69", number="2", pages="167--190", issn="1860-0980", doi="10.1007/BF02295939", url="https://doi.org/10.1007/BF02295939" } @Manual{SPSS, title = {\proglang{IBM SPSS} Statistics for Windows, Version 25.0}, author = {{IBM Corp.}}, organization = {IBM Corp.}, address = {Armonk, NY}, year = {2017}, } @Manual{R, title = {\proglang{R}: {A} Language and Environment for Statistical Computing}, author = {{\proglang{R} Core Team}}, organization = {\proglang{R} Foundation for Statistical Computing}, address = {Vienna, Austria}, year = {2018}, url = {https://www.R-project.org/}, } @Article{brms, title = {{brms}: An {R} Package for {Bayesian} Multilevel Models Using {Stan}}, author = {Paul-Christian Bürkner}, journal = {Journal of Statistical Software}, year = {2017}, volume = {80}, number = {1}, pages = {1--28}, doi = {10.18637/jss.v080.i01}, encoding = {UTF-8}, } @Manual{rms, title = {rms: Regression Modeling Strategies}, author = {Frank E {Harrell Jr}}, year = {2018}, note = {R package version 5.1-2}, url = {https://CRAN.R-project.org/package=rms}, } @Book{MASS, author = {William N. Venables and Brian D. Ripley}, title = {Modern Applied Statistics with \proglang{S}}, edition = {4th}, year = {2002}, pages = {495}, publisher = {Springer-Verlag}, address = {New York}, doi = {10.1007/978-0-387-21706-2}, } @Article{VGAM, author = {Thomas W. Yee}, title = {The \pkg{VGAM} Package for Categorical Data Analysis}, journal = {Journal of Statistical Software}, year = {2010}, volume = {32}, number = {10}, pages = {1--34}, doi = {10.18637/jss.v032.i10}, } @Article{Zeileis+Kleiber+Jackman:2008, author = {Achim Zeileis and Christian Kleiber and Simon Jackman}, title = {Regression Models for Count Data in \proglang{R}}, journal = {Journal of Statistical Software}, year = {2008}, volume = {27}, number = {8}, pages = {1--25}, doi = {10.18637/jss.v027.i08}, } ordinal/vignettes/clmm2_tutorial.Rnw0000644000176200001440000004375212431104052017342 0ustar liggesusers\documentclass[a4paper]{article} \usepackage{amsmath}%the AMS math extension of LaTeX. \usepackage{amssymb}%the extended AMS math symbols. 
%% \usepackage{amsthm} \usepackage{bm}%Use 'bm.sty' to get `bold math' symbols \usepackage{natbib} \usepackage[T1]{fontenc} \usepackage[utf8]{inputenc} \usepackage{Sweave} \usepackage{url} \usepackage{float}%Use `float.sty' \usepackage[left=3.5cm,right=3.5cm]{geometry} \usepackage{algorithmic} \usepackage[amsmath,thmmarks,standard,thref]{ntheorem} %%\VignetteIndexEntry{clmm2 tutorial} %%\VignetteDepends{ordinal, xtable} \title{A Tutorial on fitting Cumulative Link Mixed Models with \texttt{clmm2} from the \textsf{ordinal} Package} \author{Rune Haubo B Christensen} %% \numberwithin{equation}{section} \setlength{\parskip}{2mm}%.8\baselineskip} \setlength{\parindent}{0in} %% \DefineVerbatimEnvironment{Sinput}{Verbatim}%{} %% {fontshape=sl, xleftmargin=1em} %% \DefineVerbatimEnvironment{Soutput}{Verbatim}%{} %% {xleftmargin=1em} %% \DefineVerbatimEnvironment{Scode}{Verbatim}%{} %% {fontshape=sl, xleftmargin=1em} \fvset{listparameters={\setlength{\topsep}{0pt}}} %% \fvset{listparameters={\setlength{\botsep}{0pt}}} \renewenvironment{Schunk}{\vspace{-1mm}}{\vspace{-1mm}} %RE-DEFINE marginpar \setlength{\marginparwidth}{1in} \let\oldmarginpar\marginpar \renewcommand\marginpar[1]{\oldmarginpar[\-\raggedleft\tiny #1]% {\tiny #1}} %uncomment to _HIDE_MARGINPAR_: %\renewcommand\marginpar[1]{} \newcommand{\var}{\textup{var}} \newcommand{\I}{\mathcal{I}} \newcommand{\bta}{\bm \theta} \newcommand{\ta}{\theta} \newcommand{\tah}{\hat \theta} \newcommand{\di}{~\textup{d}} \newcommand{\td}{\textup{d}} \newcommand{\Si}{\Sigma} \newcommand{\si}{\sigma} \newcommand{\bpi}{\bm \pi} \newcommand{\bmeta}{\bm \eta} \newcommand{\tdots}{\hspace{10mm} \texttt{....}} \newcommand{\FL}[1]{\fvset{firstline= #1}} \newcommand{\LL}[1]{\fvset{lastline= #1}} \newcommand{\s}{\square} \newcommand{\bs}{\blacksquare} % figurer bagerst i artikel %% \usepackage[tablesfirst, nolists]{endfloat} %% \renewcommand{\efloatseparator}{\vspace{.5cm}} \theoremstyle{plain} %% {break} \theoremseparator{:} \theoremsymbol{{\tiny $\square$}} %%\theoremstyle{plain} \theorembodyfont{\small} \theoremindent5mm \renewtheorem{example}{Example} %% \newtheoremstyle{example}{\topsep}{\topsep}% %% {}% Body font %% {}% Indent amount (empty = no indent, \parindent = para indent) %% {\bfseries}% Thm head font %% {}% Punctuation after thm head %% {\newline}% Space after thm head (\newline = linebreak) %% {\thmname{#1}\thmnumber{ #2}\thmnote{ #3}}% Thm head spec %% %% \theoremstyle{example} %% %% \newtheorem{example}{Example}[subsection] %% \newtheorem{example}{Example}[section] \usepackage{lineno} % \linenumbers \newcommand*\patchAmsMathEnvironmentForLineno[1]{% \expandafter\let\csname old#1\expandafter\endcsname\csname #1\endcsname \expandafter\let\csname oldend#1\expandafter\endcsname\csname end#1\endcsname \renewenvironment{#1}% {\linenomath\csname old#1\endcsname}% {\csname oldend#1\endcsname\endlinenomath}}% \newcommand*\patchBothAmsMathEnvironmentsForLineno[1]{% \patchAmsMathEnvironmentForLineno{#1}% \patchAmsMathEnvironmentForLineno{#1*}}% \AtBeginDocument{% \patchBothAmsMathEnvironmentsForLineno{equation}% \patchBothAmsMathEnvironmentsForLineno{align}% \patchBothAmsMathEnvironmentsForLineno{flalign}% \patchBothAmsMathEnvironmentsForLineno{alignat}% \patchBothAmsMathEnvironmentsForLineno{gather}% \patchBothAmsMathEnvironmentsForLineno{multline}% } \begin{document} \bibliographystyle{chicago} \maketitle \begin{abstract} It is shown by example how a cumulative link mixed model is fitted with the \texttt{clmm2} function in package \textsf{ordinal}. 
Model interpretation and inference are briefly discussed. A tutorial for the more recent \texttt{clmm} function is work in progress.
\end{abstract}

%% \newpage
%% \tableofcontents
%% \newpage

\SweaveOpts{echo=TRUE, results=verb, width=4.5, height=4.5}
\SweaveOpts{prefix.string=figs}
\fvset{listparameters={\setlength{\topsep}{0pt}}, gobble=0, fontsize=\small}
%% \fvset{gobble=0, fontsize=\small}
\setkeys{Gin}{width=.49\textwidth}
<<echo=FALSE, results=hide>>=
## Load common packages, functions and set settings:
library(ordinal)
library(xtable)
## RUN <- FALSE #redo computations and write .RData files

## Change options:
op <- options() ## To be able to reset settings
options("digits" = 7)
options(help_type = "html")
## options("width" = 75)
options("SweaveHooks" = list(fig=function() par(mar=c(4,4,.5,0)+.5)))
options(continue=" ")
@

We will consider the data on the bitterness of wine from \citet{randall89} presented in Table~\ref{tab:winedata} and available as the object \texttt{wine} in package \textsf{ordinal}. The data were also analyzed with mixed effects models by \citet{tutz96}. The following gives an impression of the wine data object:
<<>>=
data(wine)
head(wine)
str(wine)
@
The data represent a factorial experiment on factors determining the bitterness of wine with 1 = ``least bitter'' and 5 = ``most bitter''. Two treatment factors (temperature and contact) each have two levels. Temperature and contact between juice and skins can be controlled when crushing grapes during wine production. Nine judges each assessed wine from two bottles from each of the four treatment conditions, hence there are 72 observations in all. For more information see the manual entry for the wine data: \texttt{help(wine)}.

\begin{table}
  \centering
  \caption{Ratings of the bitterness of some white wines. Data are adapted from \citet{randall89}.}
  \label{tab:winedata}
  \begin{tabular}{lllrrrrrrrrr}
    \hline
    & & & \multicolumn{9}{c}{Judge} \\
    \cline{4-12}
<<echo=FALSE, results=tex>>=
data(wine)
temp.contact.bottle <- with(wine, temp:contact:bottle)[drop=TRUE]
tab <- xtabs(as.numeric(rating) ~ temp.contact.bottle + judge, data=wine)
class(tab) <- "matrix"
attr(tab, "call") <- NULL
mat <- cbind(rep(c("cold", "warm"), each = 4),
             rep(rep(c("no", "yes"), each=2), 2),
             1:8, tab)
colnames(mat) <- c("Temperature", "Contact", "Bottle", 1:9)
xtab <- xtable(mat)
print(xtab, only.contents=TRUE, include.rownames=FALSE,
      sanitize.text.function = function(x) x)
@
  \end{tabular}
\end{table}

We will fit the following cumulative link mixed model to the wine data:
\begin{equation}
  \label{eq:mixedModel}
  \begin{array}{c}
    \textup{logit}(P(Y_i \leq j)) = \theta_j - \beta_1 (\mathtt{temp}_i) - \beta_2(\mathtt{contact}_i) - u(\mathtt{judge}_i) \\
    i = 1,\ldots, n, \quad j = 1, \ldots, J-1
  \end{array}
\end{equation}
This is a model for the cumulative probability of the $i$th rating falling in the $j$th category or below, where $i$ indexes all observations and $j = 1, \ldots, J$ indexes the response categories ($J = 5$). $\{\theta_j\}$ are known as threshold parameters or cut-points. We take the judge effects to be random, and assume that the judge effects are IID normal: $u(\mathtt{judge}_i) \sim N(0, \sigma_u^2)$.

We fit this model with the \texttt{clmm2} function in package \textsf{ordinal}.
Here we save the fitted \texttt{clmm2} model in the object \texttt{fm1} (short for \texttt{f}itted \texttt{m}odel \texttt{1}) and \texttt{print} the model by simply typing its name:
<<>>=
fm1 <- clmm2(rating ~ temp + contact, random=judge, data=wine)
fm1
@
Maximum likelihood estimates of the parameters are provided using the Laplace approximation to compute the likelihood function. A more accurate approximation is provided by the adaptive Gauss-Hermite quadrature method. Here we use 10 quadrature nodes and use the \texttt{summary} method to display additional information:
<<>>=
fm2 <- clmm2(rating ~ temp + contact, random=judge, data=wine, Hess=TRUE, nAGQ=10)
summary(fm2)
@
The small changes in the parameter estimates show that the Laplace approximation was in fact rather accurate in this case. Observe that we set the option \texttt{Hess = TRUE}. This is needed if we want to use the \texttt{summary} method since the Hessian is needed to compute standard errors of the model coefficients. The results contain the maximum likelihood estimates of the parameters: \begin{equation} \label{eq:parameters} \hat\beta_1 = 3.06, ~~\hat\beta_2 = 1.83, ~~\hat\sigma_u^2 = 1.29 = 1.13^2, ~~\{\hat\theta_j\} = [-1.62,~ 1.51,~ 4.23,~ 6.09]. \end{equation} Observe that the number under \texttt{Std.Dev} for the random effect is \textbf{not} the standard error of the random effects variance, \texttt{Var}. Rather, it is the standard deviation of the random effects, i.e., it is the square root of the variance. In our example $\sqrt{1.29} \simeq 1.13$. The condition number of the Hessian measures the empirical identifiability of the model. High numbers, say larger than $10^4$ or $10^6$, indicate that the model is ill-defined. This would indicate that the model can be simplified, that possibly some parameters are not identifiable, and that optimization of the model can be difficult. In this case the condition number of the Hessian does not indicate a problem with the model. The coefficients for \texttt{temp} and \texttt{contact} are positive, indicating that higher temperature and more contact increase the bitterness of wine, i.e., rating in higher categories is more likely. The odds ratio of the event $Y \geq j$ is $\exp(\beta_{\textup{treatment}})$, thus the odds ratio of bitterness being rated in category $j$ or above at warm relative to cold temperatures is
<<>>=
exp(coef(fm2)[5])
@
The $p$-values for the location coefficients provided by the \texttt{summary} method are based on the so-called Wald statistic. More accurate tests are provided by likelihood ratio tests. These can be obtained with the \texttt{anova} method; for example, the likelihood ratio test of \texttt{contact} is
<<>>=
fm3 <- clmm2(rating ~ temp, random=judge, data=wine, nAGQ=10)
anova(fm3, fm2)
@
which in this case is slightly more significant. The Wald test is not reliable for variance parameters, so the \texttt{summary} method does not provide a test of $\sigma_u$, but a likelihood ratio test can be obtained with \texttt{anova}:
<<>>=
fm4 <- clm2(rating ~ temp + contact, data=wine)
anova(fm4, fm2)
@
showing that the judge term is significant. Since this test of $\sigma_u = 0$ is on the boundary of the parameter space (a variance cannot be negative), it is often argued that a more correct $p$-value is obtained by halving the $p$-value produced by the conventional likelihood ratio test. In this case halving the $p$-value is of little relevance.
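As a quick supplement, the halved $p$-value can also be computed directly from the likelihood ratio statistic; the single degree of freedom for the one variance parameter under test is the only assumption made in this sketch:
<<>>=
## Boundary-corrected p-value: half the conventional LRT p-value.
LR <- 2 * (as.numeric(logLik(fm2)) - as.numeric(logLik(fm4)))
pchisq(LR, df = 1, lower.tail = FALSE) / 2
@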
A profile likelihood confidence interval of $\sigma_u$ is obtained with:
<<>>=
pr2 <- profile(fm2, range=c(.1, 4), nSteps=30, trace=0)
confint(pr2)
@
The profile likelihood can also be plotted:
<>=
plot(pr2)
@
The result is shown in Fig.~\ref{fig:PRsigma_u} where horizontal lines indicate 95\% and 99\% confidence bounds. Clearly the profile likelihood function is asymmetric and symmetric confidence intervals would be inaccurate. \begin{figure} \centering <>= <> @ \caption{Profile likelihood of $\sigma_u$.} \label{fig:PRsigma_u} \end{figure} The judge effects, $u(\mathtt{judge}_i)$, are not parameters, so they cannot be \emph{estimated} in the conventional sense, but a ``best guess'' is provided by the \emph{conditional modes}. Similarly, the \emph{conditional variance} provides an uncertainty measure of the conditional modes. These quantities are included in \texttt{clmm2} objects as the \texttt{ranef} and \texttt{condVar} components. The following code generates the plot in Fig.~\ref{fig:ranef} illustrating judge effects via conditional modes with 95\% confidence intervals based on the conditional variance:
<>=
ci <- fm2$ranef + qnorm(0.975) * sqrt(fm2$condVar) %o% c(-1, 1)
ord.re <- order(fm2$ranef)
ci <- ci[order(fm2$ranef),]
plot(1:9, fm2$ranef[ord.re], axes=FALSE, ylim=range(ci), xlab="Judge", ylab="Judge effect")
axis(1, at=1:9, labels = ord.re)
axis(2)
for(i in 1:9) segments(i, ci[i,1], i, ci[i, 2])
abline(h = 0, lty=2)
@
The seventh judge gave the lowest ratings of bitterness while the first judge gave the highest ratings of bitterness. The significant judge effect indicates that judges perceived the bitterness of the wines differently. Two natural interpretations are that either a bitterness of, say, 3 means different things to different judges, or the judges actually perceived the bitterness of the wines differently. Possibly both effects play their part. \begin{figure} \centering <>= <> @ \caption{Judge effects given by conditional modes with 95\% confidence intervals based on the conditional variance.} \label{fig:ranef} \end{figure} The fitted or predicted probabilities can be obtained with the judge effects at their conditional modes or for an average judge ($u = 0$). The former are available with \texttt{fitted(fm)} or with \texttt{predict(fm)}, where \texttt{fm} is a \texttt{f}itted \texttt{m}odel object. In our example we get
<<>>=
head(cbind(wine, fitted(fm2)))
@
Predicted probabilities for an average judge can be obtained by including the data used to fit the model in the \texttt{newdata} argument of \texttt{predict}:
<<>>=
head(cbind(wine, pred=predict(fm2, newdata=wine)))
@
Model~\eqref{eq:mixedModel} says that for an average judge at cold temperature the cumulative probability of a bitterness rating in category $j$ or below is \begin{equation*} P(Y_i \leq j) = \textup{logit}^{-1} [ \theta_j - \beta_2(\mathtt{contact}_i) ] \end{equation*} since $u$ is set to zero and $\beta_1(\mathtt{temp}_i) = 0$ at cold conditions. Further, $\textup{logit}^{-1}(\eta) = 1 / [1 + \exp(-\eta)]$ is the cumulative distribution function of the logistic distribution available as the \texttt{plogis} function.
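A small numerical sanity check of this identity (the value $\eta = 1.5$ is arbitrary):
<<>>=
all.equal(plogis(1.5), 1 / (1 + exp(-1.5)))
@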
The (non-cumulative) probability of a bitterness rating in category $j$ is $\pi_j = P(Y_i \leq j) - P(Y_i \leq j-1)$; for instance, the probability of a bitterness rating in the third category at these conditions can be computed as
<<>>=
plogis(fm2$Theta[3] - fm2$beta[2]) - plogis(fm2$Theta[2] - fm2$beta[2])
@
This corresponds to the third entry of \texttt{predict(fm2, newdata=wine)} given above. Judge effects are random and normally distributed, so an average judge effect is 0. Extreme judge effects, say the 5th and 95th percentile judge effects, are given by
<<>>=
qnorm(0.95) * c(-1, 1) * fm2$stDev
@
At the baseline experimental conditions (cold and no contact) the probabilities of bitterness ratings in the five categories for a 5th percentile judge are
<<>>=
## Category probabilities from a linear predictor (eta) and thresholds
## (theta); -1e3 and 1e3 stand in for the end thresholds at -/+ infinity:
pred <- function(eta, theta, cat = 1:(length(theta)+1), inv.link = plogis)
{
  Theta <- c(-1e3, theta, 1e3)
  sapply(cat, function(j)
    inv.link(Theta[j+1] - eta) - inv.link(Theta[j] - eta) )
}
pred(qnorm(0.05) * fm2$stDev, fm2$Theta)
@
We can compute these probabilities for average, 5th and 95th percentile judges at the four experimental conditions. The following code plots these probabilities, and the results are shown in Fig.~\ref{fig:ratingProb}.
<>=
mat <- expand.grid(judge = qnorm(0.95) * c(-1, 0, 1) * fm2$stDev, contact = c(0, fm2$beta[2]), temp = c(0, fm2$beta[1]))
pred.mat <- pred(eta=rowSums(mat), theta=fm2$Theta)
lab <- paste("contact=", rep(levels(wine$contact), 2), ", ", "temp=", rep(levels(wine$temp), each=2), sep="")
par(mfrow=c(2, 2))
for(k in c(1, 4, 7, 10)) {
  plot(1:5, pred.mat[k,], lty=2, type = "l", ylim=c(0,1), xlab="Bitterness rating scale", axes=FALSE, ylab="Probability", main=lab[ceiling(k/3)], las=1)
  axis(1); axis(2)
  lines(1:5, pred.mat[k+1, ], lty=1)
  lines(1:5, pred.mat[k+2, ], lty=3)
  legend("topright", c("avg. judge", "5th %-tile judge", "95th %-tile judge"), lty=1:3, bty="n")
}
@
\begin{figure} \centering
<>=
k <- 1
plot(1:5, pred.mat[k,], lty=2, type = "l", ylim=c(0,1), xlab="Bitterness rating scale", axes=FALSE, ylab="Probability", main=lab[ceiling(k/3)], las=1)
axis(1); axis(2)
lines(1:5, pred.mat[k+1, ], lty=1)
lines(1:5, pred.mat[k+2, ], lty=3)
legend("topright", c("avg. judge", "5th %-tile judge", "95th %-tile judge"), lty=1:3, bty="n")
@
<>=
k <- 4
plot(1:5, pred.mat[k,], lty=2, type = "l", ylim=c(0,1), xlab="Bitterness rating scale", axes=FALSE, ylab="Probability", main=lab[ceiling(k/3)], las=1)
axis(1); axis(2)
lines(1:5, pred.mat[k+1, ], lty=1)
lines(1:5, pred.mat[k+2, ], lty=3)
legend("topright", c("avg. judge", "5th %-tile judge", "95th %-tile judge"), lty=1:3, bty="n")
@
<>=
k <- 7
plot(1:5, pred.mat[k,], lty=2, type = "l", ylim=c(0,1), xlab="Bitterness rating scale", axes=FALSE, ylab="Probability", main=lab[ceiling(k/3)], las=1)
axis(1); axis(2)
lines(1:5, pred.mat[k+1, ], lty=1)
lines(1:5, pred.mat[k+2, ], lty=3)
legend("topright", c("avg. judge", "5th %-tile judge", "95th %-tile judge"), lty=1:3, bty="n")
@
<>=
k <- 10
plot(1:5, pred.mat[k,], lty=2, type = "l", ylim=c(0,1), xlab="Bitterness rating scale", axes=FALSE, ylab="Probability", main=lab[ceiling(k/3)], las=1)
axis(1); axis(2)
lines(1:5, pred.mat[k+1, ], lty=1)
lines(1:5, pred.mat[k+2, ], lty=3)
legend("topright", c("avg. judge", "5th %-tile judge", "95th %-tile judge"), lty=1:3, bty="n")
@
judge", "5th %-tile judge", "95th %-tile judge"), lty=1:3, bty="n") @ \caption{Rating probabilities for average and extreme judges at different experimental conditions.} \label{fig:ratingProb} \end{figure} At constant experimental conditions the odds ratio for a bitterness rating in category $j$ or above for a 95th percentile judge relative to a 5th percentile judge is <<>>= exp(2*qnorm(0.95) * fm2$stDev) @ The differences between judges can also be expressed in terms of the interquartile range: the odds ratio for a bitterness rating in category $j$ or above for a third quartile judge relative to a first quartile judge is <<>>= exp(2*qnorm(0.75) * fm2$stDev) @ \newpage \bibliography{ordinal} %% \newpage \end{document} <>= @ ordinal/vignettes/static_figs/0000755000176200001440000000000013575515131016217 5ustar liggesusersordinal/vignettes/static_figs/fig-figEqui.pdf0000755000176200001440000003467011772034312021055 0ustar liggesusers%PDF-1.4 %ρ\r 1 0 obj << /CreationDate (D:20120625120914) /ModDate (D:20120625120914) /Title (R Graphics Output) /Producer (R 2.15.0) /Creator (R) >> endobj 2 0 obj << /Type /Catalog /Pages 3 0 R >> endobj 7 0 obj << /Type /Page /Parent 3 0 R /Contents 8 0 R /Resources 4 0 R >> endobj 8 0 obj << /Length 10888 /Filter /FlateDecode >> stream xK-KRWa{%O ɖqd!`}x{؃yZ2*#222?7'Siܟ6o??շu]?ſo㿿VV>/hRI ~DmhvPD"U^xE)ln$s~Ԋ/R-g Z%u^we>EGG4([?/zLQcMzQ&~WGS<^":B?k^Dޗh(l! GSE>}sD52_U4:ߌvAf~MPVISW$c"h(5;W 4D{ۛcyDG :lo( ׳{zhlbLק*{}YO| AZ'o-'8e`SW)(&t=j "Xc\!5N^sԵFk:%ZlָtT.qɇϏ#,q4ָdw &q&[Sf{ ^.kܰqk7~'H޴-uQ\ X?~ZB,tDq,FOhBkxY57ͦ9uvgS~(y+j2vk5gq}wlrލWx͟u7=,az6 w5^'=\}.\ς>ƫoayr`AAaziz,{\y4 k .^_=Fm3yr_"^.b]xk\EsG5+hUX#^(l E}U2vW0kuOx4"WX\]vq<&=u=]ZD1╱~ǹs5eqwg}hx b4#O8d1:Y[uPYkk|{p٣{X}u†5{{xxU n_qV[п+b ָYx1u QxX8ZGHPXc|5*Q O8+75,\ zL|'촹 Mn{h S:(qOYGPXxsls؎[7=DDPXvĹ NQ"(m{/HweX h☏y0'1& k 5D}GV"C#cWwT3(1W>(TE{ºNpܜъLNq=v9tyvAYY78W+ƾ_)Wgh{Dx<=zL8~Jxī&ǫƳ|]8K:(,s|s((پJxw5\FG%~=7%6y(Uq?Wp8"1p\0i~``f a`7``ehmtb@yF1+9L\t/s/clU!xTsc^㱡G{ hż|>\}>94qS0NRu{6w6n^S '*WgB,9HDѸI0c9g8OL}Lcc#+i\n#p͈Kљ0ݸ?t^Z2XSr.sθ},mdl.b,'-cft`eЬ)1p0j.Jkz sӳsfd콽cr{ wN2^kÜ0U*0 [;H+$9;Z(yw'H,d݁XۥVvrZ-#wDu!ӹX;2 XU'ÜΆ::U1N.qЀk^+Bnh` сi\ M>pq)P݂u6@~2۸u֗瀍iތyn47s X\gX y ;S(rqr TL'"QdBCWHOj "xi,T8ؽ!; ( *BZɫrܾÜg޹=[EޤF3W HccMwAi00'sVc[95Gvœ.LG)I{p^=̍҃CqnclƑ=fT>`NQS_gl3[44m3Nk~1A2ų8&CRfsŗ}r?.(L1S֚&bLJ:(O9[Vk"R@=s`l\J{`&nf}-%;q_J1[y984tGWZ56I)r7v0 Nw"Zn!|xSd0eYzB ]ÁpkHW9 ԑ|:7qj77g䍹{ظcki̓p5j+M9prG䧇[r>k8/f^7.q+5`6vʁk5$b~hI[ MeV`^1949rMۯn in*\L`VWpFeekqbjs3Z5vAMhuc3OVn{_E8x>J ͝o %I8~6Սظi?'w5PJl9\`ƽ\BGk#Du1ݻB y~\AEKG qX}fgYѽ<3RlWKecp+%>9t_H>cU kh\&c}KQU*HZLE&֪S*TMAޚmXUQܮEVwj]Ю:.{F}b%x˃N~:Xt!EH"In~:Y@jE@F}xE,CO$L,_kwKGYt<A:_SМr6˜" +K9E)M"!DUwM1@U*.T$X oPU ̯rzŹ*XTY9w`ٻQBR)~]:7䧿CՏ˸U>sO<6gUcsQ|Y ®vQ|Xl7ʜ@n=ʜo9yTypv2eΚZ9<<ʜo9F3(=|Fqۜol zn<6gM؜o9(s1B2gc9 Wyy[m7B3ڛFM"7}=ۜo98 P<ʜo9^^8 ϳes<$r(/\S86gE}؜o9ߨID8@x`/9(s6mpjs^ Nsb?/\vƺ_8 fIsNlp?Na4Ķ^8 P1፽p)4>_ ϳݑ8 ˜a^8 <˜Uħ9vLxc;/|/sV9欂')ez^˜zh/|/s^eΫ9p?m o|j/s^eΫDZ+,&EVe~!#' ~bDTO ){bo/(O\C1zb/y خ%BFTOlㅳp?%9_xQ=.#'8 7X G~ˈꉭpQ=<FTOs_ ~ ŜOk=y/܏CI_8)H4IeOy #'9ӜiVt¾_ۜ4IӜdOli4g٘<>)|dD4gc<os|>q9m4IR~>9ӜԟOLs>q/ӜiF4-'^_8 mPJ Ds|Am̌J39|,RҜmΉ6Ԍڜ;'&w })OjC93m,u9 P%%O*DHsv 4Eb>q4aL ̹(/.(K)C%OEV­SI(ѩv$?)%OF_Q"M" h)pQjR◜>'-Y+A)pX5z]&%)%ZE:t LJTJ䑻ق JOKWJly!s.Jϫx>-sCMM"I^J\ԗ%iR4mQ͙4Dvc+&*K*Sb̔>3%G朊#snIMZS mPbS4g ?$7%OM/ń.h)͹ Tsheiס8$;-Y-)_n ]')q}Rz ؐhiաCrHS"28VThA֡] PY}B%OPqt%%DJ6_ХҢ'ŨOi^Jʪ{4x ET JT"凗[sҤbKJTŦő.a*-'[~JsFJYa*cB?WHTM}xSUqO*TKj$Q%OjTY>)R-ߺT#*k@L9<&**}IUY0R?U-)oT=Z2g J v)&)K1aji٥1}R FR2ȘOn84g K<6%sv ҹ44 p 4yHs^r2үF$`-Rh(QgUL4aMuqb-摌2cJF2R I \VҜ-#5jV mArVXʘЏ7듊VgVH 
ordinal/vignettes/static_figs/fig-figFlex.pdf0000755000176200001440000003474011772034312021046 0ustar liggesusers [binary PDF figure: R Graphics Output (R 2.15.0), 288 x 360 pt; compressed stream content omitted]
ordinal/vignettes/static_figs/fig-figNom2.pdf0000755000176200001440000003464011772034313020763 0ustar liggesusers [binary PDF figure: R Graphics Output (R 2.15.0), 288 x 360 pt; compressed stream content omitted]
ordinal/vignettes/static_figs/fig-fig2.pdf0000755000176200001440000003453411772034312020312 0ustar liggesusers [binary PDF figure: R Graphics Output (R 2.15.0), 288 x 360 pt; compressed stream content omitted]
ordinal/vignettes/static_figs/fig-figSca.pdf0000755000176200001440000003474611763611260020663 0ustar liggesusers [binary PDF figure: R Graphics Output (R 2.15.0), 288 x 360 pt; compressed stream content omitted]
ordinal/NEWS0000644000176200001440000002447513574236527012421 0ustar liggesusersThis file documents updates and changes in package ordinal since version 2010.03-04 March 04 2010: - First version of the package is created. 2010-04-06: - removing class "clm.fit" from results of finalizeRho. - moving offset computations from logLik and gradient functions to newRho function. - Bug fixed in grad.lambda - checks and warning messages added to profile.clm - a warning is now given if the profile fits do not converge - profile.clm has grown the argument 'stepWarn', which gives a warning if the no. profile steps in each direction (up or down) is less than stepWarn (default 8), which indicates that the profile is unreliable. - Bug in loglog-link for clmm fits fixed. - Missing values are handled better in clm and clmm. - clmm has grown an argument 'sdFixed' which assigns a fixed value of the standard deviation of the random effects. Optimization is performed with respect to the remaining parameters. - profile.clmm, confint.profile.clmm and plot.profile.clmm are now available. Profiling is restricted to the standard deviation parameter of the random effects. - control.clm and control.clmm now handle the control parameters. 2010-05-06: - allowing the formulas to be constructed outside clm and clmm (the formulas are evaluated in the parent frame before the variable names are extracted) 2010-05-17: - Better evaluation in case of non-standard formula usage allowing e.g. clm(data$y ~ data$x). - Better handling of ill-defined variance-covariance matrix of the parameters in summary methods for clm and clmm objects. 2010-06-12: - Standard Gauss-Hermite quadrature is now available via the nAGQ argument to clmm. - Core functions implemented in C for speed.
This includes all link functions, update of the conditional modes of the random effects, adaptive Gauss-Hermite quadrature and standard, i.e. non-adaptive Gauss-Hermite quadrature. Select R or C implementation via the argument doFit to clmm. - Bug in random effects estimates and their conditional modes corrected. 2010-07-06: - Bug in predict when 'newdata' was supplied is now corrected. 2010-07-23: - Better descriptions of random effect estimates and fitted values in the clmm help page. 2010-10-22: - Updated help page for predict.clm/clmm. 2010-12-13: - Bug in predict.clm corrected for models with nominal effects and newdata supplied (thanks to Simon Blomberg for the bug report). 2011-04-21: - Better message from summary.clmm when Hess = FALSE - endpoint thresholds are now closer to infinity. This is due to a bug report from Ioannis Kosmidis (March 30, 2011); the model estimates weren't right with very large scale effects. Tests are added to testCLM.R - gradTol in clm.control now defaults to 1e-5 rather than 1e-4. convTol is retained at 1e-4, so we are asking for closer convergence than we require. - getGnll no longer returns Inf if !all(pr > 0) - link utility functions are moved from clm.R to linkUtils.R - extensive testing for NaN-specials in C-code for the link functions is added. - details section added to clmm.control.Rd with comment about using "central" gradients with the ucminf optimizer. - examples updated in confint.Rd 2012-01-19: - Changed evaluation of formula in clm to make clm more forgiving for evaluation inside other functions. 2012-05-09: - Updated evaluation of formula in clmm, cf. recent update of clm. 2012-05-22: - Better evaluation of fitted probabilities. This should reduce the occurrence of the "sqrt(phi2) : NaNs produced" error message. - Improved evaluation of control parameters in clmm using the new function getCtrlArgs. - Better warning if removal of the intercept is attempted in clmm. 2012-05-23: - Adding useMatrix argument to clmm.control - Using getFitted in clm - Implementing getFittedC in C and updating C code for fit.clmm.ssr with better and faster evaluation of fitted values - Introduction of links.h, links.c and get_fitted.c in /src 2012-05-29: - Correcting formula interpretation in clm to allow for really long formulas. - Better evaluation of control arguments in clmm (adjustment of getCtrlArgs). - Adding clmm.control.R to ./test 2012-09-10: - Computing Newton step in clm with solve() rather than .Call("La_dgesv", ...) to accommodate changes in R base. 2012-09-11: - Using globalVariables() conditional on getRversion() >= '2.15.1'. 2013-03-20: - Adding symmetric2 threshold function, which restricts the latent mean in the reference group to zero. This means that the central threshold (ylev even) is zero or that the two central thresholds are equal apart from their sign (ylev uneven). 2013-04-08: - Allowing zero weights in clm unless there are no observations with a positive weight in one or more response categories. 2013-04-11: - clm now computes Theta and alpha.mat tables of thresholds and threshold-parameters if nominal effects are specified. 2013-04-17: - anova.clm and anova.clmm now test for illegal arguments 'test' and 'type' (wish from Ben Bolker and Jonathan Dushoff) - introducing convergence code 3 in clm: Thresholds are not increasing, which can happen with nominal effects. 2013-06-21: - Allowing zero weights in clm even if an entire response category is zeroed out.
2013-07-23: - Newton-Raphson fitting algorithm for CLMs has been redesigned: clm.fit.env is now deprecated (and removed from the code base) and all fitting of CLMs takes place in a new version of clm.fit.NR - Convergence assessment has been improved with a new set of convergence codes and new message handling. - clm.control has gained several arguments to accommodate this. - in clm the new function conv.check assesses convergence and computes the variance-covariance matrix of the parameters. Thus vcov is always part of a clm object. - vcov.clm has been redesigned and can now compute the variance-covariance matrix with Cholesky, SVD, EIGEN and QR methods or just grab it from the clm object (default). - nominal_test and scale_test functions added: they add all terms in a model to nominal and scale formulae respectively and perform likelihood ratio tests. These functions can be helpful in model development and model/GOF testing, e.g. of non-proportional odds. - Lazy-loading of data enabled. - MASS moved from Depends to Imports. - In clm man-page the 'value' list is ordered alphabetically as are the elements in a clm object. - clmm now computes the variance-covariance matrix with the Cholesky decomposition. - makeThresholds now takes ylevels rather than y as argument. - clm.control and clmm.control are moved to control.R - drop.cols has gained argument drop.scale which controls whether columns in the scale design matrix are dropped if they are linearly dependent on columns in the nominal design matrix. This was previously implicitly TRUE but is now FALSE to allow fits of certain models. - The list of control arguments is now stored as part of the clm output. - weights, offset and S.offset can now be missing or NULL in clm.fit. - predict.clm now allows type="eta". 2013-08-22: - Exporting S3 print method for convergence.clm objects. 2013-08-23: - Fixing an issue in the Hessian computation for boundary fits with useMatrix=FALSE and a single scalar random-effects term. - Allowing control parameters to be passed on to nlminb (when it is used). A bug was fixed in getCtrlArgs and clmm.control now includes method="nlminb". - Adding test for no. random effects >= no. observations for each r.e. term. 2013-08-25: - changing default optimizer from ucminf to nlminb - adding grad.ctr4 to the list of gradient functions - explicitly computing the number of objective function evaluations rather than relying on the optimizer's count. - wrapping calls to optimizers in try() to catch errors that occur here - adding test for non-finite parameter values in *.ssr objective functions. - adding list of control parameters to list of clmm output. - refining test of no. random effects > no. observations. - removing ucminf control settings from clmm.control when fitting with nlminb. - fixing bug with C version of NRalgv3 (conditional mode update): Hessian (D) values are now initialized to 1 rather than 0. 2013-08-26: - registering global variables. - removing use of ':::'. 2013-08-27: - documenting list of control parameters in clmm objects. 2013-09-27: - no longer importing numDeriv as we now use our own gradient and hessian functions - moving Matrix package from Depends to Imports 2013-10-01: - Updating convergence checking in clm.fit and simple_clm to the clm standard - Removing distinction between (non-S3/4) sclm, eclm and clm model classes 2013-10-31: - Now having 'methods' in depends since this is needed to run clmm.
This was most likely only a problem when using Rscript where the methods package is not loaded by default. 2014-11-12: - Reimplementation of formula, model.frame and design matrix processing motivated by a bug in model.matrix.clm and predict.clm reported by Russell Lenth 2014-11-07 when implementing lsmeans support for ordinal::clm. 2014-11-14: - Fixing bug in convergence checking (conv.check) and added test to /tests/test.clm.convergence.R - Improved the efficiency (i.e. speed) in the evaluation of standard errors for predictions using predict.clm (based on feature request by Thomas Jagger). 2015-01-21: - Updating Citation information per CRAN request. 2015-06-28: - Updating maintainer email address 2016-12-12: - Fixing a couple of errors in the CLM tutorial vignette - Correcting description of threshold argument to clmm - qgumbel did not respect its lower.tail argument (thanks to John Fox for reporting) - Test for no. random effects less than the no. observations now gives a warning instead of an error and is now manageable via clmm.control. 2018-04-19: - Fixed insufficient protect in get_fitted - Registration of native routines (C-code) - Reduce exec time for clmm examples - change rBind -> rbind 2018-08-25: - Added sign.location and sign.nominal to clm.control() - Implemented type I, II and III type ANODE tables for clm fits - Implemented flexible link functions for clm()s - Added new article submitted to JSS as vignette and moved old vignettes to GitHub 2019-03-09: - Massage tests to check out on R-devel 2019-04-25: - Change in formula evaluation in base R prompts this update - very kindly fixed by Martin Maechler (R core) in PR#18 2019-12-11: - Get rid of S3 class checks with class() - now using inherits() instead. ordinal/R/0000755000176200001440000000000013575515131012111 5ustar liggesusersordinal/R/contrast_utils.R0000644000176200001440000002571613332052252015313 0ustar liggesusers#############################################################################
# Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen
#
# This file is part of the ordinal package for R (*ordinal*)
#
# *ordinal* is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# *ordinal* is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# <https://www.r-project.org/Licenses/> and/or
# <http://www.gnu.org/licenses/>.
#############################################################################
# contrast-utils.R - utility functions for contrasts, terms and anova
# -------- Contents: --------
#
# containment
# term_contain
# relatives
# doolittle
# ensure_full_rank
# get_rdX
# extract_contrasts_type3
##############################################
######## containment()
##############################################
containment <- function(object) { # lm or merMod
  # For all terms 'T' in object compute the terms that contain 'T'.
  # Return a list: for each term 'T' a vector of the terms that
  # contain 'T'.
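  # A hypothetical illustration (the names 'A' and 'B' are made up and
  # assumed to be factors): for a model with terms A, B and A:B the
  # result is list(A = "A:B", B = "A:B", `A:B` = character(0)).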
  terms <- terms(object)
  data_classes <- attr(terms(object), "dataClasses")
  # Note: need fixed.only for merMod objects to get dataClasses
  term_names <- attr(terms, "term.labels")
  factor_mat <- attr(terms, "factors")
  lapply(setNames(term_names, term_names), function(term) {
    term_names[term_contain(term, factor_mat, data_classes, term_names)]
  })
}

##############################################
######## term_contain()
##############################################
#' Determine which Terms Contain a Term
#'
#' The definition of \emph{containment} follows from the SAS documentation on
#' "The Four Types of Estimable Functions".
#'
#' Containment is defined for two model terms, say, F1 and F2 as:
#' F1 is contained in F2 (F2 contains F1) if
#' \enumerate{
#'   \item F1 and F2 involve the same continuous variables (if any)
#'   \item F2 involves more factors than F1
#'   \item All factors in F1 (if any) are part of F2
#' }
#' The intercept, though not really a model term, is defined by SAS to be
#' contained in all factor terms, but it is not contained in any
#' effect involving a continuous variable.
#'
#' @param term character; name of a model term and one of \code{term_names}.
#' @param factors the result of \code{attr(terms_object, "factors")}.
#' @param dataClasses the result of
#' \code{attr(terms(model, fixed.only=FALSE), "dataClasses")}. Note that
#' \code{fixed.only=FALSE} is only needed for \code{merMod} objects, but does
#' no harm for \code{lm} objects.
#' @param term_names the result of \code{attr(terms_object, "term.labels")}.
#'
#' @return a logical vector indicating for each term in \code{term_names} if
#' it contains \code{term}.
#' @importFrom stats setNames
#' @keywords internal
term_contain <- function(term, factors, dataClasses, term_names) {
  get_vars <- function(term) # Extract vector of names of all variables in a term
    rownames(factors)[factors[, term] == 1]
  contain <- function(F1, F2) {
    # Returns TRUE if F1 is contained in F2 (i.e. if F2 contains F1)
    # F1, F2: Names of terms, i.e. attr(terms_object, "term.labels")
    all(vars[[F1]] %in% vars[[F2]]) &&          # all variables in F1 are also in F2
      length(setdiff(vars[[F2]], vars[[F1]])) > 0L &&  # F2 involves more variables than F1
      setequal(numerics[[F1]], numerics[[F2]])  # F1 and F2 involve the same covariates (if any)
  }
  # Get (named) list of all variables in terms:
  vars <- lapply(setNames(term_names, term_names), get_vars)
  # Get (named) list of all _numeric_ variables in all terms:
  numerics <- lapply(vars, function(varnms)
    varnms[which(dataClasses[varnms] == "numeric")])
  # Check if 'term' is contained in each model term:
  sapply(term_names, function(term_nm) contain(term, term_nm))
}

##############################################
######## doolittle()
##############################################
#' Doolittle Decomposition
#'
#' @param x a numeric square matrix with at least 2 columns/rows.
#' @param eps numerical tolerance: components of \code{L} are only normalized
#'   with a diagonal element of \code{U} when that element exceeds \code{eps}
#'   in absolute value, and entries of \code{L} and \code{U} smaller than
#'   \code{eps} in absolute value are set to zero.
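#'
#' @details A quick sanity check of the factorization: for a square matrix
#'   with nonzero leading principal minors, e.g. \code{x <- diag(2) + 1},
#'   \code{with(doolittle(x), L \%*\% U)} reproduces \code{x} up to the
#'   tolerance \code{eps}. (The matrix here is an arbitrary illustration.)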
#'
#' @return a list with two matrices of the same dimension as \code{x}:
#' \item{L}{lower-left unit-triangular matrix}
#' \item{U}{upper-right triangular matrix (\emph{not} unit-triangular)}
#'
#' @keywords internal
doolittle <- function(x, eps = 1e-6) {
  if(!is.matrix(x) || ncol(x) != nrow(x) || !is.numeric(x))
    stop("argument 'x' should be a numeric square matrix")
  stopifnot(ncol(x) > 1L)
  n <- nrow(x)
  L <- U <- matrix(0, nrow=n, ncol=n)
  diag(L) <- rep(1, n)
  for(i in 1:n) {
    ip1 <- i + 1
    im1 <- i - 1
    for(j in 1:n) {
      U[i,j] <- x[i,j]
      if (im1 > 0) {
        for(k in 1:im1) {
          U[i,j] <- U[i,j] - L[i,k] * U[k,j]
        }
      }
    }
    if ( ip1 <= n ) {
      for ( j in ip1:n ) {
        L[j,i] <- x[j,i]
        if ( im1 > 0 ) {
          for ( k in 1:im1 ) {
            L[j,i] <- L[j,i] - L[j,k] * U[k,i]
          }
        }
        L[j, i] <- if(abs(U[i, i]) < eps) 0 else L[j,i] / U[i,i]
      }
    }
  }
  L[abs(L) < eps] <- 0
  U[abs(U) < eps] <- 0
  list( L=L, U=U )
}

##############################################
######## ensure_full_rank()
##############################################
#' Ensure a Design Matrix has Full (Column) Rank
#'
#' Determine and drop redundant columns using the \code{\link{qr}}
#' decomposition.
#'
#' @param X a design matrix as produced by \code{model.matrix}.
#' @param tol \code{qr} tolerance.
#' @param silent throw message if columns are dropped from \code{X}? Default
#'   is \code{FALSE}.
#' @param test.ans Test if the resulting/returned matrix has full rank? Default
#'   is \code{FALSE}.
#'
#' @return A design matrix in which redundant columns are dropped.
#' @keywords internal
ensure_full_rank <- function(X, tol = 1e-7, silent = FALSE, test.ans = FALSE) {
  ### works if ncol(X) >= 0 and nrow(X) >= 0
  ## test and match arguments:
  stopifnot(is.matrix(X))
  silent <- as.logical(silent)[1]
  ## perform the qr-decomposition of X using LINPACK methods:
  qr.X <- qr(X, tol = tol, LAPACK = FALSE)
  if(qr.X$rank == ncol(X)) {
    ## return X if X has full column rank
    return(X)
  }
  if(!silent) ## message the no. dropped columns:
    message(gettextf("Design is column rank deficient so dropping %d coef",
                     ncol(X) - qr.X$rank))
  ## return the columns corresponding to the first qr.X$rank pivot
  ## elements of X:
  keep <- with(qr.X, pivot[seq_len(rank)])
  newX <- X[, keep, drop = FALSE]
  sel <- with(qr.X, pivot[-seq_len(rank)])
  ## Copy old attributes:
  if(!is.null(contr <- attr(X, "contrasts")))
    attr(newX, "contrasts") <- contr
  if(!is.null(asgn <- attr(X, "assign")))
    attr(newX, "assign") <- asgn[-sel]
  ## did we succeed? stop-if-not:
  if(test.ans && qr.X$rank != qr(newX)$rank)
    stop(gettextf("Determination of full column rank design matrix failed"),
         call. = FALSE)
  return(newX)
}

##############################################
######## get_rdX()
##############################################
#' Compute the 'Full' Rank-Deficient Design Matrix
#'
#'
#' @param model a model object; lmerMod or lmerModLmerTest.
#' @param do.warn throw a message if there is no data for some factor
#'   combinations.
#'
#' @return the rank-deficient design matrix
#' @author Rune Haubo B. Christensen
#' @keywords internal
#'
#' @importFrom stats as.formula model.frame terms model.matrix
get_rdX <- function(model, do.warn=TRUE) {
  # Compute rank-deficient design-matrix X using contr.treatment coding.
  #
  # model: terms(model), model.frame(model), fixef(model)
  Terms <- terms(model, fixed.only=TRUE)
  term_names <- attr(Terms, "term.labels")
  df <- model.frame(model)
  # Compute rank-deficient (full) design-matrix, X:
  rdXi <- if(length(term_names)) lapply(term_names, function(trm) {
    form <- as.formula(paste0("~ 0 + ", trm))
    model.matrix(form, data=df) # no contrast arg
  }) else list(model.matrix(~ 1, data=df)[, -1, drop=FALSE])
  rdX <- do.call(cbind, rdXi)
  param_names <- unlist(lapply(rdXi, colnames))
  # Potentially add intercept:
  has_intercept <- attr(Terms, "intercept") != 0
  if(has_intercept) {
    rdX <- cbind('(Intercept)'=rep(1, nrow(rdX)), rdX)
    param_names <- c("(Intercept)", param_names)
  }
  colnames(rdX) <- param_names
  # Warn/message if there are cells without data:
  is_zero <- which(colSums(rdX) == 0)
  if(do.warn && length(is_zero)) {
    txt <- sprintf("Missing cells for: %s. ",
                   paste(param_names[is_zero], collapse = ", "))
    # warning(paste(txt, "\nInterpret type III hypotheses with care."), call.=FALSE)
    message(paste(txt, "\nInterpret type III hypotheses with care."))
  }
  rdX
}

##############################################
######## extract_contrasts_type3
##############################################
#' @importFrom MASS ginv
#' @importFrom stats terms resid lm.fit
extract_contrasts_type3 <- function(model, X=NULL) {
  # Computes contrasts for type III tests with reference to treatment contrast coding
  # X: Optional full rank design matrix in contr.treatment coding
  Terms <- terms(model)
  term_names <- attr(Terms, "term.labels")
  if(is.null(X)) {
    X <- get_model_matrix(model, type="remake", contrasts="contr.treatment")
    X <- ensure_full_rank(X)
  }
  # Get 'complete' design matrix:
  rdX <- get_rdX(model, do.warn = TRUE) # treatment contrasts
  # cols for aliased coefs should be removed in X; not in rdX.
  # This makes ginv(X) unique!
  L <- zapsmall(t(MASS::ginv(X) %*% rdX)) # basic contrast matrix
  dimnames(L) <- list(colnames(rdX), colnames(X))
  # Orthogonalize contrasts for terms which are contained in other terms:
  map <- term2colX(Terms, X)
  is_contained <- containment(model)
  # Orthogonalize higher order terms before lower order terms:
  terms_order <- attr(Terms, "order")
  orthog_order <- term_names[order(terms_order, decreasing = TRUE)]
  # Only orthogonalize terms with columns in X ('keep' retains the
  # decreasing term order from 'orthog_order'):
  keep <- names(which(sapply(map[orthog_order], length) > 0))
  for(term in keep) {
    # if term is contained in other terms:
    if(length(contains <- is_contained[[term]]) > 0) {
      # orthogonalize cols in L for 'term' wrt. cols that contain 'term':
      L[, map[[term]]] <-
        zapsmall(resid(lm.fit(x=L[, unlist(map[contains]), drop=FALSE],
                              y=L[, map[[term]], drop=FALSE])))
    }
  }
  # Keep rows in L corresponding to model coefficients:
  L <- L[colnames(X), , drop=FALSE]
  # Extract list of contrast matrices from L - one for each term:
  Llist <- lapply(map[term_names], function(term) t(L[, term, drop=FALSE]))
  # Keep all non-zero rows:
  lapply(Llist, function(L) L[rowSums(abs(L)) > 1e-8, , drop=FALSE])
}
ordinal/R/clm.simple.R0000644000176200001440000001321313332052252014270 0ustar liggesusers#############################################################################
# Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen
#
# This file is part of the ordinal package for R (*ordinal*)
#
# *ordinal* is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# *ordinal* is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# and/or
# .
#############################################################################
## This file contains:
## An implementation of simple CLMs (simple_clm), i.e., CLMs without
## scale and nominal effects.

simple_clm <-
    function(formula, data, weights, start, subset, offset, doFit = TRUE,
             na.action, contrasts, model = TRUE, control = list(),
             link = c("logit", "probit", "cloglog", "loglog"),
             threshold = c("flexible", "symmetric", "symmetric2",
                           "equidistant"), ...)
{
    ## Initial argument matching and testing:
    mc <- match.call(expand.dots = FALSE)
    link <- match.arg(link)
    threshold <- match.arg(threshold)
    ## check for presence of formula:
    if(missing(formula)) stop("Model needs a formula")
    if(missing(contrasts)) contrasts <- NULL
    ## set control parameters:
    control <- do.call(clm.control, c(control, list(...)))

    ## Compute: y, X, wts, off, mf:
    if (missing(data)) data <- environment(formula)
    mf <- match.call(expand.dots = FALSE)
    m <- match(c("formula", "data", "subset", "weights", "na.action",
                 "offset"), names(mf), 0L)
    mf <- mf[c(1L, m)]
    mf$drop.unused.levels <- TRUE
    mf[[1L]] <- as.name("model.frame")
    mf <- eval(mf, parent.frame())
    ## Return model.frame?
    if(control$method == "model.frame") return(mf)
    y <- model.response(mf, "any") ## any storage mode
    if(!is.factor(y))
        stop("response needs to be a factor", call.=FALSE)
    ## design matrix:
    mt <- attr(mf, "terms")
    X <- if (!is.empty.model(mt))
        model.matrix(mt, mf, contrasts)
    else cbind("(Intercept)" = rep(1, NROW(y)))
    ## Test for intercept in X:
    Xint <- match("(Intercept)", colnames(X), nomatch = 0)
    if(Xint <= 0) {
        X <- cbind("(Intercept)" = rep(1, NROW(y)), X)
        warning("an intercept is needed and assumed in 'formula'",
                call.=FALSE)
    } ## intercept in X is guaranteed.
    wts <- getWeights(mf)
    off <- getOffsetStd(mf)
    ylevels <- levels(droplevels(y[wts > 0]))
    frames <- list(y=y, ylevels=ylevels, X=X)
    ## Compute the transpose of the Jacobian for the threshold function,
    ## tJac and the names of the threshold parameters, alpha.names:
    frames <- c(frames, makeThresholds(ylevels, threshold))
    ## test for column rank deficiency in design matrices:
    frames <- drop.cols(frames, silent=TRUE)

    ## Set envir rho with variables: B1, B2, o1, o2, wts, fitted:
    rho <- clm.newRho(parent.frame(), y=frames$y, X=frames$X,
                      NOM=NULL, S=NULL, weights=wts, offset=off,
                      S.offset=NULL, tJac=frames$tJac, control=control)

    ## Set starting values for the parameters:
    start <- set.start(rho, start=start, get.start=missing(start),
                       threshold=threshold, link=link, frames=frames)
    rho$par <- as.vector(start) ## remove attributes

    ## Set pfun, dfun and gfun in rho:
    setLinks(rho, link)

    ## Possibly return the environment rho without fitting:
    if(!doFit) return(rho)

    ## Fit the clm:
    if(control$method == "Newton")
        fit <- clm.fit.NR(rho, control)
    else
        fit <- clm.fit.optim(rho, control$method, control$ctrl)
### NOTE: we could add arg non.conv = c("error", "warn", "message") to
### allow non-converged fits to be returned.
## Modify and return results: res <- clm.finalize(fit, weights=wts, coef.names=frames$coef.names, aliased=frames$aliased) res$control <- control res$link <- link res$start <- start if(control$method == "Newton" && !is.null(start.iter <- attr(start, "start.iter"))) res$niter <- res$niter + start.iter res$threshold <- threshold res$call <- match.call() res$contrasts <- attr(frames$X, "contrasts") res$na.action <- attr(mf, "na.action") res$terms <- mt res$xlevels <- .getXlevels(mt, mf) res$tJac <- frames$tJac res$y.levels <- frames$ylevels ## Check convergence: conv <- conv.check(res, Theta.ok=TRUE, tol=control$tol) print.conv.check(conv, action=control$convergence) ## print convergence message res$vcov <- conv$vcov res$cond.H <- conv$cond.H res$convergence <- conv[!names(conv) %in% c("vcov", "cond.H")] res$info <- with(res, { data.frame("link" = link, "threshold" = threshold, "nobs" = nobs, "logLik" = formatC(logLik, digits=2, format="f"), "AIC" = formatC(-2*logLik + 2*edf, digits=2, format="f"), "niter" = paste(niter[1], "(", niter[2], ")", sep=""), ### NOTE: iterations to get starting values for scale models *are* ### included here. "max.grad" = formatC(maxGradient, digits=2, format="e") ## BIC is not part of output since it is not clear what ## the no. observations are. ) }) class(res) <- "clm" ## add model.frame to results list? if(model) res$model <- mf return(res) } ordinal/R/lgamma.R0000644000176200001440000000513613332052252013466 0ustar liggesusers############################################################################# # Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen # # This file is part of the ordinal package for R (*ordinal*) # # *ordinal* is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # *ordinal* is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # and/or # . ############################################################################# ## This file contains: ## [pdg]lgamma functions for the log-gamma distribution [lgamma]. ## Here glgamma is the gradient of the density function, dlgamma. ## The log-gamma distribution is ## used as a flexible link function in clm2() and clmm2(). 
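## A minimal (commented) sanity check, assuming the compiled C code is
## loaded: glgamma() is the gradient of the density dlgamma(), so it
## should agree with a central finite difference of the density:
## x <- seq(-2, 2, by = 0.5); lambda <- 0.5; h <- 1e-6
## fd <- (dlgamma(x + h, lambda) - dlgamma(x - h, lambda)) / (2 * h)
## max(abs(glgamma(x, lambda) - fd)) ## close to zero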
plgamma <- function(q, lambda, lower.tail = TRUE)
    .C("plgamma_C",
       q = as.double(q),
       length(q),
       as.double(lambda[1]),
       as.integer(lower.tail[1]),
       NAOK = TRUE)$q

plgammaR <- function(eta, lambda, lower.tail = TRUE) {
    q <- lambda
    v <- q^(-2) * exp(q * eta)
    if(q < 0) p <- 1 - pgamma(v, q^(-2))
    if(q > 0) p <- pgamma(v, q^(-2))
    if(isTRUE(all.equal(0, q, tolerance = 1e-6))) p <- pnorm(eta)
    if(!lower.tail) 1 - p else p
}

dlgamma <- function(x, lambda, log = FALSE) {
    stopifnot(length(lambda) == 1 &&
              length(log) == 1)
    .C("dlgamma_C",
       x = as.double(x),
       length(x),
       as.double(lambda),
       as.integer(log),
       NAOK = TRUE)$x
}

dlgammaR <- function(x, lambda, log = FALSE) {
    q <- lambda
    q.2 <- q^(-2)
    qx <- q * x
    log.d <- log(abs(q)) + q.2 * log(q.2) -
        lgamma(q.2) + q.2 * (qx - exp(qx))
    if (!log) exp(log.d) else log.d
}

glgamma <- function(x, lambda) {
    stopifnot(length(lambda) == 1)
    .C("glgamma_C",
       x = as.double(x),
       length(x),
       as.double(lambda[1]),
       NAOK = TRUE)$x
}

glgammaR <- function(x, lambda) {
    stopifnot(length(lambda) == 1)
    (1 - exp(lambda * x))/lambda * dlgamma(x, lambda)
}

glgammaR2 <- function(x, lambda) {
    stopifnot(length(lambda) == 1)
    if(lambda == 0) return(gnorm(x))
    y <- dlgamma(x, lambda)
    ok <- !is.na(y) & y > 0
    y[ok] <- y[ok] * (1 - exp(lambda * x[ok]))
    return(y)
}
ordinal/R/clm.R0000644000176200001440000001367013332052252013005 0ustar liggesusers#############################################################################
# Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen
#
# This file is part of the ordinal package for R (*ordinal*)
#
# *ordinal* is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
{ mc <- match.call(expand.dots = FALSE) link <- match.arg(link) threshold <- match.arg(threshold) if(missing(contrasts)) contrasts <- NULL if(missing(start)) start <- NULL checkArgs.clm(mc=match.call()) ## set control parameters: ## getControl.clm control <- do.call(clm.control, c(control, list(...))) ## Extract and process formulas: call.envir <- parent.frame(n=1) formulas <- get_clmFormulas(mc, call.envir) ## Get full model.frame and terms.objects: fullmf <- get_clm.mf(mc, formulas$fullForm, attr(formulas, "envir"), call.envir) if(control$method == "model.frame") return(fullmf) terms.list <- if(any(c("scale", "nominal") %in% names(formulas))) get_clmTerms(mc, formulas, call.envir) else list(formula=terms(fullmf)) ## Get y, X, weights, off etc.: design <- get_clmDesign(fullmf, terms.list, contrasts) lst <- namedList(doFit, control, link, threshold, start, formulas) if(control$method == "design") return(c(design, lst)) ## Get clm.struct: design <- c(design, makeThresholds(design$y.levels, threshold)) design <- drop.cols(design, silent=TRUE, drop.scale=FALSE) clm.struct <- c(design, lst) ## Fit model, check convergence, or return a model environment: fit <- clm.fit.default(clm.struct) if(doFit == FALSE) return(fit) ## Format output, prepare result: keep <- c("terms", "contrasts", "xlevels", # formula "S.terms", "S.contrasts", "S.xlevels", # scale "nom.terms", "nom.contrasts", "nom.xlevels", # nominal "na.action", "y", "y.levels", "control", "link", "threshold", "start", "formulas") res <- c(fit, clm.struct[match(keep, names(clm.struct), 0L)], list(formula=lst$formulas$formula, call=match.call())) ## res$tJac <- format_tJac(res$tJac, res$y.levels, clm.struct$alpha.names) res$info=get_clmInfoTab(res) if(model) res$model <- fullmf res <- res[sort(names(res))] class(res) <- "clm" res } clm.newRho <- function(parent=parent.frame(), y, X, NOM=NULL, S=NULL, weights, offset, S.offset=NULL, tJac, control=clm.control(), ...) ### Setting variables in rho: B1, B2, o1, o2, weights. { ## Make B1, B2, o1, o2 based on y, X and tJac: keep <- weights > 0 y[!keep] <- NA y <- droplevels(y) ntheta <- nlevels(y) - 1 y <- c(unclass(y)) y[is.na(y)] <- 0 n <- sum(keep) B2 <- 1 * (col(matrix(0, nrow(X), ntheta + 1)) == y) o1 <- c(1e5 * B2[keep, ntheta + 1]) - offset[keep] o2 <- c(-1e5 * B2[keep, 1]) - offset[keep] B1 <- B2[keep, -(ntheta + 1), drop = FALSE] B2 <- B2[keep, -1, drop = FALSE] ## adjust B1 and B2 for structured thresholds: B1 <- B1 %*% tJac B2 <- B2 %*% tJac ## update B1 and B2 with nominal effects: if(NCOL(NOM) > 1) { ## !is.null(NOM) && ncol(NOM) > 1) { ## if !is.null(NOM) and NOM is more than an intercept: if(control$sign.nominal == "negative") NOM[, -1] <- -NOM[, -1] LL1 <- lapply(1:ncol(NOM), function(x) B1 * NOM[keep, x]) B1 <- do.call(cbind, LL1) LL2 <- lapply(1:ncol(NOM), function(x) B2 * NOM[keep, x]) B2 <- do.call(cbind, LL2) } ## update B1 and B2 with location effects (X): nbeta <- NCOL(X) - 1 if(nbeta > 0) { if(control$sign.location == "negative") X <- -X B1 <- cbind(B1, X[keep, -1, drop = FALSE]) B2 <- cbind(B2, X[keep, -1, drop = FALSE]) } dimnames(B1) <- NULL dimnames(B2) <- NULL n.psi <- ncol(B1) ## no. linear model parameters ## there may be scale offset without scale predictors: sigma <- Soff <- if(is.null(S.offset)) rep(1, n) else exp(S.offset[keep]) ## save scale model matrix: k <- 0 if(!is.null(S)) { S <- S[keep, -1, drop=FALSE] dimnames(S) <- NULL k <- ncol(S) ## no. scale parameters } has.scale <- ## TRUE if scale has to be considered. 
(!is.null(S) || any(S.offset != 0)) ## initialize fitted values and weights: fitted <- numeric(length = n) wts <- weights[keep] lst <- namedList(B1, B2, o1, o2, n.psi, S, Soff, k, sigma, has.scale, fitted, wts, clm.nll, clm.grad, clm.hess) list2env(x=lst, parent=parent) } ordinal/R/gdist.R0000644000176200001440000000412313332052252013335 0ustar liggesusers############################################################################# # Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen # # This file is part of the ordinal package for R (*ordinal*) # # *ordinal* is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # *ordinal* is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # and/or # . ############################################################################# ## This file contains: ## Gradients of densities of common distribution functions on the form ## g[dist], where "dist" can be one of "logis", "norm", and ## "cauchy". These functions are used in Newton-Raphson algorithms ## when fitting CLMs and CLMMs in clm(), clm2(), clmm() and ## clmm2(). Similar gradients are implemented for the gumbel, ## log-gamma, and Aranda-Ordaz distributions. glogis <- function(x) ### gradient of dlogis .C("glogis_C", x = as.double(x), length(x), NAOK = TRUE)$x gnorm <- function(x) ### gradient of dnorm(x) wrt. x .C("gnorm_C", x = as.double(x), length(x), NAOK = TRUE)$x gcauchy <- function(x) ### gradient of dcauchy(x) wrt. x .C("gcauchy_C", x = as.double(x), length(x), NAOK = TRUE)$x glogisR <- function(x) { ### glogis in R res <- rep(0, length(x)) isFinite <- !is.infinite(x) x <- x[isFinite] isNegative <- x < 0 q <- exp(-abs(x)) q <- 2*q^2*(1 + q)^-3 - q*(1 + q)^-2 q[isNegative] <- -q[isNegative] res[isFinite] <- q res } gnormR <- function(x) ### gnorm in R -x * dnorm(x) gcauchyR <- function(x) ### gcauchy(x) in R -2*x/pi*(1+x^2)^-2 ordinal/R/utils.R0000644000176200001440000004467213333000633013375 0ustar liggesusers############################################################################# # Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen # # This file is part of the ordinal package for R (*ordinal*) # # *ordinal* is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # *ordinal* is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # and/or # . ############################################################################# ## This file contains: ## Various utility functions. setLinks <- function(rho, link) { ### The Aranda-Ordaz and log-gamma links are not supported in this ### version of clm. 
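### For example, link = "probit" sets pfun = pnorm, dfun = dnorm and
### gfun = gnorm (the gradient of dnorm) below.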
rho$pfun <- switch(link, logit = plogis, probit = pnorm, cloglog = function(x, lower.tail=TRUE) pgumbel(x, lower.tail=lower.tail, max=FALSE), cauchit = pcauchy, loglog = pgumbel, "Aranda-Ordaz" = function(x, lambda) pAO(x, lambda), "log-gamma" = function(x, lambda) plgamma(x, lambda)) rho$dfun <- switch(link, logit = dlogis, probit = dnorm, cloglog = function(x) dgumbel(x, max=FALSE), cauchit = dcauchy, loglog = dgumbel, "Aranda-Ordaz" = function(x, lambda) dAO(x, lambda), "log-gamma" = function(x, lambda) dlgamma(x, lambda)) rho$gfun <- switch(link, logit = glogis, probit = gnorm, cloglog = function(x) ggumbel(x, max=FALSE), loglog = ggumbel, cauchit = gcauchy, "Aranda-Ordaz" = function(x, lambda) gAO(x, lambda), ## shouldn't happen "log-gamma" = function(x, lambda) glgamma(x, lambda) ) rho$link <- link rho$nlambda <- if(rho$link %in% c("Aranda-Ordaz", "log-gamma")) 1 else 0 if(rho$link == "Aranda-Ordaz") rho$lambda <- 1 if(rho$link == "log-gamma") rho$lambda <- 0.1 } makeThresholds <- function(y.levels, threshold) { ## , tJac) { ### Generate the threshold structure summarized in the transpose of ### the Jacobian matrix, tJac. Also generating nalpha and alpha.names. ### args: ### y - response variable, a factor ### threshold - one of "flexible", "symmetric" or "equidistant" ## stopifnot(is.factor(y)) lev <- y.levels ntheta <- length(lev) - 1 ## if(!is.null(tJac)) { ## stopifnot(nrow(tJac) == ntheta) ## nalpha <- ncol(tJac) ## alpha.names <- colnames(tJac) ## if(is.null(alpha.names) || anyDuplicated(alpha.names)) ## alpha.names <- as.character(1:nalpha) ## dimnames(tJac) <- NULL ## } ## else { ## threshold structure identified by threshold argument: if(threshold == "flexible") { tJac <- diag(ntheta) nalpha <- ntheta alpha.names <- paste(lev[-length(lev)], lev[-1], sep="|") } if(threshold == "symmetric") { if(!ntheta >=2) stop("symmetric thresholds are only meaningful for responses with 3 or more levels", call.=FALSE) if(ntheta %% 2) { ## ntheta is odd nalpha <- (ntheta + 1)/2 ## No. threshold parameters tJac <- t(cbind(diag(-1, nalpha)[nalpha:1, 1:(nalpha-1)], diag(nalpha))) tJac[,1] <- 1 alpha.names <- c("central", paste("spacing.", 1:(nalpha-1), sep="")) } else { ## ntheta is even nalpha <- (ntheta + 2)/2 tJac <- cbind(rep(1:0, each = ntheta / 2), rbind(diag(-1, ntheta / 2)[(ntheta / 2):1,], diag(ntheta / 2))) tJac[,2] <- rep(0:1, each = ntheta / 2) alpha.names <- c("central.1", "central.2") if(nalpha > 2) alpha.names <- c(alpha.names, paste("spacing.", 1:(nalpha-2), sep="")) } } ## Assumes latent mean is zero: if(threshold == "symmetric2") { if(!ntheta >=2) stop("symmetric thresholds are only meaningful for responses with 3 or more levels", call.=FALSE) if(ntheta %% 2) { ## ntheta is odd nalpha <- (ntheta - 1)/2 ## No. threshold parameters tJac <- rbind(apply(-diag(nalpha), 1, rev), rep(0, nalpha), diag(nalpha)) } else { ## ntheta is even nalpha <- ntheta/2 tJac <- rbind(apply(-diag(nalpha), 1, rev), diag(nalpha)) } alpha.names <- paste("spacing.", 1:nalpha, sep="") } if(threshold == "equidistant") { if(!ntheta >=2) stop("equidistant thresholds are only meaningful for responses with 3 or more levels", call.=FALSE) tJac <- cbind(1, 0:(ntheta-1)) nalpha <- 2 alpha.names <- c("threshold.1", "spacing") } ## } return(list(tJac = tJac, nalpha = nalpha, alpha.names = alpha.names)) } getFitted <- function(eta1, eta2, pfun, ...) 
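### (Commented) illustration of the cancellation problem handled below,
### assuming a probit link (pfun = pnorm):
### pnorm(10) - pnorm(9) ## 0 -- both terms round to 1
### pnorm(9, lower.tail=FALSE) - pnorm(10, lower.tail=FALSE) ## ~1.13e-19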
{ ## eta1, eta2: linear predictors ## pfun: cumulative distribution function ## ## Compute fitted values while maintaining high precision in the ## result - if eta1 and eta2 are both large, fitted is the ## difference between two numbers very close to 1, which leads to ## imprecision and potentially errors. ## ## Note that (eta1 > eta2) always holds, hence (eta2 > 0) happens ## relatively rarely. k2 <- eta2 > 0 fitted <- pfun(eta1) - pfun(eta2) fitted[k2] <- pfun(eta2[k2], lower.tail=FALSE) - pfun(eta1[k2], lower.tail=FALSE) fitted } getFittedC <- function(eta1, eta2, link = c("logit", "probit", "cloglog", "loglog", "cauchit", "Aranda-Ordaz", "log-gamma"), lambda=1) ### Same as getFitted only this is implemented in C and handles all ### link functions including the flexible ones. { link <- match.arg(link) .Call("get_fitted", eta1, eta2, link, lambda) } getWeights <- function(mf) { ### mf - model.frame n <- nrow(mf) if(is.null(wts <- model.weights(mf))) wts <- rep(1, n) ## if (any(wts <= 0)) ## stop(gettextf("non-positive weights are not allowed"), ## call.=FALSE) ### NOTE: We do not remove observations where weights == 0, because ### that could be a somewhat surprising behaviour. It would also ### require that the model.frame be evaluated all over again to get ### the right response vector with the right number of levels. if(length(wts) && length(wts) != n) stop(gettextf("number of weights is %d should equal %d (number of observations)", length(wts), n), call.=FALSE) if(any(wts < 0)) stop(gettextf("negative weights are not allowed"), call.=FALSE) ## if(any(wts == 0)) { ## y <- model.response(mf, "any") ## if(any(table(y[wts > 0]) == 0)) ## stop(gettextf("zero positive weights for one or more response categories"), ## call.=FALSE) ## } return(as.double(wts)) } getOffset <- function(mf, terms) { ### mf - model.frame n <- nrow(mf) off <- rep(0, n) if(!is.null(o <- attr(terms, "offset"))) { if(length(o) > 1) stop("only one offset term allowed in each formula", call.=FALSE) varnm <- attr(terms, "variables") ## deparse all variable names - character vector: varnm <- unlist(lapply(as.list(varnm), deparse)[-1]) off <- mf[, varnm[o]] } ## off <- as.vector(mf[, o]) if(length(off) && length(off) != n) stop(gettextf("number of offsets is %d should equal %d (number of observations)", length(off), n), call.=FALSE) return(as.double(off)) } getOffsetStd <- function(mf) { n <- nrow(mf) if(is.null(off <- model.offset(mf))) off <- rep(0, n) if(length(off) && length(off) != n) stop(gettextf("number of offsets is %d should equal %d (number of observations)", length(off), n), call.=FALSE) return(as.double(off)) } getFullForm <- function(form, ..., envir=parent.frame()) { ### collect terms in several formulas in a single formula ### sets the environment of the resulting formula to envir. forms <- list(...) 
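    ## (Commented) sketch of the intended behaviour, assuming a
    ## two-sided 'form': getFullForm(y ~ a, ~ b, ~ c) gives y ~ a + b + c.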
    if(lf <- length(forms)) {
        rhs <- character(0)
        ## Collect rhs terms in a single vector of right-hand sides:
        for(i in 1:lf) {
            rhs <- c(rhs, Deparse(forms[[i]][[2]]))
            if(length(forms[[i]]) >= 3)
                rhs <- c(rhs, Deparse(forms[[i]][[3]]))
        }
        ## add '+' in between terms:
        rhs <- paste(rhs, collapse=" + ")
        ## combine if 'deparse(form)' is a (long) vector:
        form2 <- paste(deparse(form, width.cutoff=500L), collapse=" ")
        ## combine form2 and rhs into a single string:
        form <- paste(form2, rhs, sep=" + ")
    }
    return(as.formula(form, env=envir))
}

## getFullForm <- function(form, ..., envir=parent.frame()) {
## ### collect terms in several formulas in a single formula (on the rhs)
## ### sets the environment of the resulting formula to envir.
##     forms <- list(form, ...)
##     allVars <- unlist(sapply(forms, all.vars))
##     rhs <- paste(allVars, collapse=" + ")
##     form <- paste("~", rhs)
##     return(as.formula(form, env=envir))
## }

## getCtrlArgs <- function(control, extras) {
## ### Recover control arguments from clmm.control and extras (...):
## ###
##     ## Collect control arguments in list:
##     ctrl.args <- c(extras, control$method, control$useMatrix,
##                    control$ctrl, control$optCtrl)
##     ## Identify the two occurrences of "trace", delete them, and add
##     ## trace=1 or trace=-1 to the list of arguments:
##     which.trace <- which(names(ctrl.args) == "trace")
##     trace.sum <- sum(unlist(ctrl.args[which.trace]))
##     ctrl.args <- ctrl.args[-which.trace]
##     ## remove duplicated arguments:
##     ctrl.args <- ctrl.args[!duplicated(names(ctrl.args))]
##     if(trace.sum >= 1) ctrl.args$trace <- 1
##     if(trace.sum >= 2 || trace.sum <= -1) ctrl.args$trace <- -1
##     ## return the updated list of control parameters:
##     do.call("clmm.control", ctrl.args)
## }

getCtrlArgs <- function(control, extras) {
### Recover control arguments from clmm.control and extras (...):
###
    if(!is.list(control))
        stop("'control' should be a list")
    ## Collect control arguments in list:
    ## 1) assuming 'control' is a call to clmm.control:
    ctrl.args <-
        if(setequal(names(control), names(clmm.control())))
            c(extras, control["method"], control["useMatrix"],
              control$ctrl, control$optCtrl)
        ## assuming 'control' is specified with control=list(<args>):
        else c(extras, control)
### NOTE: having c(extras, control) rather than c(control, extras)
### means that extras have precedence over control.
    ## Identify the two occurrences of "trace", delete them, and add
    ## trace=1 or trace=-1 to the list of arguments:
    which.trace <- which(names(ctrl.args) == "trace")
    trace.sum <- sum(unlist(ctrl.args[which.trace]))
    if(trace.sum) ctrl.args <- ctrl.args[-which.trace]
    ## remove duplicated arguments:
    ctrl.args <- ctrl.args[!duplicated(names(ctrl.args))]
    if(trace.sum >= 1) ctrl.args$trace <- 1
    if(trace.sum >= 2 || trace.sum <= -1) ctrl.args$trace <- -1
    ## return the updated list of control parameters:
    do.call("clmm.control", ctrl.args)
}

Trace <- function(iter, stepFactor, val, maxGrad, par, first=FALSE) {
    t1 <- sprintf(" %3d: %-5e: %.3f: %1.3e: ",
                  iter, stepFactor, val, maxGrad)
    t2 <- formatC(par)
    if(first)
        cat("iter: step factor: Value: max|grad|: Parameters:\n")
    cat(t1, t2, "\n")
}

response.name <- function(terms) {
    vars <- as.character(attr(terms, "variables"))
    vars[1 + attr(terms, "response")]
}

getB <- function(y, NOM=NULL, X=NULL, offset=NULL, tJac=NULL) {
### NOTE: Is this function ever used?
### NOTE: no tests that arguments conform.
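### NOTE: getB parallels the construction of B1, B2, o1 and o2 in
### clm.newRho() (see clm.R), but without the weights and sign handling.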
nlev <- nlevels(y) n <- length(y) B2 <- 1 * (col(matrix(0, n, nlev)) == c(unclass(y))) o1 <- c(1e5 * B2[, nlev]) - offset o2 <- c(-1e5 * B2[,1]) - offset B1 <- B2[, -(nlev), drop = FALSE] B2 <- B2[, -1, drop = FALSE] ## adjust B1 and B2 for structured thresholds: if(!is.null(tJac)) { B1 <- B1 %*% tJac B2 <- B2 %*% tJac } ## update B1 and B2 with nominal effects: if(NCOL(NOM) > 1) { ## !is.null(NOM) && ncol(NOM) > 1) { ## if !is.null(NOM) and NOM is more than an intercept: LL1 <- lapply(1:ncol(NOM), function(x) B1 * NOM[,x]) B1 <- do.call(cbind, LL1) LL2 <- lapply(1:ncol(NOM), function(x) B2 * NOM[,x]) B2 <- do.call(cbind, LL2) } ## update B1 and B2 with location effects (X): nbeta <- NCOL(X) - 1 if(NCOL(X) > 1) { B1 <- cbind(B1, -X[, -1, drop = FALSE]) B2 <- cbind(B2, -X[, -1, drop = FALSE]) } dimnames(B1) <- NULL dimnames(B2) <- NULL namedList(B1, B2, o1, o2) } Deparse <- function(expr, width.cutoff = 500L, backtick = mode(expr) %in% c("call", "expression", "(", "function"), control = c("keepInteger", "showAttributes", "keepNA"), nlines = -1L) deparse(expr=expr, width.cutoff= width.cutoff, backtick=backtick, control=control, nlines=nlines) getContrasts <- function(terms, contrasts) { if(is.null(contrasts)) return(NULL) term.labels <- attr(terms, "term.labels") contrasts[names(contrasts) %in% term.labels] } checkContrasts <- function(terms, contrasts) { ### Check that contrasts are not specified for absent factors and warn ### about them term.labels <- attr(terms, "term.labels") nm.contr <- names(contrasts) notkeep <- nm.contr[!nm.contr %in% term.labels] msg <- if(length(notkeep) > 2) "variables '%s' are absent: their contrasts will be ignored" else "variable '%s' is absent: its contrasts will be ignored" if(length(notkeep)) warning(gettextf(msg, paste(notkeep, collapse=", ")), call.=FALSE) invisible() } get_clmInfoTab <- function(object, ...) { names <- c("link", "threshold", "nobs", "logLik", "edf", "niter", "maxGradient", "cond.H") stopifnot(all(names %in% names(object))) info <- with(object, { data.frame("link" = link, "threshold" = threshold, "nobs" = nobs, "logLik" = formatC(logLik, digits=2, format="f"), "AIC" = formatC(-2*logLik + 2*edf, digits=2, format="f"), "niter" = paste(niter[1], "(", niter[2], ")", sep=""), ### NOTE: iterations to get starting values for scale models *are* ### included here. "max.grad" = formatC(maxGradient, digits=2, format="e"), "cond.H" = formatC(cond.H, digits=1, format="e") ## BIC is not part of output since it is not clear what ## the no. observations are. 
) }) info } format_tJac <- function(tJac, y.levels, alpha.names) { lev <- y.levels rownames(tJac) <- paste(lev[-length(lev)], lev[-1], sep="|") colnames(tJac) <- alpha.names tJac } extractFromFrames <- function(frames, fullmf) { lst <- list(y.levels=frames$y.levels, na.action=attr(fullmf, "na.action"), tJac=format_tJac(frames)) lstX <- list(contrasts=attr(frames$X, "contrasts"), terms=frames$terms, xlevels=.getXlevels(frames$terms, fullmf)) lst <- c(lst, lstX) if(!is.null(frames[["S"]])) lst <- c(lst, list(S.contrasts=attr(frames$S, "contrasts"), S.terms=frames$S.terms, S.xlevels=.getXlevels(frames$S.terms, fullmf))) if(!is.null(frames[["NOM"]])) lst <- c(lst, list(nom.contrasts=attr(frames$NOM, "contrasts"), nom.terms=frames$nom.terms, nom.xlevels=.getXlevels(frames$nom.terms, fullmf))) lst } formatTheta <- function(alpha, tJac, x, sign.nominal) { ## x: alpha, tJac, nom.terms, NOM, nom.contrasts, nom.xlevels, Theta.ok <- TRUE if(is.null(x[["NOM"]])) { ## no nominal effects Theta <- alpha %*% t(tJac) colnames(Theta) <- rownames(tJac) return(namedList(Theta, Theta.ok)) } x$nom.assign <- attr(x$NOM, "assign") args <- c("nom.terms", "nom.assign") args <- c("nom.terms") if(any(sapply(args, function(txt) is.null(x[[txt]])))) { ## Nominal effects, but we cannot compute Theta warning("Cannot assess if all thresholds are increasing", call.=FALSE) return(namedList(Theta.ok)) } ## Get matrix of thresholds; Theta: Theta.list <- getThetamat(terms=x$nom.terms, alpha=alpha, assign=attr(x$NOM, "assign"), contrasts=x$nom.contrasts, tJac=tJac, xlevels=x$nom.xlevels, sign.nominal=sign.nominal) ## Test that (finite) thresholds are increasing: if(all(is.finite(unlist(Theta.list$Theta)))) { th.increasing <- apply(Theta.list$Theta, 1, function(th) all(diff(th) >= 0)) if(!all(th.increasing)) Theta.ok <- FALSE } Theta <- if(length(Theta.list) == 2) with(Theta.list, cbind(mf.basic, Theta)) else Theta.list$Theta alpha.mat <- matrix(alpha, ncol=ncol(tJac), byrow=TRUE) colnames(alpha.mat) <- colnames(tJac) rownames(alpha.mat) <- attr(x$NOM, "orig.colnames") ## Return namedList(Theta, alpha.mat, Theta.ok) } ## We don't need this function anymore since the terms objects now ## always contain dataClasses and predvars attributes. ## get_dataClasses <- function(mf) { ## if(!is.null(Terms <- attr(mf, "terms")) && ## !is.null(dataCl <- attr(Terms, "dataClasses"))) ## return(dataCl) ## sapply(mf, .MFclass) ## } ## Returns a named list, where the names are the deparsed actual ## arguments: namedList <- function(...) { setNames(list(...), nm=sapply(as.list(match.call()), deparse)[-1]) } ## a <- 1 ## b <- 2 ## c <- 3 ## d <- list(e=2, f=factor(letters[rep(1:2, 2)])) ## g <- matrix(runif(9), 3) ## ## namedList(a, b, c) ## namedList(a, b, c, d, g) ## ## res <- namedList(d, g) ## names(res) ordinal/R/derivatives.R0000644000176200001440000001310513332052252014550 0ustar liggesusers############################################################################# # Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen # # This file is part of the ordinal package for R (*ordinal*) # # *ordinal* is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # *ordinal* is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # and/or # . ############################################################################# ## This file contains: ## Functions for finite difference computations of derivatives ## (gradient and Hessian) of user-specified functions. deriv12 <- function(fun, x, delta=1e-4, fx=NULL, ...) { ### Compute gradient and Hessian at the same time (to save computing ### time) nx <- length(x) fx <- if(!is.null(fx)) fx else fun(x, ...) stopifnot(length(fx) == 1) H <- array(NA, dim=c(nx, nx)) g <- numeric(nx) for(j in 1:nx) { ## Diagonal elements: xadd <- xsub <- x xadd[j] <- x[j] + delta xsub[j] <- x[j] - delta fadd <- fun(xadd, ...) fsub <- fun(xsub, ...) H[j, j] <- (fadd - 2 * fx + fsub) / delta^2 g[j] <- (fadd - fsub) / (2 * delta) ## Off diagonal elements: for(i in 1:nx) { if(i >= j) break ## Compute upper triangular elements: xaa <- xas <- xsa <- xss <- x xaa[c(i, j)] <- x[c(i, j)] + c(delta, delta) xas[c(i, j)] <- x[c(i, j)] + c(delta, -delta) xsa[c(i, j)] <- x[c(i, j)] + c(-delta, delta) xss[c(i, j)] <- x[c(i, j)] - c(delta, delta) H[i, j] <- H[j, i] <- (fun(xaa, ...) - fun(xas, ...) - fun(xsa, ...) + fun(xss, ...)) / (4 * delta^2) } } list(gradient = g, Hessian = H) } myhess <- function(fun, x, fx=NULL, delta=1e-4, ...) { nx <- length(x) fx <- if(!is.null(fx)) fx else fun(x, ...) stopifnot(length(fx) == 1) H <- array(NA, dim=c(nx, nx)) for(j in 1:nx) { ## Diagonal elements: xadd <- xsub <- x xadd[j] <- x[j] + delta xsub[j] <- x[j] - delta H[j, j] <- (fun(xadd, ...) - 2 * fx + fun(xsub, ...)) / delta^2 ## Upper triangular (off diagonal) elements: for(i in 1:nx) { if(i >= j) break xaa <- xas <- xsa <- xss <- x xaa[c(i, j)] <- x[c(i, j)] + c(delta, delta) xas[c(i, j)] <- x[c(i, j)] + c(delta, -delta) xsa[c(i, j)] <- x[c(i, j)] + c(-delta, delta) xss[c(i, j)] <- x[c(i, j)] - c(delta, delta) H[j, i] <- H[i, j] <- (fun(xaa, ...) - fun(xas, ...) - fun(xsa, ...) + fun(xss, ...)) / (4 * delta^2) } } H } mygrad <- function(fun, x, delta = 1e-4, method = c("central", "forward", "backward"), ...) { method <- match.arg(method) nx <- length(x) if(method %in% c("central", "forward")) { Xadd <- matrix(rep(x, nx), nrow=nx, byrow=TRUE) + diag(delta, nx) fadd <- apply(Xadd, 1, fun, ...) } if(method %in% c("central", "backward")) { Xsub <- matrix(rep(x, nx), nrow=nx, byrow=TRUE) - diag(delta, nx) fsub <- apply(Xsub, 1, fun, ...) ## eval.parent perhaps? } res <- switch(method, "forward" = (fadd - fun(x, ...)) / delta, "backward" = (fun(x, ...) - fsub) / delta, "central" = (fadd - fsub) / (2 * delta) ) res } grad.ctr3 <- function(fun, x, delta=1e-4, ...) { nx <- length(x) Xadd <- matrix(rep(x, nx), nrow=nx, byrow=TRUE) + diag(delta, nx) Xsub <- matrix(rep(x, nx), nrow=nx, byrow=TRUE) - diag(delta, nx) fadd <- apply(Xadd, 1, fun, ...) fsub <- apply(Xsub, 1, fun, ...) ## eval.parent perhaps? (fadd - fsub) / (2 * delta) } grad.ctr2 <- function(fun, x, delta=1e-4, ...) { ans <- x for(i in seq_along(x)) { xadd <- xsub <- x xadd[i] <- x[i] + delta xsub[i] <- x[i] - delta ans[i] <- (fun(xadd, ...) - fun(xsub, ...)) / (2 * delta) } ans } grad.ctr <- function(fun, x, delta=1e-4, ...) { sapply(seq_along(x), function(i) { xadd <- xsub <- x xadd[i] <- x[i] + delta xsub[i] <- x[i] - delta (fun(xadd, ...) - fun(xsub, ...)) / (2 * delta) }) } grad <- grad.ctr grad.ctr4 <- function(fun, x, delta=1e-4, ...) 
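### (Commented) check of the central-difference gradient: for
### f(x) = sum(x^2) the exact gradient is 2*x, so
### grad.ctr4(function(z) sum(z^2), x = c(1, 2)) ## ~ c(2, 4)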
{
### - checking finiteness of x and fun-values
### - taking care to avoid floating point errors
### - using h=delta rather than h=x*delta (important for small or
### large x?)
    if(!all(is.finite(x)))
        stop("Cannot compute gradient: non-finite argument")
    ans <- x ## return values
    for(i in seq_along(x)) {
        xadd <- xsub <- x ## reset fun arguments
        xadd[i] <- x[i] + delta
        xsub[i] <- x[i] - delta
        ans[i] <- (fun(xadd, ...) - fun(xsub, ...)) / (xadd[i] - xsub[i])
### NOTE: xadd[i] - xsub[i] != 2*delta with floating point arithmetic.
    }
    if(!all(is.finite(ans))) {
        warning("cannot compute gradient: non-finite function values occurred")
        ans[!is.finite(ans)] <- Inf
    }
    ans
}
ordinal/R/warning_functions.R0000644000176200001440000000250313332052253015761 0ustar liggesusers#############################################################################
# Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen
#
# This file is part of the ordinal package for R (*ordinal*)
#
# *ordinal* is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# *ordinal* is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# and/or
# .
#############################################################################
givesWarnings <- function(expr)
    countWarnings(expr) > 0L

countWarnings <- function(expr)
{
    .number_of_warnings <- 0L
    frame_number <- sys.nframe()
    ans <- withCallingHandlers(expr, warning = function(w)
    {
        assign(".number_of_warnings", .number_of_warnings + 1L,
               envir = sys.frame(frame_number))
        invokeRestart("muffleWarning")
    })
    .number_of_warnings
}
ordinal/R/gumbel.R0000644000176200001440000000744213332052252013505 0ustar liggesusers#############################################################################
# Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen
#
# This file is part of the ordinal package for R (*ordinal*)
#
# *ordinal* is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# *ordinal* is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# and/or
# .
#############################################################################
## This file contains:
## [pdqrg]gumbel functions for the gumbel distribution.
## Here ggumbel is the gradient of the density function, dgumbel.

pgumbel <-
    function(q, location = 0, scale = 1, lower.tail = TRUE, max = TRUE)
### CDF for Gumbel max and min distributions
### Currently only unit length location and scale are supported.
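### (Commented) closed-form checks with the default location and scale,
### assuming the compiled code matches the R reference versions below:
### q <- seq(-3, 3, 0.5)
### all.equal(pgumbel(q), exp(-exp(-q))) ## max/loglog case
### all.equal(pgumbel(q, max=FALSE), 1 - exp(-exp(q))) ## min/cloglog case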
{
    if(max) ## right skew, loglog link
        .C("pgumbel_C",
           q = as.double(q),
           length(q),
           as.double(location)[1],
           as.double(scale)[1],
           as.integer(lower.tail),
           NAOK = TRUE)$q
    else ## left skew, cloglog link
        .C("pgumbel2_C",
           q = as.double(q),
           length(q),
           as.double(location)[1],
           as.double(scale)[1],
           as.integer(lower.tail),
           NAOK = TRUE)$q
}

pgumbelR <- function(q, location = 0, scale = 1, lower.tail = TRUE)
### R equivalent of pgumbel()
{
    q <- (q - location)/scale
    p <- exp(-exp(-q))
    if (!lower.tail) 1 - p else p
}

pgumbel2R <- function(q, location = 0, scale = 1, lower.tail = TRUE)
{
    q <- (-q - location)/scale
    p <- exp(-exp(-q))
    if (!lower.tail) p else 1 - p
}

dgumbel <-
    function(x, location = 0, scale = 1, log = FALSE, max = TRUE)
### PDF for the Gumbel max and min distributions
{
    if(max) ## right skew, loglog link
        .C("dgumbel_C",
           x = as.double(x),
           length(x),
           as.double(location)[1],
           as.double(scale)[1],
           as.integer(log),
           NAOK = TRUE)$x
    else ## left skew, cloglog link
        .C("dgumbel2_C",
           x = as.double(x),
           length(x),
           as.double(location)[1],
           as.double(scale)[1],
           as.integer(log),
           NAOK = TRUE)$x
}

dgumbelR <- function(x, location = 0, scale = 1, log = FALSE)
### dgumbel in R
{
    q <- (x - location)/scale
    log.d <- -exp(-q) - q - log(scale)
    if (!log) exp(log.d) else log.d
}

dgumbel2R <- function(x, location = 0, scale = 1, log = FALSE)
{
    q <- (-x - location)/scale
    log.d <- -exp(-q) - q - log(scale)
    if (!log) exp(log.d) else log.d
}

ggumbel <- function(x, max = TRUE) {
### gradient of dgumbel(x) wrt. x
    if(max) ## right skew, loglog link
        .C("ggumbel_C",
           x = as.double(x),
           length(x),
           NAOK = TRUE)$x
    else ## left skew, cloglog link
        .C("ggumbel2_C",
           x = as.double(x),
           length(x),
           NAOK = TRUE)$x
}

ggumbelR <- function(x){
### ggumbel in R
    q <- exp(-x)
    ifelse(q == Inf, 0, {
        eq <- exp(-q)
        -eq*q + eq*q*q
    })
}

ggumbel2R <- function(x) -ggumbelR(-x)

rgumbel <- function(n, location = 0, scale = 1, max = TRUE) {
    if(max)
        location - scale * log(-log(runif(n)))
    else
        location + scale * log(-log(runif(n)))
}

qgumbel <-
    function(p, location = 0, scale = 1, lower.tail = TRUE, max = TRUE)
{
    if(!lower.tail) p <- 1 - p
    if(max) ## right skew, loglog link
        location - scale * log(-log(p))
    else ## left skew, cloglog link
        location + scale * log(-log(1 - p))
}
ordinal/R/clm.fitter.R0000644000176200001440000003743713332052252014310 0ustar liggesusers#############################################################################
# Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen
#
# This file is part of the ordinal package for R (*ordinal*)
#
# *ordinal* is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# *ordinal* is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# and/or
# .
#############################################################################
## This file contains:
## Functions to fit/estimate CLMs (clm.fit.NR, clm.fit.optim) and
## functions implementing the negative log-likelihood, its gradient
## and hessian (.nll, .grad, .hess). These functions are rarely
## called directly from outside the package.
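## A small (commented) illustration of the Cholesky-based Newton step
## used in clm.fit.NR below -- 'H' and 'g' are made-up stand-ins for the
## Hessian and gradient:
## H <- crossprod(matrix(rnorm(9), 3)) + diag(3) ## positive definite
## g <- rnorm(3)
## ch <- chol(H) ## H = t(ch) %*% ch
## step <- backsolve(ch, backsolve(ch, g, transpose = TRUE))
## all.equal(solve(H, g), step) ## TRUE: step solves H %*% step = g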
clm.fit.NR <- function(rho, control = list()) ### The main work horse: Where the actual fitting of the clm goes on. ### Fitting the clm via modified Newton-Raphson with step halving. ### -------- Assumes the existence of the following functions: ### clm.nll - negative log-likelihood ### clm.grad - gradient of nll wrt. par ### clm.hess - hessian of nll wrt. par ### Trace - for trace information { control <- do.call(clm.control, control) stepFactor <- 1 innerIter <- modif.iter <- abs.iter <- 0L conv <- 2L ## Convergence flag (iteration limit reached) nll <- rho$clm.nll(rho) if(!is.finite(nll)) stop("Non-finite log-likelihood at starting value") ## do.newton <- ## rel.conv <- FALSE ## stephalf <- TRUE ## Newton-Raphson algorithm: for(i in 1:(control$maxIter + 1L)) { gradient <- rho$clm.grad(rho) maxGrad <- max(abs(gradient)) if(control$trace > 0) { Trace(iter=i+innerIter-1, stepFactor, nll, maxGrad, rho$par, first=(i==1)) if(control$trace > 1 && i > 1) { cat("\tgrad: ") cat(paste(formatC(gradient, digits=3, format="e"))) cat("\n\tstep: ") cat(paste(formatC(-step, digits=3, format="e"))) cat("\n\teigen: ") cat(paste(formatC(eigen(hessian, symmetric=TRUE, only.values=TRUE)$values, digits=3, format="e"))) cat("\n") } } abs.conv <- (maxGrad < control$gradTol) if(abs.conv) abs.iter <- abs.iter + 1L hessian <- rho$clm.hess(rho) ## Compute cholesky factor of Hessian: ch = Ut U ch <- try(chol(hessian), silent=TRUE) ### NOTE: solve(hessian, gradient) is not good enough because it will ### compute step for negative-definite Hessians and we don't want ### that. ### OPTION: What if Hessian is closely singular but slightly positive? ### Could we do something better in that case? if(inherits(ch, "try-error")) { if(abs.conv) { ## step.ok not true. conv <- 1L break ## cannot meet relative criterion. } ## If Hessian is non-positive definite: min.ev <- min(eigen(hessian, symmetric=TRUE, only.values=TRUE)$values) inflation.factor <- 1 ## Inflate diagonal of Hessian to make it positive definite: inflate <- abs(min.ev) + inflation.factor hessian <- hessian + diag(inflate, nrow(hessian)) if(control$trace > 0) cat(paste("Hessian is singular at iteration", i-1, "inflating diagonal with", formatC(inflate, digits=5, format="f"), "\n")) ch <- try(chol(hessian), silent=TRUE) if(inherits(ch, "try-error")) stop(gettextf("Cannot compute Newton step at iteration %d", i-1), call.=FALSE) modif.iter <- modif.iter + 1L ## do.newton <- FALSE } else modif.iter <- 0L if(modif.iter >= control$maxModIter) { conv <- 4L break } ## solve U'y = g for y, then ## solve U step = y for step: step <- c(backsolve(ch, backsolve(ch, gradient, transpose=TRUE))) rel.conv <- (max(abs(step)) < control$relTol) ## Test if step is in a descent direction - ## otherwise use step <- grad / max|grad|: ## if(crossprod(gradient, step) < 0) { ## if(control$trace > 0) ## cat("Newton step is not in descent direction; using gradient instead\n") ## step <- c(gradient / max(abs(gradient))) ## } else if(abs.conv && rel.conv) { conv <- 0L ## no need to step back as stephalf was false so the new ## par are just better. 
            break
        }
        ## update parameters:
        rho$par <- rho$par - stepFactor * step
        nllTry <- rho$clm.nll(rho)
        lineIter <- 0
        stephalf <- (nllTry > nll)
### NOTE: sometimes nllTry > nll just due to noise, so we also check
### reduction in gradient for small diffs:
        if(stephalf && abs(nll - nllTry) < 1e-10)
            stephalf <- maxGrad < max(abs(rho$clm.grad(rho)))
        ## Assess convergence:
        ## (only attempt to satisfy rel.conv if abs.conv is true and
        ## it is possible to take the full newton step)
### OPTION: And if 'step' is not close to 1 or 1/2, but
### small. Otherwise this just indicates that the parameter is
### infinite.
        ## if(abs.conv && !step.ok) {
        if(abs.conv && stephalf) {
            conv <- 1L
            ## we need to step back to the par for which abs.conv
            ## was true:
            rho$par <- rho$par + stepFactor * step
            rho$clm.nll(rho)
            break
        }
        ## if(abs.conv && rel.conv) {
        ##     conv <- 0L
        ##     rho$par <- rho$par + stepFactor * step
        ##     rho$clm.nll(rho)
        ##     ## no need to step back as stephalf was false so the new
        ##     ## par are just better.
        ##     break
        ## }
        if(abs.conv && abs.iter >= 5L) {
            ## Cannot satisfy rel.conv in 5 iterations after satisfying
            ## abs.conv. Probably some parameters are unbounded.
            conv <- 1L
            break
        }
        ## Step halving if nll increases:
        while(stephalf) {
            stepFactor <- stepFactor/2
            rho$par <- rho$par + stepFactor * step
            nllTry <- rho$clm.nll(rho)
            lineIter <- lineIter + 1
            if(control$trace > 0) {
                cat("step halving:\n")
                cat("nll reduction: ",
                    formatC(nll - nllTry, digits=5, format="e"), "\n")
                Trace(i+innerIter-1, stepFactor, nll, maxGrad,
                      rho$par, first = FALSE)
            }
            if(lineIter > control$maxLineIter){
                conv <- 3L
                break
            }
            innerIter <- innerIter + 1
            stephalf <- (nllTry > nll)
            if(stephalf && abs(nll - nllTry) < 1e-10)
                stephalf <- (maxGrad < max(abs(rho$clm.grad(rho))))
        } ## end step halving
        if(conv == 3L) break
        if(control$trace > 0)
            cat("nll reduction: ",
                formatC(nll - nllTry, digits=5, format="e"), "\n")
        nll <- nllTry
        ## Double stepFactor if needed:
        stepFactor <- min(1, 2 * stepFactor)
    } ## end Newton iterations
    message <- switch(as.character(conv),
                      "0" = "Absolute and relative convergence criteria were met",
                      "1" = "Absolute convergence criterion was met, but relative criterion was not met",
                      "2" = "iteration limit reached",
                      "3" = "step factor reduced below minimum",
                      "4" = "maximum number of consecutive Newton modifications reached")
    if(conv <= 1L && control$trace > 0) {
        cat("\nOptimizer converged! ", message, fill = TRUE)
    }
    if(conv > 1 && control$trace > 0) {
        cat("\nOptimization failed ", message, fill = TRUE)
    }
    ## return results:
    gradient <- c(rho$clm.grad(rho))
    res <- list(par = rho$par,
                gradient = gradient, ##as.vector(gradient),
                ## Hessian = hessian,
                Hessian = rho$clm.hess(rho), ## ensure hessian is evaluated
                ## at optimum
                logLik = -nll,
                convergence = conv,
                ## 0: abs and rel criteria met
                ## 1: abs criteria met, rel criteria not met
                ## 2: iteration limit reached
                ## 3: step factor reduced below minimum
                message = message,
                maxGradient = max(abs(gradient)),
                niter = c(outer = i-1, inner = innerIter),
                fitted = rho$fitted)
    return(res)
}

clm.fit.optim <-
    function(rho, method = c("ucminf", "nlminb", "optim"),
             control=list())
{
    method <- match.arg(method)
    ## optimize the likelihood:
    optRes <-
        switch(method,
               "nlminb" = nlminb(rho$par,
                                 function(par) clm.nll(rho, par),
                                 function(par) clm.grad_direct(rho, par),
                                 control=control),
               "ucminf" = ucminf(rho$par,
                                 function(par) clm.nll(rho, par),
                                 function(par) clm.grad_direct(rho, par),
                                 control=control),
               "optim" = optim(rho$par,
                               function(par) clm.nll(rho, par),
                               function(par) clm.grad_direct(rho, par),
                               method="BFGS",
                               control=control)
               )
    ## save results:
    rho$par <- optRes[[1]]
    res <- list(par = rho$par,
                logLik = -clm.nll(rho),
                gradient = clm.grad(rho),
                Hessian = clm.hess(rho),
                fitted = rho$fitted)
    res$maxGradient = max(abs(res$gradient))
    res$optRes <- optRes
    res$niter <- switch(method,
                        "nlminb" = optRes$evaluations,
                        "ucminf" = c(optRes$info["neval"], 0),
                        "optim" = optRes$counts)
    res$convergence <- switch(method,
                              "nlminb" = optRes$convergence,
                              "ucminf" = optRes$convergence,
                              "optim" = optRes$convergence)
    return(res)
}

clm.fit.flex <- function(rho, control=list()) {
    lwr <- if(rho$link == "Aranda-Ordaz")
        c(rep(-Inf, length(rho$par) - 1), 1e-5)
    else rep(-Inf, length(rho$par))
    ## optimize the likelihood:
    optRes <- nlminb(rho$par, function(par, rho) clm.nll.flex(rho, par),
                     lower=lwr, rho=rho)
    ## save results:
    rho$par <- optRes$par
    res <- list(par = rho$par,
                lambda = setNames(rho$par[length(rho$par)], "lambda"),
                logLik = -clm.nll.flex(rho),
                gradient = numDeriv::grad(func=function(par, rho)
                    clm.nll.flex(rho, par), x = rho$par, rho=rho),
                Hessian = numDeriv::hessian(func=function(par, rho)
                    clm.nll.flex(rho, par), x = rho$par, rho=rho),
                fitted = rho$fitted)
    res$maxGradient = max(abs(res$gradient))
    res$optRes <- optRes
    res$niter <- optRes$evaluations
    res$convergence <- optRes$convergence
    return(res)
}

clm.nll.flex <- function(rho, par) {
    if(!missing(par)) rho$par <- par
    with(rho, {
        if(k > 0)
            sigma <- Soff * exp(drop(S %*% par[n.psi + 1:k]))
### NOTE: we have to divide by sigma even if k=0 since there may be an
### offset but no predictors in the scale model:
        eta1 <- (drop(B1 %*% par[1:n.psi]) + o1)/sigma
        eta2 <- (drop(B2 %*% par[1:n.psi]) + o2)/sigma
        fitted <- pfun(eta1, par[length(par)]) - pfun(eta2, par[length(par)])
    })
    if(all(is.finite(rho$fitted)) && all(rho$fitted > 0))
### NOTE: Need test here because some fitted <= 0 if thresholds are
### not ordered increasingly.
        -sum(rho$wts * log(rho$fitted))
    else Inf
}

clm.nll <- function(rho, par) {
    if(!missing(par)) rho$par <- par
    with(rho, {
        if(k > 0)
            sigma <- Soff * exp(drop(S %*% par[n.psi + 1:k]))
### NOTE: we have to divide by sigma even if k=0 since there may be an
### offset but no predictors in the scale model:
        eta1 <- (drop(B1 %*% par[1:n.psi]) + o1)/sigma
        eta2 <- (drop(B2 %*% par[1:n.psi]) + o2)/sigma
    })
### NOTE: getFitted is not found from within rho, so we have to
### evaluate it outside of rho
    rho$fitted <- getFittedC(rho$eta1, rho$eta2, rho$link,
                             rho$par[length(rho$par)])
    if(all(is.finite(rho$fitted)) && all(rho$fitted > 0))
### NOTE: Need test here because some fitted <= 0 if thresholds are
### not ordered increasingly.
        -sum(rho$wts * log(rho$fitted))
    else Inf
}

## clm.nll <- function(rho) { ## negative log-likelihood
## ### For linear models
##     with(rho, {
##         eta1 <- drop(B1 %*% par) + o1
##         eta2 <- drop(B2 %*% par) + o2
##     })
## ### NOTE: getFitted is not found from within rho, so we have to
## ### evaluate it outside of rho
##     rho$fitted <- getFittedC(rho$eta1, rho$eta2, rho$link)
##     if(all(rho$fitted > 0))
## ### NOTE: Need test here because some fitted <= 0 if thresholds are
## ### not ordered increasingly.
## ### It is assumed that 'all(is.finite(pr)) == TRUE'
##         -sum(rho$wts * log(rho$fitted))
##     else Inf
## }

## clm.grad <- function(rho) { ## gradient of the negative log-likelihood
## ### return: vector of gradients
## ### For linear models
##     with(rho, {
##         p1 <- dfun(eta1)
##         p2 <- dfun(eta2)
##         wtpr <- wts/fitted
##         dpi.psi <- B1 * p1 - B2 * p2
##         -crossprod(dpi.psi, wtpr)
## ### NOTE: It is assumed that all(fitted > 0) == TRUE and that
## ### all(is.finite(c(p1, p2))) == TRUE
##     })
## }

clm.grad <- function(rho) {
### requires that clm.nll has been called prior to
### clm.grad.
    with(rho, {
        p1 <- if(!nlambda) dfun(eta1) else dfun(eta1, lambda)
        p2 <- if(!nlambda) dfun(eta2) else dfun(eta2, lambda)
        wtpr <- wts/fitted
        C2 <- B1*p1/sigma - B2*p2/sigma
        if(k <= 0) return(-crossprod(C2, wtpr))
        C3 <- -(eta1 * p1 - eta2 * p2) * S
        return(-crossprod(cbind(C2, C3), wtpr))
### NOTE: C2 and C3 are used by clm.hess
    })
}

clm.grad_direct <- function(rho, par) {
### does not require that clm.nll has been called prior to
### clm.grad.
    clm.nll(rho, par)
    clm.grad(rho)
}

## clm.hess <- function(rho) { ## hessian of the negative log-likelihood
## ### return Hessian matrix
## ### For linear models
##     with(rho, {
##         dg.psi <- crossprod(B1 * gfun(eta1) * wtpr, B1) -
##             crossprod(B2 * gfun(eta2) * wtpr, B2)
##         -dg.psi + crossprod(dpi.psi, (dpi.psi * wtpr / fitted))
## ### NOTE: It is assumed that all(fitted > 0) == TRUE and that
## ### all(is.finite(c(g1, g2))) == TRUE
##     })
## }

clm.hess <- function(rho) {
### requires that clm.grad has been called prior to this.
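### The Hessian is assembled in blocks below: D for the threshold and
### location parameters, F for the scale parameters and Et for the
### cross terms, i.e. H = rbind(cbind(D, Et), cbind(t(Et), F)).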
with(rho, { g1 <- if(!nlambda) gfun(eta1) else gfun(eta1, lambda) g2 <- if(!nlambda) gfun(eta2) else gfun(eta2, lambda) wtprpr <- wtpr/fitted ## Phi3 dg.psi <- crossprod(B1 * g1 * wtpr / sigma^2, B1) - crossprod(B2 * g2 * wtpr / sigma^2, B2) ## upper left: D <- dg.psi - crossprod(C2, (C2 * wtprpr)) if(k <= 0) return(-D) ## no scale predictors ## upper right (lower left transpose): wtprsig <- wtpr/sigma epg1 <- p1 + g1*eta1 epg2 <- p2 + g2*eta2 Et <- crossprod(B1, -wtprsig * epg1 * S) - crossprod(B2, -wtprsig * epg2 * S) - crossprod(C2, wtprpr * C3) ## lower right: F <- -crossprod(S, wtpr * ((eta1*p1 - eta2*p2)^2 / fitted - (eta1*epg1 - eta2*epg2)) * S) ## combine and return hessian: H <- rbind(cbind(D , Et), cbind(t(Et), F)) return(-H) }) } ordinal/R/clmm.R0000644000176200001440000007650613460411313013170 0ustar liggesusers############################################################################# # Copyright (c) 2010-2019 Rune Haubo Bojesen Christensen # # This file is part of the ordinal package for R (*ordinal*) # # *ordinal* is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # *ordinal* is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # and/or # . ############################################################################# ## This file contains: ## Implementation of Cumulative Link Mixed Models in clmm(). if(getRversion() >= '2.15.1') utils::globalVariables(c("ths", "link", "threshold", "optRes", "neval", "Niter", "tJac", "y.levels")) clmm <- function(formula, data, weights, start, subset, na.action, contrasts, Hess = TRUE, model = TRUE, link = c("logit", "probit", "cloglog", "loglog", "cauchit"), ##, "Aranda-Ordaz", "log-gamma"), ## lambda, doFit = TRUE, control = list(), nAGQ = 1L, threshold = c("flexible", "symmetric", "symmetric2", "equidistant"), ...) { ### Extract the matched call and initial testing: mc <- match.call(expand.dots = FALSE) ### OPTION: Possibly call clm() when there are no random effects? link <- match.arg(link) threshold <- match.arg(threshold) if(missing(formula)) stop("Model needs a formula") if(missing(contrasts)) contrasts <- NULL ## set control parameters: control <- getCtrlArgs(control, list(...)) nAGQ <- as.integer(round(nAGQ)) formulae <- clmm.formulae(formula=formula) ## mf, y, X, wts, off, terms: frames <- clmm.frames(modelcall=mc, formulae=formulae, contrasts) ### QUEST: What should 'method="model.frame"' return? Do we want Zt ### included here as well? if(control$method == "model.frame") return(frames) ## Test rank deficiency and possibly drop some parameters: ## X is guarantied to have an intercept at this point. frames$X <- drop.coef(frames$X, silent=FALSE) ## Compute the transpose of the Jacobian for the threshold function, ## tJac and the names of the threshold parameters, alpha.names: ths <- makeThresholds(levels(frames$y), threshold) ## Set rho environment: rho <- with(frames, { clm.newRho(parent.frame(), y=y, X=X, weights=wts, offset=off, tJac=ths$tJac) }) ## compute grouping factor list, and Zt and ST matrices: retrms <- getREterms(frames = frames, formulae$formula) ## For each r.e. 
term, test if Z has more columns than rows to detect ## unidentifiability: test_no_ranef(Zt_list=retrms$retrms, frames=frames, checkRanef=control$checkRanef) ### OPTION: save (the evaluated) formula in frames, so we only need the ### frames argument to getREterms() ? use.ssr <- (retrms$ssr && !control$useMatrix) ## Set inverse link function and its two first derivatives (pfun, ## dfun and gfun) in rho: setLinks(rho, link) ## Compute list of dimensions for the model fit: rho$dims <- getDims(frames=frames, ths=ths, retrms=retrms) ## Update model environment with r.e. information: if(use.ssr) { rho.clm2clmm.ssr(rho=rho, retrms = retrms, ctrl=control$ctrl) ## Set starting values for the parameters: if(missing(start)) start <- c(fe.start(frames, link, threshold), 0) rho$par <- start nbeta <- rho$nbeta <- ncol(frames$X) - 1 ## no. fixef parameters nalpha <- rho$nalpha <- ths$nalpha ## no. threshold parameters ntau <- rho$ntau <- length(retrms$gfList) ## no. variance parameters stopifnot(is.numeric(start) && length(start) == (nalpha + nbeta + ntau)) } else { rho.clm2clmm(rho=rho, retrms=retrms, ctrl=control$ctrl) if(missing(start)) { rho$fepar <- fe.start(frames, link, threshold) rho$ST <- STstart(rho$ST) start <- c(rho$fepar, ST2par(rho$ST)) } else { stopifnot(is.list(start) && length(start) == 2) stopifnot(length(start[[1]]) == rho$dims$nfepar) stopifnot(length(start[[2]]) == rho$dims$nSTpar) rho$fepar <- as.vector(start[[1]]) rho$ST <- par2ST(as.vector(start[[2]]), rho$ST) } } ### OPTION: set starting values in a more elegant way. ## Set AGQ parameters: set.AGQ(rho, nAGQ, control, use.ssr) ## Possibly return the environment, rho without fitting: if(!doFit) return(rho) ## Fit the clmm: fit <- if(use.ssr) clmm.fit.ssr(rho, control = control$optCtrl, method=control$method, Hess) else clmm.fit.env(rho, control = control$optCtrl, method=control$method, Hess) ## Modify and return results: fit$nAGQ <- nAGQ fit$link <- link fit$start <- start fit$threshold <- threshold fit$call <- match.call() fit$formula <- formulae$formula fit$gfList <- retrms$gfList fit$control <- control res <- clmm.finalize(fit=fit, frames=frames, ths=ths, use.ssr) ## add model.frame to results list? if(model) res$model <- frames$mf return(res) } clmm.formulae <- function(formula) { ## Evaluate the formula in the enviroment in which clmm was called ## (parent.frame(2)) to get it evaluated properly: form <- eval.parent(formula, 2) ## get the environment of the formula. If this does not have an ## environment (it could be a character), then use the calling environment. 
form.envir <- if(!is.null(env <- environment(form))) env else parent.frame(2) ## ensure 'formula' is a formula-object: form <- tryCatch(formula(if(is.character(form)) form else deparse(form), env = form.envir), error = identity) ## report error if the formula cannot be interpreted if(inherits(form, "error")) stop("unable to interpret 'formula'") environment(form) <- form.envir ## Construct a formula with all (fixed and random) variables ## (fullForm) and a formula with only fixed-effects variables ## (fixedForm): fixedForm <- nobars(form) ## ignore terms with '|' fullForm <- subbars(form) # substitute `+' for `|' ## Set the appropriate environments: environment(fullForm) <- environment(fixedForm) <- environment(form) <- form.envir list(formula = form, fullForm = fullForm, fixedForm = fixedForm) } clmm.frames <- function(modelcall, formulae, contrasts) { ## Extract full model.frame (fullmf): m <- match(c("data", "subset", "weights", "na.action", "offset"), names(modelcall), 0) mf <- modelcall[c(1, m)] mf$formula <- formulae$fullForm mf$drop.unused.levels <- TRUE mf[[1]] <- as.name("model.frame") fixedmf <- mf ## save call for later modification and evaluation fullmf <- eval(mf, envir = parent.frame(2)) ## '2' to get out of ## clmm.frames and clmm ### OPTION: Consider behavior if data is a matrix? fixedmf$formula <- formulae$fixedForm fixedmf <- eval(fixedmf, envir = parent.frame(2)) attr(fullmf, "terms") <- attr(fixedmf, "terms") ## return: list(mf = fullmf, y = getY(fullmf), X = getX(fullmf, fixedmf, contrasts), wts = getWeights(fullmf), off = getOffsetStd(fullmf), terms = attr(fixedmf, "terms") ) } getY <- function(mf) { ### Extract model response: y <- model.response(mf) if(!is.factor(y)) stop("response needs to be a factor") y } getX <- function(fullmf, fixedmf, contrasts) { fixedTerms <- attr(fixedmf, "terms") X <- model.matrix(fixedTerms, fullmf, contrasts) n <- nrow(X) ## remove intercept from X: Xint <- match("(Intercept)", colnames(X), nomatch = 0) if(Xint <= 0) { X <- cbind("(Intercept)" = rep(1, n), X) warning("an intercept is needed and assumed") } ## intercept in X is guaranteed. X } getZt <- function(retrms) { ZtList <- lapply(retrms, '[[', "Zt") Zt <- do.call(rbind, ZtList) Zt@Dimnames <- vector("list", 2) Zt } getREterms <- function(frames, formula) { ### NOTE: Need to parse mf - not just fullmf because we need the model ### fits for an identifiability check below. fullmf <- droplevels(with(frames, mf[wts > 0, ])) barlist <- expandSlash(findbars(formula[[3]])) ### NOTE: make sure 'formula' is appropriately evaluated and returned ### by clmm.formulae if(!length(barlist)) stop("No random effects terms specified in formula") term.names <- unlist(lapply(barlist, deparse)) names(barlist) <- unlist(lapply(barlist, function(x) deparse(x[[3]]))) ### NOTE: Deliberately naming the barlist elements by grouping factors ### and not by r.e. terms. ## list of grouping factors for the random terms: rel <- lapply(barlist, function(x) { ff <- eval(substitute(as.factor(fac)[,drop = TRUE], list(fac = x[[3]])), fullmf) ## per random term transpose indicator matrix: Zti <- as(ff, "sparseMatrix") ## per random term model matrix: mm <- model.matrix(eval(substitute(~ expr, list(expr = x[[2]]))), fullmf) Zt = do.call(rbind, lapply(seq_len(ncol(mm)), function(j) { Zti@x <- mm[,j] Zti } )) ### QUEST: can we drop rows from Zt when g has missing values in terms ### of the form (1 + g | f)?
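### NOTE: For a term like (1 + x | g), 'mm' above has the columns
### "(Intercept)" and "x", and Zt is built by stacking one copy of the
### transposed indicator matrix of g per column of mm, with the nonzero
### entries replaced by that column. Illustrative sketch for g with 2
### levels observed on 3 rows:
## mm <- cbind(1, x) # 3 x 2
## Zt: rbind(<Zti with x-slot mm[, 1]>, <Zti with x-slot mm[, 2]>) # 4 x 3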
ST <- matrix(0, ncol(mm), ncol(mm), dimnames = list(colnames(mm), colnames(mm))) list(f = ff, Zt = Zt, ST = ST) ### OPTION: return the i'th element of Lambda here. }) q <- sum(sapply(rel, function(x) nrow(x$Zt))) ### OPTION: If the model is nested (all gr.factors are nested), then ### order the columns of Zt, such that they come in blocks ### corresponding to the levels of the coarsest grouping factor. Each ### block of Zt-columns contain first the j'th level of the 1st gr.fac. ### followed by columns for the 2nd gr.fac. ### ## single simple random effect on the intercept? ssr <- (length(barlist) == 1 && as.character(barlist[[1]][[2]]) == "1") ## order terms by decreasing number of levels in the factor but don't ## change the order if this is already true: nlev <- sapply(rel, function(re) nlevels(re$f)) if (any(diff(nlev) > 0)) rel <- rel[rev(order(nlev))] nlev <- nlev[rev(order(nlev))] ## separate r.e. terms from the factor list: retrms <- lapply(rel, "[", -1) names(retrms) <- term.names ## list of grouping factors: gfl <- lapply(rel, "[[", "f") ## which r.e. terms are associated with which grouping factors: attr(gfl, "assign") <- seq_along(gfl) ## only save unique g.f. and update assign attribute: fnms <- names(gfl) ## check for repeated factors: if (length(fnms) > length(ufn <- unique(fnms))) { ## check that the lengths of the number of levels coincide gfl <- gfl[match(ufn, fnms)] attr(gfl, "assign") <- match(fnms, ufn) names(gfl) <- ufn } ## test that all variables for the random effects are factors and ## have at least 3 levels: stopifnot(all(sapply(gfl, is.factor))) stopifnot(all(sapply(gfl, nlevels) > 2)) ## no. r.e. per level for each of the r.e. terms qi <- unlist(lapply(rel, function(re) ncol(re$ST))) stopifnot(q == sum(nlev * qi)) dims <- list(n = nrow(fullmf), ## no. observations nlev.re = nlev, ## no. levels for each r.e. term nlev.gf = sapply(gfl, nlevels), ## no. levels for each grouping factor qi = qi, nretrms = length(rel), ## no. r.e. terms ngf = length(gfl), ## no. unique grouping factors ## total no. random effects: q = sum(nlev * qi), ## = sum(sapply(rel, function(re) nrow(re$Zt))) ## no. r.e. var-cov parameters: nSTpar = sum(sapply(qi, function(q) q * (q + 1) / 2)) ) ## c(retrms=retrms, list(gfList = gfl, dims = dims, ssr = ssr)) list(retrms=retrms, gfList = gfl, dims = dims, ssr = ssr) } test_no_ranef <- function(Zt_list, frames, checkRanef=c("warn", "error", "message")) { ## For each r.e. term, test if Z has more columns than rows to detect ## unidentifiability: checkfun <- switch(checkRanef, "warn" = function(...) warning(..., call.=FALSE), "error" = function(...) stop(..., call.=FALSE), "message" = message) nrow_fullmf <- with(frames, nrow(mf[wts > 0, ])) REterm.names <- names(Zt_list) for(i in seq_along(Zt_list)) { Zti <- Zt_list[[i]][["Zt"]] if(nrow(Zti) > ncol(Zti) || (all(frames$wts == 1) && nrow(Zti) == ncol(Zti))) checkfun(gettextf("no. random effects (=%d) >= no. observations (=%d) for term: (%s)", nrow(Zti), ncol(Zti), REterm.names[i])) } ## Test if total no. random effects >= total nobs: q <- sum(sapply(Zt_list, function(x) nrow(x$Zt))) if(all(frames$wts == 1) && q >= nrow_fullmf) checkfun(gettextf("no. random effects (=%d) >= no.
observations (=%d)", q, nrow_fullmf)) invisible(NULL) ### NOTE: q > nrow(fullmf) is (sometimes) allowed if some frames$wts > 1 ### ### NOTE: if all(frames$wts == 1) we cannot have observation-level ### random effects so we error if nrow(Zti) >= ncol(Zti) ### ### NOTE: Could probably also throw an error if q >= sum(frames$wts), ### but I am not sure about that. ### ### NOTE: It would be better to test the rank of the Zt matrix, but ### also computationally more intensive. ### } fe.start <- function(frames, link, threshold) { ## get starting values from clm: fit <- with(frames, clm.fit(y=y, X=X, weights=wts, offset=off, link=link, threshold=threshold)) unname(coef(fit)) } getDims <- function(frames, ths, retrms) ### Collect and compute all relevant dimensions in a list { dims <- retrms$dims ## n is also on retrms$dims dims$n <- sum(frames$wts > 0) dims$nbeta <- ncol(frames$X) - 1 dims$nalpha <- ths$nalpha dims$nfepar <- dims$nalpha + dims$nbeta dims } rho.clm2clmm <- function(rho, retrms, ctrl) ### update environment, rho returned by clm.newRho(). { ### OPTION: write default list of control arguments? ## control arguments are used when calling update.u(rho) rho$ctrl = ctrl ## compute Zt design matrix: rho$Zt <- getZt(retrms$retrms) rho$ST <- lapply(retrms$retrms, `[[`, "ST") rho$allST1 <- all(sapply(rho$ST, ncol) == 1) ## Lambda <- getLambda(rho$ST, rho$dims$nlev.re) ## Vt <- crossprod(Lambda, rho$Zt) ## rho$L <- Cholesky(tcrossprod(Vt), ## LDL = TRUE, super = FALSE, Imult = 1) rho$L <- Cholesky(tcrossprod(crossprod(getLambda(rho$ST, rho$dims$nlev.re), rho$Zt)), LDL = TRUE, super = FALSE, Imult = 1) rho$Niter <- 0L ## no. conditional mode updates rho$neval <- 0L ## no. evaluations of the log-likelihood function rho$u <- rho$uStart <- rep(0, rho$dims$q) rho$.f <- if(package_version(packageDescription("Matrix")$Version) > "0.999375-30") 2 else 1 } getLambda <- function(ST, nlev) { ### ST: a list of ST matrices ### nlev: a vector of no. random effects levels .local <- function(ST, nlev) { if(ncol(ST) == 1) .symDiagonal(n=nlev, x = rep(as.vector(ST[1, 1]), nlev)) else kronecker(as(ST, "sparseMatrix"), .symDiagonal(n=nlev)) ## This would make sense if the columns in Z (rows in Zt) were ordered differently: ## kronecker(Diagonal(n=nlev), ST) ### NOTE: .symDiagonal() appears to be faster than Diagonal() here. } stopifnot(length(ST) == length(nlev)) res <- if(length(ST) == 1) .local(ST[[1]], nlev) else .bdiag(lapply(seq_along(ST), function(i) .local(ST[[i]], nlev[i]))) ## coerce to diagonal matrix if relevant: if(all(sapply(ST, ncol) == 1)) as(res, "diagonalMatrix") else as(res, "CsparseMatrix") ### QUESTION: Are there any speed gains by coerce'ing Lambda to ### 'diagonalMatrix' or 'CsparseMatrix'? ### QUESTION: What is the best way to form the kronecker product in .local()? } getNLA <- function(rho, par, which=rep(TRUE, length(par))) { ### negative log-likelihood by the Laplace approximation if(!missing(par)) { setPar.clmm(rho, par, which) if(any(!is.finite(par))) stop(gettextf(paste(c("Non-finite parameters not allowed:", formatC(par, format="g")), collapse=" "))) } rho$neval <- rho$neval + 1L if(!update.u(rho)) return(Inf) if(any(rho$D < 0)) return(Inf) logDetD <- c(suppressWarnings(determinant(rho$L)$modulus)) - rho$dims$q * log(2*pi) / 2 rho$nll + logDetD } nll.u <- function(rho) { ## negative log-likelihood if(rho$allST1) { ## are all ST matrices scalars? 
rho$varVec <- rep.int(unlist(rho$ST), rho$dims$nlev.re) b.expanded <- as.vector(crossprod(rho$Zt, rho$varVec * rho$u)) ### NOTE: Working with Lambda when it is diagonal will slow things ### down significantly. } else { rho$ZLt <- crossprod(getLambda(rho$ST, rho$dims$nlev.re), rho$Zt) b.expanded <- as.vector(crossprod(rho$ZLt, rho$u)) } rho$eta1Fix <- drop(rho$B1 %*% rho$fepar) rho$eta2Fix <- drop(rho$B2 %*% rho$fepar) rho$eta1 <- as.vector(rho$eta1Fix - b.expanded + rho$o1) rho$eta2 <- as.vector(rho$eta2Fix - b.expanded + rho$o2) rho$fitted <- getFittedC(rho$eta1, rho$eta2, rho$link) if(any(!is.finite(rho$fitted)) || any(rho$fitted <= 0)) nll <- Inf else nll <- -sum(rho$wts * log(rho$fitted)) - sum(dnorm(x=rho$u, mean=0, sd=1, log=TRUE)) nll } nllFast.u <- function(rho) { ## negative log-likelihood ## Does not update X %*% beta - fixed effect part. if(rho$allST1) { rho$varVec <- rep.int(unlist(rho$ST), rho$dims$nlev.re) b.expanded <- as.vector(crossprod(rho$Zt, rho$varVec * rho$u)) } else { rho$ZLt <- crossprod(getLambda(rho$ST, rho$dims$nlev.re), rho$Zt) b.expanded <- as.vector(crossprod(rho$ZLt, rho$u)) } rho$eta1 <- as.vector(rho$eta1Fix - b.expanded + rho$o1) rho$eta2 <- as.vector(rho$eta2Fix - b.expanded + rho$o2) rho$fitted <- getFittedC(rho$eta1, rho$eta2, rho$link) if(any(!is.finite(rho$fitted)) || any(rho$fitted <= 0)) nll <- Inf else nll <- -sum(rho$wts * log(rho$fitted)) - sum(dnorm(x=rho$u, mean=0, sd=1, log=TRUE)) nll } grad.u <- function(rho){ ## gradient of nll wrt. u (random effects) ### should only be called with up to date values of eta1, eta2, par ## compute phi1: rho$p1 <- rho$dfun(rho$eta1) rho$p2 <- rho$dfun(rho$eta2) rho$wtpr <- rho$wts/rho$fitted phi1 <- as.vector(rho$wtpr * (rho$p1 - rho$p2)) if(rho$allST1) (rho$Zt %*% phi1) * rho$varVec + rho$u else rho$ZLt %*% phi1 + rho$u } hess.u <- function(rho) { ## Hessian of nll wrt. u (random effects) ### should only be called with up-to-date values of eta1, eta2, par, ### p1, p2 g1 <- rho$gfun(rho$eta1) ## does not need to be saved in rho g2 <- rho$gfun(rho$eta2) ## does not need to be saved in rho phi2 <- rho$wts * ( ((rho$p1 - rho$p2) / rho$fitted)^2 - ( (g1 - g2) / rho$fitted) ) ## This may happen if the link function [pfun, dfun and gfun] ## evaluates its arguments inaccurately: if(any(phi2 < 0)) return(FALSE) if(rho$allST1) Vt <- crossprod(Diagonal(x = rho$varVec), tcrossprod(rho$Zt, Diagonal(x = sqrt(phi2)))) else Vt <- rho$ZLt %*% Diagonal(x = sqrt(phi2)) rho$L <- update(rho$L, Vt, mult = 1) return(TRUE) } getPar.clmm <- function(rho) ### Extract vector of parameters from model-environment rho c(rho$fepar, ST2par(rho$ST)) setPar.clmm <- function(rho, par, which=rep(TRUE, length(par))) { ### Set parameters in model environment rho. which <- as.logical(as.vector(which)) oldpar <- getPar.clmm(rho) stopifnot(length(which) == length(oldpar)) stopifnot(sum(which) == length(par)) ## overwrite selected elements of oldpar: oldpar[which] <- as.vector(par) ## assign oldpar to rho$fepar and rho$ST: rho$fepar <- oldpar[1:rho$dims$nfepar] rho$ST <- par2ST(oldpar[-(1:rho$dims$nfepar)], rho$ST) } ST2par <- function(STlist) { ### Compute parameter vector from list of ST matrices. unlist(lapply(STlist, function(ST) { ## if(ncol(ST) == 1) as.vector(ST) else as.vector(c(diag(ST), ST[lower.tri(ST)])) })) } par2ST <- function(STpar, STlist) { ### Fill in parameters in list of ST matrices. Reverse of ST2par().
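### For example (illustrative): a 2 x 2 ST matrix with diagonal
### (s1, s2) and lower off-diagonal element t21 is stored as the
### parameter vector c(s1, s2, t21) -- diagonal entries first, then
### the lower.tri() elements in column-major order; a 1 x 1 ST matrix
### is stored as its single element.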
nc <- sapply(STlist, ncol) asgn <- rep(1:length(nc), sapply(nc, function(qi) qi * (qi + 1) / 2)) STparList <- split(STpar, asgn) stopifnot(length(asgn) == length(ST2par(STlist))) for(i in 1:length(STlist)) { par <- STparList[[i]] if(nc[i] > 1) { diag(STlist[[i]]) <- par[1:nc[i]] STlist[[i]][lower.tri(STlist[[i]])] <- par[-(1:nc[i])] } else { STlist[[i]][] <- par } } STlist } STatBoundary <- function(STpar, STlist, tol=1e-3) { ### Compute dummy vector of which ST parameters are at the ### boundary of the parameter space (variance-parameters that are ### zero). STcon <- STconstraints(STlist) stopifnot(length(STpar) == length(STcon)) as.integer(STcon == 1 & STpar <= tol) } paratBoundary <- function(rho, tol=1e-3) ### Compute dummy vector of which parameters are at the boundary of ### the parameter space. c(rep(0, rho$dims$nfepar), STatBoundary(ST2par(rho$ST), rho$ST, tol)) paratBoundary2 <- function(rho, tol=1e-3) { STcon <- STconstraints(rho$ST) c(rep(0L, rho$dims$nfepar), as.integer(STcon == 1 & ST2par(rho$ST) < tol)) } STconstraints <- function(STlist) { ### Compute indicator vector of which variance parameters are constrained above zero. The ### variance parameters are non-negative, while the covariance parameters are not ### constrained. ### ### This function can also be used to generate starting values for the covar. parameters. nc <- sapply(STlist, ncol) unlist(lapply(nc, function(qi) { c(rep(1L, qi), rep(0L, qi * (qi - 1) / 2)) } )) } parConstraints <- function(rho) ### Returns a dummy vector of the same length as getPar.clmm(rho) ### indicating which parameters are constrained to be non-negative. c(rep(0, rho$dims$nfepar), STconstraints(rho$ST)) STstart <- function(STlist) par2ST(STconstraints(STlist), STlist) isNested <- function(f1, f2) ### Borrowed from lme4/R/lmer.R ### Checks if f1 is nested within f2. { f1 <- as.factor(f1) f2 <- as.factor(f2) stopifnot(length(f1) == length(f2)) sm <- as(new("ngTMatrix", i = as.integer(f2) - 1L, j = as.integer(f1) - 1L, Dim = c(length(levels(f2)), length(levels(f1)))), "CsparseMatrix") all(diff(sm@p) < 2) } set.AGQ <- function(rho, nAGQ, control, ssr) { ## Stop if arguments are incompatible: if(nAGQ != 1 && !ssr) stop("Quadrature methods are not available with more than one random effects term", call.=FALSE) if(nAGQ != 1 && control$useMatrix) stop("Quadrature methods are not available with 'useMatrix = TRUE'", call.=FALSE) rho$nAGQ <- nAGQ if(nAGQ %in% 0:1) return(invisible()) ghq <- gauss.hermite(abs(nAGQ)) rho$ghqns <- ghq$nodes rho$ghqws <- if(nAGQ > 0) ghq$weights ## AGQ else log(ghq$weights) + (ghq$nodes^2)/2 ## GHQ } clmm.fit.env <- function(rho, control = list(), method=c("nlminb", "ucminf"), Hess = FALSE) ### Fit the clmm by optimizing the Laplace likelihood. ### Returns a list with elements: ### ### coefficients ### ST ### logLik ### Niter ### dims ### u ### optRes ### fitted.values ### L ### Zt ### ranef ### condVar ### gradient ### (Hessian) { method <- match.arg(method) if(method == "ucminf") warning("cannot use ucminf optimizer for this model, using nlminb instead") ## Compute lower bounds on the parameter vector lwr <- c(-Inf, 0)[parConstraints(rho) + 1] ## hack to remove ucminf control settings: keep <- !names(control) %in% c("grad", "grtol") control <- if(length(keep)) control[keep] else list() ## Fit the model with Laplace: fit <- try(nlminb(getPar.clmm(rho), function(par) getNLA(rho, par), lower=lwr, control=control), silent=TRUE) ### OPTION: Make it possible to use the ucminf optimizer with ### log-transformed std-par instead.
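### A sketch of that option (hypothetical, simplified to models where
### all ST parameters are standard deviations) -- optimize over log
### standard deviations so the problem becomes unconstrained:
## obj <- function(lpar) { lpar[-(1:rho$dims$nfepar)] <-
## exp(lpar[-(1:rho$dims$nfepar)]); getNLA(rho, lpar) }
## fit <- ucminf(lpar.start, obj, control = control) # 'lpar.start' hypothetical
### at the cost of never representing variances that are exactly zero.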
## Check if optimizer converged without error: if(inherits(fit, "try-error")) stop("optimizer ", method, " failed to converge", call.=FALSE) ### OPTION: Could have an argument c(warn, fail, ignore) to optionally ### return the fitted model despite the optimizer failing. ## Ensure parameters in rho are set at the optimum: setPar.clmm(rho, fit$par) ## Ensure random mode estimation at optimum: nllFast.u(rho) update.u(rho) names(rho$ST) <- names(rho$dims$nlev.re) ## Prepare list of results: res <- list(coefficients = fit$par[1:rho$dims$nfepar], ST = rho$ST, logLik = -fit$objective, dims = rho$dims, ### OPTION: Should we evaluate hess.u(rho) to make sure rho$L contains ### the right values corresponding to the optimum? u = rho$u, optRes = fit, fitted.values = rho$fitted, L = rho$L, Zt = rho$Zt ) ## save ranef and condVar in res: if(rho$allST1) { res$ranef <- rep.int(unlist(rho$ST), rho$dims$nlev.re) * rho$u res$condVar <- as.vector(diag(solve(rho$L)) * rep.int(unlist(rho$ST)^2, rho$dims$nlev.re)) } else { Lambda <- getLambda(rho$ST, rho$dims$nlev.re) res$ranef <- Lambda %*% rho$u res$condVar <- tcrossprod(Lambda %*% solve(rho$L), Lambda) } ## Add gradient vector and optionally Hessian matrix: bound <- as.logical(paratBoundary2(rho)) optpar <- fit$par[!bound] if(Hess) { ### NOTE: This is the Hessian evaluated for all parameters that are ### not at the boundary of the parameter space. The likelihood for ### models with boundary parameters is still defined as a function of ### all the parameters, so standard errors differ depending on ### whether boundary terms are included. gH <- deriv12(function(par) getNLA(rho, par, which=!bound), x=optpar) res$gradient <- gH$gradient res$Hessian <- gH$Hessian } else { res$gradient <- grad.ctr(function(par) getNLA(rho, par, which=!bound), x=optpar) } ### OPTION: We could check that the (forward) gradient for variances at the ### boundary are not < -1e-5 (wrt. -logLik/nll/getNLA) ## Setting Niter and neval after gradient and Hessian evaluations: res$Niter <- rho$Niter res$neval <- rho$neval ## return value: res } update.u <- function(rho) { stepFactor <- 1 innerIter <- 0 rho$u <- rho$uStart rho$nll <- nll.u(rho) if(!is.finite(rho$nll)) return(FALSE) rho$gradient <- grad.u(rho) maxGrad <- max(abs(rho$gradient)) conv <- -1 ## Convergence flag message <- "iteration limit reached when updating the random effects" if(rho$ctrl$trace > 0) Trace(iter=0, stepFactor, rho$nll, maxGrad, rho$u, first=TRUE) ## Newton-Raphson algorithm: for(i in 1:rho$ctrl$maxIter) { if(maxGrad < rho$ctrl$gradTol) { message <- "max|gradient| < tol, so current iterate is probably solution" if(rho$ctrl$trace > 0) cat("\nOptimizer converged!
", "max|grad|:", maxGrad, message, fill = TRUE) conv <- 0 break } if(!hess.u(rho)) return(FALSE) step <- as.vector(solve(rho$L, rho$gradient)) rho$u <- rho$u - stepFactor * step nllTry <- nllFast.u(rho) ## no 'X %*% beta' update lineIter <- 0 ## Step halving: while(nllTry > rho$nll) { stepFactor <- stepFactor/2 rho$u <- rho$u + stepFactor * step nllTry <- nllFast.u(rho) ## no 'X %*% beta' update lineIter <- lineIter + 1 if(rho$ctrl$trace > 0) Trace(i+innerIter, stepFactor, rho$nll, maxGrad, rho$u, first=FALSE) if(lineIter > rho$ctrl$maxLineIter){ message <- "step factor reduced below minimum when updating the random effects" conv <- 1 break } innerIter <- innerIter + 1 } rho$nll <- nllTry rho$gradient <- grad.u(rho) maxGrad <- max(abs(rho$gradient)) if(rho$ctrl$trace > 0) Trace(i+innerIter, stepFactor, rho$nll, maxGrad, rho$u, first=FALSE) stepFactor <- min(1, 2 * stepFactor) } if(conv != 0 && rho$ctrl$innerCtrl == "warnOnly") { warning(message, "\n at iteration ", rho$Niter) utils::flush.console() } else if(conv != 0 && rho$ctrl$innerCtrl == "giveError") stop(message, "\n at iteration ", rho$Niter) rho$Niter <- rho$Niter + i - 1 if(!hess.u(rho)) return(FALSE) if(!is.finite(rho$nll)) return(FALSE) else return(TRUE) } clmm.finalize <- function(fit, frames, ths, use.ssr) { fit$tJac <- ths$tJac fit$contrasts <- attr(frames$X, "contrasts") fit$na.action <- attr(frames$mf, "na.action") fit$terms <- frames$terms ### QUEST: Should the terms object contain only the fixed effects ### terms? fit$xlevels <- .getXlevels(frames$terms, frames$mf) fit$y.levels <- levels(frames$y) fit <- within(fit, { ## extract coefficients from 'fit': names(coefficients) <- names(gradient) <- c(ths$alpha.names, colnames(frames$X)[-1]) alpha <- coefficients[1:dims$nalpha] beta <- if(dims$nbeta > 0) coefficients[dims$nalpha + 1:dims$nbeta] else numeric(0) ## set various fit elements: edf <- dims$edf <- dims$nfepar + dims$nSTpar dims$nobs <- sum(frames$wts) dims$df.residual <- dims$nobs - dims$edf Theta <- alpha %*% t(tJac) nm <- paste(y.levels[-length(y.levels)], y.levels[-1], sep="|") dimnames(Theta) <- list("", nm) rm(nm) info <- data.frame("link" = link, "threshold" = threshold, "nobs" = dims$nobs, "logLik" = formatC(logLik, digits=2, format="f"), "AIC" = formatC(-2*logLik + 2*dims$edf, digits=2, format="f"), ## "niter" = paste(optRes$info["neval"], "(", Niter, ")", ## sep=""), "niter" = paste(neval, "(", Niter, ")", sep=""), "max.grad" = formatC(max(abs(gradient)), digits=2, format="e") ## BIC is not part of output since it is not clear what ## the no. observations are. ) }) bound <- if(use.ssr) rep(FALSE, fit$dims$edf) else as.logical(paratBoundary2(fit)) dn <- c(names(fit$coefficients), paste("ST", seq_len(fit$dims$nSTpar), sep=""))[!bound] names(fit$gradient) <- dn if(!is.null(fit$Hessian)) dimnames(fit$Hessian) <- list(dn, dn) ## set class and return fit: class(fit) <- "clmm" return(fit) } ordinal/R/clmm.ssr.R0000644000176200001440000002417213332052252013771 0ustar liggesusers############################################################################# # Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen # # This file is part of the ordinal package for R (*ordinal*) # # *ordinal* is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version.
# # *ordinal* is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # and/or # . ############################################################################# ## This file contains: ## Functions for fitting CLMMs with a single simple random-effects ## term (ssr). rho.clm2clmm.ssr <- function(rho, retrms, ctrl) ### Version of rho.clm2clmm that is set up to use the C ### implementations of Laplace, AGQ and GHQ for a single random ### effect. { gfList <- retrms$gfList rho$grFac <- gfList[[1]] rho$ctrl <- ctrl rho$sigma <- rep(1, nrow(rho$B1)) rho$lambda <- 0 rho$nlev <- as.vector(sapply(gfList, nlevels)) rho$random.names <- sapply(gfList, levels) rho$tau.names <- names(gfList) rho$nrandom <- sum(rho$nlev) ## no. random effects rho$Niter <- 0L rho$neval <- 0L rho$u <- rho$uStart <- rep(0, rho$nrandom) rho$linkInt <- switch(rho$link, logit = 1L, probit = 2L, cloglog = 3L, loglog = 4L, cauchit = 5L) rho$ST <- lapply(retrms$retrms, `[[`, "ST") } ## set.AGQ <- function(rho, nAGQ) { ## rho$nAGQ <- nAGQ ## if(nAGQ %in% c(0L, 1L)) return(invisible()) ## ghq <- gauss.hermite(abs(nAGQ)) ## rho$ghqns <- ghq$nodes ## rho$ghqws <- ## if(nAGQ > 0) ghq$weights ## AGQ ## else log(ghq$weights) + (ghq$nodes^2)/2 ## GHQ ## } clmm.fit.ssr <- function(rho, control = list(), method=c("nlminb", "ucminf"), Hess = FALSE) ### Fit a clmm with a single simple random effects term using AGQ, GHQ ### or Laplace. { optim.error <- function(fit, method) if(inherits(fit, "try-error")) stop("optimizer ", method, " terminated with an error", call.=FALSE) ### OPTION: Could have an argument c(warn, fail, ignore) to optionally ### return the fitted model despite the optimizer failing. 
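### The objective function is selected from nAGQ below: nAGQ < 0 gives
### standard Gauss-Hermite quadrature (GHQ), nAGQ > 1 gives adaptive
### Gauss-Hermite quadrature (AGQ), and nAGQ in {0, 1} gives the
### Laplace approximation.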
method <- match.arg(method) ## Set appropriate objective function: obj.fun <- if(rho$nAGQ < 0) getNGHQ.ssr else if(rho$nAGQ > 1) getNAGQ.ssr else getNLA.ssr ## nAGQ %in% c(0, 1) init.val <- obj.fun(rho, rho$par) if(!is.finite(init.val)) stop(gettextf("non-finite likelihood at starting value (%g)", init.val), call.=FALSE) ## Fit the model: if(method == "ucminf") { fit <- try(ucminf(rho$par, function(par) obj.fun(rho, par), control = control), silent=TRUE) ## Check if optimizer converged without error: optim.error(fit, method) ## Save return value: value <- fit$value } else if(method == "nlminb") { ## hack to remove ucminf control settings: keep <- !names(control) %in% c("grad", "grtol") control <- if(length(keep)) control[keep] else list() fit <- try(nlminb(rho$par, function(par) obj.fun(rho, par), control = control), silent=TRUE) ## Check if optimizer converged without error: optim.error(fit, method) ## Save return value: value <- fit$objective } else stop("unknown optimization method: ", method) ## Extract parameters from optimizer results: rho$par <- fit$par ## Ensure random mode estimation at optimum: nllBase.uC(rho) update.uC(rho) rho$ST <- par2ST(rho$tau, rho$ST) names(rho$ST) <- names(rho$dims$nlev.re) ## Format ranef modes and condVar: ranef <- rho$u * rho$tau condVar <- 1/rho$D * rho$tau^2 ## names(ranef) <- names(condVar) <- rho$random.names ## ranef <- list(ranef) ## condVar <- list(condVar) ## names(ranef) <- names(condVar) <- rho$tau.names ## Prepare list of results: res <- list(coefficients = fit$par[1:rho$dims$nfepar], ST = rho$ST, optRes = fit, logLik = -value, fitted.values = rho$fitted, ranef = ranef, condVar = condVar, dims = rho$dims, u = rho$u) ## Add gradient vector and optionally Hessian matrix: ## bound <- as.logical(paratBoundary2(rho)) ## optpar <- fit$par[!bound] if(Hess) { ## gH <- deriv12(function(par) obj.fun(rho, par, which=!bound), gH <- deriv12(function(par) obj.fun(rho, par), x=fit$par) res$gradient <- gH$gradient res$Hessian <- gH$Hessian } else { ## res$gradient <- grad.ctr(function(par) getNLA(rho, par, which=!bound), res$gradient <- grad.ctr(function(par) obj.fun(rho, par), x=fit$par) } ## Setting Niter and neval after gradient and Hessian evaluations: res$Niter <- rho$Niter res$neval <- rho$neval return(res) } getNGHQ.ssr <- function(rho, par) { ### negative log-likelihood by standard Gauss-Hermite quadrature ### implemented in C: if(!missing(par)) { rho$par <- par if(any(!is.finite(par))) stop(gettextf(paste(c("Non-finite parameters occurred:", formatC(par, format="g")), collapse=" "))) } rho$neval <- rho$neval + 1L nllBase.uC(rho) ## Update tau, eta1Fix and eta2Fix with(rho, { .C("getNGHQ_C", nll = double(1), as.integer(grFac), as.double(tau), as.double(eta1Fix), as.double(eta2Fix), as.double(o1), as.double(o2), as.double(sigma), as.double(wts), length(sigma), length(uStart), as.double(ghqns), as.double(ghqws), as.integer(abs(nAGQ)), as.integer(linkInt), as.double(ghqns * tau), as.double(lambda))$nll }) } getNAGQ.ssr <- function(rho, par) { ### negative log-likelihood by adaptive Gauss-Hermite quadrature ### implemented in C: if(!missing(par)) { rho$par <- par if(any(!is.finite(par))) stop(gettextf(paste(c("Non-finite parameters occurred:", formatC(par, format="g")), collapse=" "))) } rho$neval <- rho$neval + 1L if(!update.uC(rho)) return(Inf) if(any(rho$D < 0)) return(Inf) with(rho, { .C("getNAGQ", nll = double(1), as.integer(grFac), as.double(tau), as.double(eta1Fix), as.double(eta2Fix), as.double(o1), as.double(o2), as.double(sigma), as.double(wts),
length(sigma), length(uStart), as.double(ghqns), as.double(log(ghqws)), as.double(ghqns^2), as.double(u), as.double(D), as.integer(abs(nAGQ)), as.integer(linkInt), as.double(lambda))$nll }) } getNLA.ssr <- function(rho, par) { ### negative log-likelihood by the Laplace approximation ### (with update.u2 in C or R): if(!missing(par)) { rho$par <- par if(any(!is.finite(par))) stop(gettextf(paste(c("Non-finite parameters occurred:", formatC(par, format="g")), collapse=" "))) } rho$neval <- rho$neval + 1L if(!update.uC(rho)) return(Inf) if(any(rho$D <= 0)) return(Inf) logDetD <- sum(log(rho$D)) rho$negLogLik - rho$nrandom*log(2*pi)/2 + logDetD/2 } nllBase.uC <- function(rho) { ### updates tau, eta1Fix and eta2Fix given new parameter values with(rho, { tau <- exp(par[nalpha + nbeta + 1:ntau]) eta1Fix <- drop(B1 %*% par[1:(nalpha + nbeta)]) eta2Fix <- drop(B2 %*% par[1:(nalpha + nbeta)]) }) return(invisible()) } update.uC <- function(rho) { ### C-implementation of NR-algorithm. nllBase.uC(rho) ## update: tau, eta1Fix, eta2Fix fit <- with(rho, { .C("NRalgv3", as.integer(ctrl$trace), as.integer(ctrl$maxIter), as.double(ctrl$gradTol), as.integer(ctrl$maxLineIter), as.integer(grFac), ## OBS as.double(tau), # stDev as.double(o1), as.double(o2), as.double(eta1Fix), as.double(eta2Fix), as.double(sigma), ## rep(1, n) as.integer(linkInt), ## as.double(wts), ## pre. weights u = as.double(uStart), fitted = as.double(fitted), ## pre. pr funValue = double(1), gradValues = as.double(uStart), hessValues = as.double(rep(1, length(uStart))), length(fitted), length(uStart), maxGrad = double(1), conv = 0L, as.double(lambda), ## Niter = as.integer(Niter) ## OBS )[c("u", "fitted", "funValue", "gradValues", "hessValues", "maxGrad", "conv", "Niter")] }) ## Get message: message <- switch(as.character(fit$conv), "1" = "max|gradient| < tol, so current iterate is probably solution", "0" = "Non-finite negative log-likelihood", "-1" = "iteration limit reached when updating the random effects", "-2" = "step factor reduced below minimum when updating the random effects") ## Check for convergence and report warning/error: if(rho$ctrl$trace > 0 && fit$conv == 1) cat("\nOptimizer converged! ", "max|grad|:", fit$maxGrad, message, fill = TRUE) if(fit$conv != 1 && rho$ctrl$innerCtrl == "warnOnly") warning(message, "\n at iteration ", rho$Niter) else if(fit$conv != 1 && rho$ctrl$innerCtrl == "giveError") stop(message, "\n at iteration ", rho$Niter) ## Store values and return: rho$Niter <- fit$Niter rho$fitted <- fit$fitted rho$u <- fit$u rho$D <- fit$hessValue rho$gradient <- fit$gradValue if(!is.finite(rho$negLogLik <- fit$funValue)) return(FALSE) return(TRUE) } ordinal/R/clm.methods.R0000644000176200001440000003454713332052252014455 0ustar liggesusers############################################################################# # Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen # # This file is part of the ordinal package for R (*ordinal*) # # *ordinal* is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # *ordinal* is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # and/or # .
############################################################################# ## This file contains: ## Implementation of various methods for clm objects. print.clm <- function(x, digits = max(3, getOption("digits") - 3), ...) { cat("formula:", Deparse(formula(x$terms)), fill=TRUE) ### NOTE: deparse(x$call$formula) will not always work since this may ### not always be appropriately evaluated. if(!is.null(x$call$scale)) cat("scale: ", Deparse(formula(x$S.terms)), fill=TRUE) if(!is.null(x$call$nominal)) cat("nominal:", Deparse(formula(x$nom.terms)), fill=TRUE) if(!is.null(data.name <- x$call$data)) cat("data: ", Deparse(data.name), fill=TRUE) if(!is.null(x$call$subset)) cat("subset: ", Deparse(x$call$subset), fill=TRUE) cat("\n") print(x$info, row.names=FALSE, right=FALSE) if(length(x$beta)) { if(sum(x$aliased$beta) > 0) { cat("\nCoefficients: (", sum(x$aliased$beta), " not defined because of singularities)\n", sep = "") } else cat("\nCoefficients:\n") print.default(format(x$beta, digits = digits), quote = FALSE) } if(length(x$zeta)) { if(sum(x$aliased$zeta) > 0) cat("\nlog-scale coefficients: (", sum(x$aliased$zeta), " not defined because of singularities)\n", sep = "") else cat("\nlog-scale coefficients:\n") print.default(format(x$zeta, digits = digits), quote = FALSE) } if(length(x$lambda)) { cat("\nLink coefficient:\n") print.default(format(x$lambda, digits = digits), quote = FALSE) } if(length(x$alpha) > 0) { if(sum(x$aliased$alpha) > 0) cat("\nThreshold coefficients: (", sum(x$aliased$alpha), " not defined because of singularities)\n", sep = "") else cat("\nThreshold coefficients:\n") if(!is.null(x$call$nominal)) print.default(format(x$alpha.mat, digits = digits), quote = FALSE) else print.default(format(x$alpha, digits = digits), quote = FALSE) } if(nzchar(mess <- naprint(x$na.action))) cat("(", mess, ")\n", sep="") return(invisible(x)) } vcov.clm <- function(object, tol = sqrt(.Machine$double.eps), method = c("clm", "Cholesky", "svd", "eigen", "qr"), ...) 
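### Extract the variance-covariance matrix of the parameters: by
### default (method = "clm") the matrix stored on the fitted object;
### otherwise the Hessian is inverted via a Cholesky, SVD, eigen or QR
### decomposition, each failing with an informative error when the
### Hessian is (numerically) singular or not positive definite.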
{ method <- match.arg(method) if(method == "clm") return(object$vcov) if(is.null(object$Hessian)) stop("Model needs to be fitted with Hess = TRUE") dn <- dimnames(object$Hessian) H <- object$Hessian if(!all(is.finite(H))) stop("cannot compute vcov: non-finite values in Hessian") if(method == "svd") { Hsvd <- svd(H) ## positive <- Hsvd$d > max(tol * Hsvd$d[1L], tol) positive <- Hsvd$d > tol if(!all(positive)) stop(gettextf("Cannot compute vcov: \nHessian is numerically singular with min singular value = %g", min(Hsvd$d))) cov <- Hsvd$v %*% (1/Hsvd$d * t(Hsvd$u)) } else if(method == "eigen") { evd <- eigen(H, symmetric=TRUE) ## tol <- max(tol * evd$values[1L], tol) ## if evd$values[1L] < 0 if(any(evd$values < tol)) stop(gettextf("Cannot compute vcov: \nHessian is not positive definite with min eigenvalue = %g", min(evd$values))) cov <- with(evd, vectors %*% diag(1/values) %*% t(vectors)) } else if(method == "Cholesky") { cholH <- try(chol(H), silent=TRUE) if(inherits(cholH, "try-error")) stop("Cannot compute vcov: \nHessian is not positive definite") cov <- chol2inv(cholH) } else if(method == "qr") { qrH <- qr(H, tol=sqrt(.Machine$double.eps)) if(qrH$rank < nrow(H)) stop("Cannot compute vcov: \nHessian is numerically singular") cov <- solve.qr(qrH) } else stop("method not recognized") ## Need to test for negative variances, since some methods (svd, ## qr) may produce a vcov-matrix if the Hessian is *negative* ## definite: if(any(diag(cov) < 0)) { stop("Cannot compute vcov: \nHessian is not positive definite") } structure(cov, dimnames=dn) } summary.clm <- function(object, correlation = FALSE, ...) { vcov <- object$vcov coefs <- matrix(NA, length(object$coefficients), 4, dimnames = list(names(object$coefficients), c("Estimate", "Std. Error", "z value", "Pr(>|z|)"))) coefs[, 1] <- object$coefficients if(!all(is.finite(vcov))) { ## warning("Variance-covariance matrix of the parameters is not defined") coefs[, 2:4] <- NA if(correlation) warning("Correlation matrix is unavailable") } else { alias <- unlist(object$aliased) coefs[!alias, 2] <- sd <- sqrt(diag(vcov)) ## Cond is Inf if Hessian contains NaNs: object$cond.H <- if(any(is.na(object$Hessian))) Inf else with(eigen(object$Hessian, symmetric=TRUE, only.values = TRUE), abs(max(values) / min(values))) coefs[!alias, 3] <- coefs[!alias, 1]/coefs[!alias, 2] coefs[!alias, 4] <- 2 * pnorm(abs(coefs[!alias, 3]), lower.tail=FALSE) if(correlation) object$correlation <- cov2cor(vcov) } object$coefficients <- coefs class(object) <- "summary.clm" return(object) } print.summary.clm <- function(x, digits = max(3, getOption("digits") - 3), signif.stars = getOption("show.signif.stars"), ...) { cat("formula:", Deparse(formula(x$terms)), fill=TRUE) ### NOTE: deparse(x$call$formula) will not always work since this may ### not always be appropriately evaluated. 
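### For example, after 'f <- rating ~ temp; fit <- clm(f, data = wine)'
### the component x$call$formula is just the symbol 'f', which is why
### formula(x$terms) is deparsed instead.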
if(!is.null(x$call$scale)) cat("scale: ", Deparse(formula(x$S.terms)), fill=TRUE) if(!is.null(x$call$nominal)) cat("nominal:", Deparse(formula(x$nom.terms)), fill=TRUE) if(!is.null(data.name <- x$call$data)) cat("data: ", Deparse(data.name), fill=TRUE) if(!is.null(x$call$subset)) cat("subset: ", Deparse(x$call$subset), fill=TRUE) cat("\n") print(x$info, row.names=FALSE, right=FALSE) nalpha <- length(x$alpha) nbeta <- length(x$beta) nzeta <- length(x$zeta) nlambda <- length(x$lambda) if(nbeta > 0) { if(sum(x$aliased$beta) > 0) cat("\nCoefficients: (", sum(x$aliased$beta), " not defined because of singularities)\n", sep = "") else cat("\nCoefficients:\n") printCoefmat(x$coefficients[nalpha + 1:nbeta, , drop=FALSE], digits=digits, signif.stars=signif.stars, has.Pvalue=TRUE, ...) } ## else cat("\nNo Coefficients\n") if(nzeta > 0) { if(sum(x$aliased$zeta) > 0) cat("\nlog-scale coefficients: (", sum(x$aliased$zeta), " not defined because of singularities)\n", sep = "") else cat("\nlog-scale coefficients:\n") printCoefmat(x$coefficients[nalpha + nbeta + 1:nzeta, , drop=FALSE], digits=digits, signif.stars=signif.stars, has.Pvalue=TRUE, ...) } if(nlambda > 0) { cat("\nLink coefficients:\n") printCoefmat(x$coefficients[nalpha + nbeta + nzeta + nlambda, , drop=FALSE], digits=digits, signif.stars=signif.stars, has.Pvalue=TRUE, ...) } if(nalpha > 0) { ## always true if(sum(x$aliased$alpha) > 0) cat("\nThreshold coefficients: (", sum(x$aliased$alpha), " not defined because of singularities)\n", sep = "") else cat("\nThreshold coefficients:\n") printCoefmat(x$coefficients[seq_len(nalpha), -4, drop=FALSE], digits=digits, has.Pvalue=FALSE, signif.stars=FALSE, ...) } if(nzchar(mess <- naprint(x$na.action))) cat("(", mess, ")\n", sep="") if(!is.null(correl <- x$correlation)) { cat("\nCorrelation of Coefficients:\n") ll <- lower.tri(correl) correl[ll] <- format(round(correl[ll], digits)) correl[!ll] <- "" print(correl[-1, -ncol(correl)], quote = FALSE, ...) } return(invisible(x)) } logLik.clm <- function(object, ...) structure(object$logLik, df = object$edf, nobs=object$nobs, class = "logLik") extractAIC.clm <- function(fit, scale = 0, k = 2, ...) { edf <- fit$edf c(edf, -2*fit$logLik + k * edf) } ### NOTE: AIC.clm implicitly defined via logLik.clm anova.clm <- function(object, ..., type = c("I", "II", "III", "1", "2", "3")) ### requires that clm objects have components: ### edf: no. parameters used ### call$formula ### link (character) ### threshold (character) ### logLik ### { mc <- match.call() dots <- list(...) ## remove 'test' and 'type' arguments from dots-list: not.keep <- which(names(dots) %in% c("test")) if(length(not.keep)) { message("'test' argument ignored in anova.clm\n") dots <- dots[-not.keep] } if(length(dots) == 0) return(single_anova(object, type=type)) ## Multi-model anova method proceeds: mlist <- c(list(object), dots) if(!all(sapply(mlist, function(model) inherits(model, c("clm", "clmm"))))) stop("only 'clm' and 'clmm' objects are allowed") nfitted <- sapply(mlist, function(x) length(x$fitted.values)) if(any(nfitted != nfitted[1L])) stop("models were not all fitted to the same dataset") ### OPTION: consider comparing y returned by the models for a better check? no.par <- sapply(mlist, function(x) x$edf) ## order list with increasing no. 
par: ord <- order(no.par, decreasing=FALSE) mlist <- mlist[ord] no.par <- no.par[ord] no.tests <- length(mlist) ## extract formulas, links, thresholds, scale formulas, nominal ## formulas: forms <- sapply(mlist, function(x) Deparse(x$call$formula)) links <- sapply(mlist, function(x) x$link) thres <- sapply(mlist, function(x) x$threshold) nominal <- sapply(mlist, function(x) Deparse(x$call$nominal)) scale <- sapply(mlist, function(x) Deparse(x$call$scale)) models <- data.frame(forms) models.names <- 'formula:' if(any(!nominal %in% c("~1", "NULL"))) { nominal[nominal == "NULL"] <- "~1" models$nominal <- nominal models.names <- c(models.names, "nominal:") } if(any(!scale %in% c("~1", "NULL"))) { scale[scale == "NULL"] <- "~1" models$scale <- scale models.names <- c(models.names, "scale:") } models.names <- c(models.names, "link:", "threshold:") models <- cbind(models, data.frame(links, thres)) ## extract AIC, logLik, statistics, df, p-values: AIC <- sapply(mlist, function(x) -2*x$logLik + 2*x$edf) logLiks <- sapply(mlist, function(x) x$logLik) statistic <- c(NA, 2*diff(sapply(mlist, function(x) x$logLik))) df <- c(NA, diff(no.par)) pval <- c(NA, pchisq(statistic[-1], df[-1], lower.tail=FALSE)) pval[!is.na(df) & df==0] <- NA ## collect results in data.frames: tab <- data.frame(no.par, AIC, logLiks, statistic, df, pval) tab.names <- c("no.par", "AIC", "logLik", "LR.stat", "df", "Pr(>Chisq)") mnames <- sapply(as.list(mc), Deparse)[-1] colnames(tab) <- tab.names rownames(tab) <- rownames(models) <- mnames[ord] colnames(models) <- models.names attr(tab, "models") <- models attr(tab, "heading") <- "Likelihood ratio tests of cumulative link models:\n" class(tab) <- c("anova.clm", "data.frame") tab } print.anova.clm <- function(x, digits=max(getOption("digits") - 2, 3), signif.stars=getOption("show.signif.stars"), ...) { if (!is.null(heading <- attr(x, "heading"))) cat(heading, "\n") models <- attr(x, "models") print(models, right=FALSE) cat("\n") printCoefmat(x, digits=digits, signif.stars=signif.stars, tst.ind=4, cs.ind=NULL, # zap.ind=2, #c(1,5), P.values=TRUE, has.Pvalue=TRUE, na.print="", ...) return(invisible(x)) } model.matrix.clm <- function(object, type = c("design", "B"), ...) { type <- match.arg(type) mf <- try(model.frame(object), silent=TRUE) if(inherits(mf, "try-error")) stop("Cannot extract model.matrix: refit model with 'model=TRUE'?") ### NOTE: we want to stop even if type="B" since the fullmf is needed ### in get_clmRho also and this way the error message is better. if(type == "design") { contr <- c(object$contrasts, object$S.contrasts, object$nom.contrasts) design <- get_clmDesign(fullmf=object$model, terms.list=terms(object, "all"), contrasts=contr) keep <- c("X", "NOM", "S") select <- match(keep, names(design), nomatch=0) ans <- design[select] } else { ## if type == "B": env <- get_clmRho.clm(object) ans <- list(B1 = env$B1, B2 = env$B2) ans$S <- env$S ## may not exist } return(ans) } model.frame.clm <- function(formula, ...) { ### returns a model frame with *all* variables used for fitting. if(is.null(mod <- formula$model)) stop("Cannot extract model.frame: refit model with 'model=TRUE'") else mod } coef.clm <- function(object, na.rm = FALSE, ...) { if(na.rm) { coefs <- object$coefficients coefs[!is.na(coefs)] } else object$coefficients } coef.summary.clm <- function(object, na.rm = FALSE, ...) { if(na.rm) { coefs <- object$coefficients coefs[!is.na(coefs[,1]), , drop=FALSE] } else object$coefficients } nobs.clm <- function(object, ...) 
object$nobs terms.clm <- function(x, type=c("formula", "scale", "nominal", "all"), ...) { type <- match.arg(type) term.nm <- c("terms", "S.terms", "nom.terms") Terms <- x[names(x) %in% term.nm] ind <- match(term.nm, names(Terms), 0L) Terms <- Terms[ind] names(Terms) <- c("formula", "scale", "nominal")[ind != 0] if(type == "all") return(Terms) if(!type %in% names(Terms)) stop(gettextf("no terms object for '%s'", type)) Terms[[type]] } ordinal/R/clm.slice2D.R0000644000176200001440000002056113332052252014266 0ustar liggesusers############################################################################# # Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen # # This file is part of the ordinal package for R (*ordinal*) # # *ordinal* is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # *ordinal* is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # and/or # . ############################################################################# slice2D <- function(object, ...) { UseMethod("slice2D") } slice2D.clm <- function(object, parm=seq_along(par), lambda=3, grid=20, ...) { ## argument matching and testing: stopifnot(is.numeric(lambda) && lambda > 0) stopifnot(is.numeric(grid) && grid >= 1) grid <- as.integer(round(grid)) par <- coef(object, na.rm=TRUE) par.names <- names(par) stopifnot(length(parm) == length(unique(parm))) if(is.character(parm)) parm <- match(parm, par.names, nomatch = 0) if(!all(parm %in% seq_along(par))) stop("invalid 'parm' argument") stopifnot(length(parm) >= 2L) parm <- as.integer(parm) nparm <- length(parm) ## parm is an integer vector indexing non-aliased coef. ml <- object$logLik parm.names <- par.names[parm] mle <- par[parm] ## get environment corresponding to object: env <- get_clmRho(object) ## env <- update(object, doFit=FALSE) names(par) <- NULL env$par <- as.vector(par) ## set env$par to mle stopifnot(isTRUE(all.equal(env$clm.nll(env), -object$logLik))) ## generate sequence of parameters at which to compute the ## log-likelihood: curv <- sqrt(1/diag(object$Hessian)) ## curvature in nll wrt. par par.range <- par + curv %o% (c(-1, 1) * lambda) ## All pairwise combinations: pairs <- t(combn(seq_len(nparm), 2)) ncombn <- nrow(pairs) ### Allow for sequential paired comparisons? 
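### 'pairs' has one row per parameter pair; e.g., for 3 parameters,
### t(combn(3, 2)) gives the rows (1,2), (1,3) and (2,3), and a
### grid x grid matrix of negative log-likelihood values is computed
### for each such pair below.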
par.seq <- lapply(parm, function(ind) { seq(par.range[ind, 1], par.range[ind, 2], length = grid) }) names(par.seq) <- par.names zlist <- vector(mode="list", length=ncombn) names(zlist) <- paste(par.names[pairs[, 1]], par.names[pairs[, 2]], sep=".") for(k in 1:ncombn) { i <- pairs[k, 1] j <- pairs[k, 2] xx <- expand.grid(x=par.seq[[i]], y=par.seq[[j]]) ## Set parameter values to MLEs: env$par <- par ## Compute log-likelihood over entire grid: z <- apply(xx, 1, function(x) { env$par[c(i, j)] <- as.vector(x); env$clm.nll(env) }) ## Store log-likelihood values in a matrix: zlist[[k]] <- matrix(z, ncol=grid) } res <- list(zlist=zlist, par.seq=par.seq, par.range=par.range, pairs=pairs, original.fit=object, mle=mle) class(res) <- c("slice2D.clm") res } safe.as.int <- function(x) as.integer(round(x)) plot.slice2D.clm <- function(x, parm = seq_along(orig.par), ## How to specify default values ## of parm? plot.mle = TRUE, ask = prod(par("mfcol")) < nrow(pairs) && dev.interactive(), ...) ### parm: a character vector or integer vector of length >= 2 with ### those par-combinations to make contour plots. { ## stopifnot(all parm in names(par.seq)) orig.par <- coef(x$original.fit, na.rm=TRUE) ### More parm stuff here... stopifnot(is.numeric(parm) && length(parm) >= 2L) parm <- as.integer(round(parm)) par.names <- names(orig.par) ## of <- attr(x, "original.fit") ## par <- coef(of) ## ml <- of$logLik keep <- (x$pairs[, 1] %in% parm) & (x$pairs[, 2] %in% parm) pairs <- x$pairs[keep, , drop=FALSE] stopifnot(length(pairs) >= 2) if(ask) { oask <- devAskNewPage(TRUE) on.exit(devAskNewPage(oask)) } ## Plotting the contours: for(k in seq_len(nrow(pairs))) { i <- pairs[k, 1] j <- pairs[k, 2] contour(x$par.seq[[i]], x$par.seq[[j]], x$zlist[[k]], xlab = par.names[i], ylab = par.names[j]) points(orig.par[i], orig.par[j], pch = 4, col = "red", lwd = 2) } return(invisible()) } sliceg.clm <- function(object, parm = seq_along(par), lambda = 3, grid = 1e2, quad.approx = TRUE, ...) { ## argument matching and testing: stopifnot(is.numeric(lambda) && lambda > 0) stopifnot(is.numeric(grid) && grid >= 1) grid <- as.integer(round(grid)) par <- coef(object, na.rm=TRUE) par.names <- names(par) npar <- length(par) stopifnot(length(parm) == length(unique(parm))) if(is.character(parm)) parm <- match(parm, par.names, nomatch = 0) ### disallow character argument due to ambiguity? if(!all(parm %in% seq_along(par))) stop("invalid 'parm' argument") stopifnot(length(parm) > 0) parm <- as.integer(round(parm)) ## parm is an integer vector indexing non-aliased coef. ml <- object$logLik parm.names <- par.names[parm] ## get environment corresponding to object: rho <- get_clmRho(object) ## rho <- update(object, doFit = FALSE) names(par) <- NULL rho$par <- par ## set rho$par to mle stopifnot(isTRUE(all.equal(rho$clm.nll(rho), -object$logLik))) ## generate sequence of parameters at which to compute the ## log-likelihood: curv <- sqrt(1/diag(object$Hessian)) ## curvature in nll wrt. 
par par.range <- par + curv %o% c(-lambda, lambda) ## par.seq - list of length npar with a sequence of values for each ## parameter : par.seq <- lapply(parm, function(ind) { seq(par.range[ind, 1], par.range[ind, 2], length = grid) }) ## compute relative logLik for all par.seq for each par: logLik <- lapply(seq_along(parm), function(i) { # for each par rho$par <- par ## reset par values to MLE sapply(par.seq[[ i ]], function(par.val) { # for each par.seq value rho$par[ parm[i] ] <- par.val rho$clm.nll(rho) rho$clm.grad(rho)[ parm[i] ] }) }) ## collect parameter sequences and relative logLik in a list of ## data.frames: res <- lapply(seq_along(parm), function(i) { structure(data.frame(par.seq[[ i ]], logLik[[ i ]]), ## names = c(parm.names[i], "logLik")) names = c(parm.names[i], "gradient")) }) ## set attributes: names(res) <- parm.names attr(res, "original.fit") <- object attr(res, "mle") <- par[parm] ## class(res) <- "slice.clm" class(res) <- "sliceg.clm" ## if(!quad.approx) return(res) ## ## compute quadratic approx to *positive* logLik: ## Quad <- function(par, mle, curv) ## -((mle - par)^2 / curv^2 / 2) ## for(i in seq_along(parm)) ## res[[ i ]]$quad <- ## Quad(par.seq[[ i ]], par[ parm[i] ], curv[ parm[i] ]) return(res) } plot.sliceg.clm <- function(x, parm = seq_along(x), type = c("quadratic", "linear"), plot.mle = TRUE, ask = prod(par("mfcol")) < length(parm) && dev.interactive(), ...) { ## Initial argument matching and testing: type <- match.arg(type) stopifnot(is.numeric(parm)) parm <- as.integer(round(parm)) of <- attr(x, "original.fit") par <- coef(of) ml <- of$logLik ## take the signed sqrt of nll and quad: ## if(type == "linear") { ## sgn.sqrt <- function(par, mle, logLik) ## (2 * (par > mle) - 1) * sqrt(-logLik) ## mle <- coef(attr(x, "original.fit")) ## for(i in parm) { ## x[[i]]$logLik <- sgn.sqrt(x[[i]][1], mle[i], x[[i]]$logLik) ## if(!is.null(x[[i]]$quad)) ## x[[i]]$quad <- sgn.sqrt(x[[i]][1], mle[i], x[[i]]$quad) ## } ## ylab <- "Signed log-likelihood root" ## } ## else ## ylab <- "Relative log-likelihood" ylab <- "Gradient" if(ask) { oask <- devAskNewPage(TRUE) on.exit(devAskNewPage(oask)) } ## actual plotting: for(i in parm) { z <- x[[i]] plot(z[1:2], type = "l", ylab=ylab, ...) if(!is.null(z$quad)) lines(z[[1]], z[[3]], lty = 2) if(plot.mle && type == "quadratic") ## abline(v = par[i]) abline(v = attr(x, "mle")[i]) ## abline(v = par[names(x)[i]]) } return(invisible()) } ordinal/R/clm.slice.R0000644000176200001440000001600713332052252014100 0ustar liggesusers############################################################################# # Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen # # This file is part of the ordinal package for R (*ordinal*) # # *ordinal* is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # *ordinal* is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # and/or # . ############################################################################# ## This file contains: ## Methods to compute and plot likelihood-slices for clm objects. slice <- function(object, ...)
{ UseMethod("slice") } slice.clm <- function(object, parm = seq_along(par), lambda = 3, grid = 1e2, quad.approx = TRUE, ...) { ## argument matching and testing: stopifnot(is.numeric(lambda) && lambda > 0) stopifnot(is.numeric(grid) && grid >= 1) grid <- as.integer(round(grid)) par <- coef(object, na.rm=TRUE) par.names <- names(par) npar <- length(par) stopifnot(length(parm) == length(unique(parm))) if(is.character(parm)) parm <- match(parm, par.names, nomatch = 0) ### disallow character argument due to ambiguity? if(!all(parm %in% seq_along(par))) stop("invalid 'parm' argument") stopifnot(length(parm) > 0) parm <- as.integer(round(parm)) ## parm is an integer vector indexing non-aliased coef. ml <- object$logLik parm.names <- par.names[parm] ## get environment corresponding to object: rho <- get_clmRho(object) ## rho <- update(object, doFit = FALSE) names(par) <- NULL rho$par <- par ## set rho$par to mle stopifnot(isTRUE(all.equal(rho$clm.nll(rho), -object$logLik))) ## generate sequence of parameters at which to compute the ## log-likelihood: curv <- sqrt(1/diag(object$Hessian)) ## curvature in nll wrt. par par.range <- par + curv %o% c(-lambda, lambda) ## par.seq - list of length npar with a sequence of values for each ## parameter : par.seq <- lapply(parm, function(ind) { seq(par.range[ind, 1], par.range[ind, 2], length = grid) }) ## compute relative logLik for all par.seq for each par: logLik <- lapply(seq_along(parm), function(i) { # for each par rho$par <- par ## reset par values to MLE sapply(par.seq[[ i ]], function(par.val) { # for each par.seq value rho$par[ parm[i] ] <- par.val -rho$clm.nll(rho) - ml ## relative logLik }) }) ## collect parameter sequences and relative logLik in a list of ## data.frames: res <- lapply(seq_along(parm), function(i) { structure(data.frame(par.seq[[ i ]], logLik[[ i ]]), names = c(parm.names[i], "logLik")) }) ## set attributes: names(res) <- parm.names attr(res, "original.fit") <- object attr(res, "mle") <- par[parm] class(res) <- "slice.clm" if(!quad.approx) return(res) ## compute quadratic approx to *positive* logLik: Quad <- function(par, mle, curv) -((mle - par)^2 / curv^2 / 2) for(i in seq_along(parm)) res[[ i ]]$quad <- Quad(par.seq[[ i ]], par[ parm[i] ], curv[ parm[i] ]) return(res) } plot.slice.clm <- function(x, parm = seq_along(x), type = c("quadratic", "linear"), plot.mle = TRUE, ask = prod(par("mfcol")) < length(parm) && dev.interactive(), ...) { ## Initial argument matching and testing: type <- match.arg(type) stopifnot(is.numeric(parm)) parm <- as.integer(round(parm)) of <- attr(x, "original.fit") par <- coef(of) ml <- of$logLik ## take the signed sqrt of nll and quad: if(type == "linear") { sgn.sqrt <- function(par, mle, logLik) (2 * (par > mle) - 1) * sqrt(-logLik) mle <- coef(attr(x, "original.fit")) for(i in parm) { x[[i]]$logLik <- sgn.sqrt(x[[i]][1], mle[i], x[[i]]$logLik) if(!is.null(x[[i]]$quad)) x[[i]]$quad <- sgn.sqrt(x[[i]][1], mle[i], x[[i]]$quad) } ylab <- "Signed log-likelihood root" } else ylab <- "Relative log-likelihood" if(ask) { oask <- devAskNewPage(TRUE) on.exit(devAskNewPage(oask)) } ## actual plotting: for(i in parm) { z <- x[[i]] plot(z[1:2], type = "l", ylab=ylab, ...) if(!is.null(z$quad)) lines(z[[1]], z[[3]], lty = 2) if(plot.mle && type == "quadratic") ## abline(v = par[i]) abline(v = attr(x, "mle")[i]) ## abline(v = par[names(x)[i]]) } return(invisible()) } ## slice.clm <- ## function(object, parm = seq_along(par), lambda = 3, grid = 1e2, ## quad.approx = TRUE, ...)
## { ## ## argument matching and testing: ## stopifnot(is.numeric(lambda) && lambda > 0) ## stopifnot(is.numeric(grid) && grid >= 1) ## grid <- as.integer(grid) ## par <- coef(object) ## par.names <- names(par) ## npar <- length(par) ## stopifnot(length(parm) == length(unique(parm))) ## if(is.character(parm)) ## parm <- match(parm, par.names, nomatch = 0) ## if(!all(parm %in% seq_along(par))) ## stop("invalid 'parm' argument") ## stopifnot(length(parm) > 0) ## parm <- as.integer(parm) ## ml <- object$logLik ## parm.names <- par.names[parm] ## ## ## get environment corresponding to object: ## rho <- update(object, doFit = FALSE) ## names(par) <- NULL ## rho$par <- par ## set rho$par to mle ## stopifnot(isTRUE(all.equal(rho$clm.nll(rho), -object$logLik))) ## ## ## generate sequence of parameters at which to compute the ## ## log-likelihood: ## curv <- sqrt(1/diag(object$Hess)) ## curvature in nll wrt. par ## par.range <- par + curv %o% c(-lambda, lambda) ## ## par.seq - list of length npar: ## par.seq <- sapply(parm, function(ind) { ## seq(par.range[ind, 1], par.range[ind, 2], length = grid) }, ## simplify = FALSE) ## ## compute relative logLik for all par.seq for each par: ## logLik <- lapply(seq_along(parm), function(i) { # for each par ## rho$par <- par ## reset par values to MLE ## sapply(par.seq[[ i ]], function(par.val) { # for each val ## rho$par[ parm[i] ] <- par.val ## -rho$clm.nll(rho) - ml ## relative logLik ## }) ## }) ## ## ## collect results in a list of data.frames: ## res <- lapply(seq_along(parm), function(i) { ## structure(data.frame(par.seq[[ i ]], logLik[[ i ]]), ## names = c(parm.names[i], "logLik")) ## }) ## ## ## set attributes: ## names(res) <- parm.names ## attr(res, "original.fit") <- object ## class(res) <- "slice.clm" ## ## if(!quad.approx) return(res) ## ## compute quadratic approx to *positive* logLik: ## Quad <- function(par, mle, curv) ## -((mle - par)^2 / curv^2 / 2) ## for(i in seq_along(parm)) ## res[[ i ]]$quad <- ## Quad(par.seq[[ i ]], par[ parm[i] ], curv[ parm[i] ]) ## ## return(res) ## } ordinal/R/clm.start.R0000644000176200001440000001127413573720764014160 0ustar liggesusers############################################################################# # Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen # # This file is part of the ordinal package for R (*ordinal*) # # *ordinal* is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # *ordinal* is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # and/or # . ############################################################################# ## This file contains: ## Functions to compute starting values for CLMs in clm(). 
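### For example, for a response with 4 levels and threshold =
### "flexible", start.threshold() below returns qlogis((1:3)/4), i.e.
### approximately c(-1.099, 0, 1.099), while all regression
### parameters are started at zero.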
set.start <- function(rho, start=NULL, get.start=TRUE, threshold, link, frames) { ## set starting values for the parameters: if(get.start) { start <- ## not 'starting' scale effects: clm.start(y.levels=frames$y.levels, threshold=threshold, X=frames$X, NOM=frames$NOM, has.intercept=TRUE) if(length(rho$lambda) > 0) start <- c(start, rho$lambda) if(length(rho$lambda) == 0 && (NCOL(frames$S) > 1 || link == "cauchit")) { ### NOTE: only special start if NCOL(frames$S) > 1 (no reason for ### special start if scale is only offset and no predictors). ### NOTE: start cauchit models at the probit estimates if start is not ### supplied: rho$par <- start if(link == "cauchit") setLinks(rho, link="probit") else setLinks(rho, link) tempk <- rho$k rho$k <- 0 ## increased gradTol and relTol: fit <- try(clm.fit.NR(rho, control=list(gradTol=1e-3, relTol=1e-3)), silent=TRUE) if(inherits(fit, "try-error")) stop("Failed to find suitable starting values: please supply some", call.=FALSE) start <- c(fit$par, rep(0, NCOL(frames$S) - 1)) attr(start, "start.iter") <- fit$niter rho$k <- tempk } } ## test start: stopifnot(is.numeric(start)) length.start <- ncol(rho$B1) + NCOL(frames$S) + length(rho$lambda) - 1 if(length(start) != length.start) stop(gettextf("length of start is %d should equal %d", length(start), length.start), call.=FALSE) return(start) } start.threshold <- function(y.levels, threshold = c("flexible", "symmetric", "symmetric2", "equidistant")) ### args: ### y.levels - levels of the model response, at least of length two ### threshold - threshold structure, character. { ## match and test arguments: threshold <- match.arg(threshold) ny.levels <- length(y.levels) ntheta <- ny.levels - 1L if(threshold %in% c("symmetric", "symmetric2", "equidistant") && ny.levels < 3) stop(gettextf("symmetric and equidistant thresholds are only meaningful for responses with 3 or more levels")) ## default starting values: start <- qlogis((1:ntheta) / (ntheta + 1) ) # just a guess ## adjusting for threshold functions: if(threshold == "symmetric" && ntheta %% 2) { ## ntheta odd >= 3 nalpha <- (ntheta + 1) / 2 start <- c(start[nalpha], diff(start[nalpha:ntheta])) ## works for ## ntheta >= 1 } if(threshold == "symmetric" && !ntheta %% 2) {## ntheta even >= 4 nalpha <- (ntheta + 2) / 2 start <- c(start[c(nalpha - 1, nalpha)], diff(start[nalpha:ntheta])) ## works for ntheta >= 2 } if(threshold == "symmetric2" && ntheta %% 2) { ## ntheta odd >= 3 nalpha <- (ntheta + 3) / 2 start <- start[nalpha:ntheta] ## works for ntheta >= 3 } if(threshold == "symmetric2" && !ntheta %% 2) {## ntheta even >= 4 nalpha <- (ntheta + 2) / 2 start <- start[nalpha:ntheta] ## works for ntheta >= 2 } if(threshold == "equidistant") start <- c(start[1], mean(diff(start))) ## return starting values for the threshold parameters: return(as.vector(start)) } start.beta <- function(X, has.intercept = TRUE) return(rep(0, NCOL(X) - has.intercept)) ## clm.start <- function(y.levels, threshold, X, has.intercept = TRUE) ## return(c(start.threshold(y.levels, threshold), ## start.beta(X, has.intercept))) clm.start <- function(y.levels, threshold, X, NOM=NULL, S=NULL, has.intercept=TRUE) { st <- start.threshold(y.levels, threshold) if(!is.null(NOM) && NCOL(NOM) > 1) st <- c(st, rep(rep(0, length(st)), ncol(NOM)-1)) start <- c(st, start.beta(X, has.intercept)) if(!is.null(S) && NCOL(S) > 1) start <- c(start, rep(0, ncol(S) - 1)) start } ordinal/R/clmm2.R0000644000176200001440000010213113573721321013242 0ustar 
liggesusers############################################################################# # Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen # # This file is part of the ordinal package for R (*ordinal*) # # *ordinal* is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # *ordinal* is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # and/or # . ############################################################################# ## This file contains: ## The main clmm2 function and some related auxiliary functions. clmm2.control <- function(method = c("ucminf", "nlminb", "model.frame"), ..., trace = 0, maxIter = 50, gradTol = 1e-4, maxLineIter = 50, innerCtrl = c("warnOnly", "noWarn", "giveError")) { method <- match.arg(method) innerCtrl <- match.arg(innerCtrl) ctrl <- list(trace=ifelse(trace < 0, 1, 0), maxIter=maxIter, gradTol=gradTol, maxLineIter=maxLineIter, innerCtrl=innerCtrl) optCtrl <- list(trace = abs(trace), ...) if(!is.numeric(unlist(ctrl[-5]))) stop("maxIter, gradTol, maxLineIter and trace should all be numeric") if(any(ctrl[-c(1, 5)] <= 0)) stop("maxIter, gradTol and maxLineIter have to be > 0") if(method == "ucminf" && !"grtol" %in% names(optCtrl)) optCtrl$grtol <- 1e-5 if(method == "ucminf" && !"grad" %in% names(optCtrl)) optCtrl$grad <- "central" list(method = method, ctrl = ctrl, optCtrl = optCtrl) } .negLogLikBase <- function(rho) { ### Update stDev, sigma, eta1Fix, and eta2Fix given new par: with(rho, { if(estimLambda > 0) lambda <- par[nxi + p + k + 1:estimLambda] if(estimStDev) stDev <- exp(par[p+nxi+k+estimLambda+ 1:s]) sigma <- if(k > 0) expSoffset * exp(drop(Z %*% par[nxi+p + 1:k])) else expSoffset eta1Fix <- drop(B1 %*% par[1:(nxi + p)]) eta2Fix <- drop(B2 %*% par[1:(nxi + p)]) }) return(invisible()) } .negLogLikMfast <- function(rho) { ## negative log-likelihood fit <- with(rho, { .C("nll", as.double(u), length(u), as.integer(grFac), as.double(stDev), as.double(o1), as.double(o2), length(o1), eta1 = as.double(eta1), eta2 = as.double(eta2), as.double(eta1Fix), as.double(eta2Fix), as.double(sigma), pr = as.double(pr), as.double(weights), as.double(lambda), as.integer(linkInt), nll = double(1) )[c("eta1", "eta2", "pr", "nll")] }) rho$eta1 <- fit$eta1 rho$eta2 <- fit$eta2 rho$pr <- fit$pr fit$nll } update.u2.v3 <- function(rho) { ### third version: C-implementation of NR-algorithm. 
.negLogLikBase(rho) ## update: par, stDev, eta1Fix, eta2Fix eta2Fix, sigma fit <- with(rho, .C("NRalgv3", as.integer(ctrl$trace), as.integer(ctrl$maxIter), as.double(ctrl$gradTol), as.integer(ctrl$maxLineIter), as.integer(grFac), as.double(stDev), as.double(o1), as.double(o2), as.double(eta1Fix), as.double(eta2Fix), as.double(sigma), as.integer(linkInt), as.double(weights), u = as.double(uStart), pr = as.double(pr), funValue = double(1), gradValues = as.double(uStart), hessValues = as.double(rep(1, length(uStart))), length(pr), length(uStart), maxGrad = double(1), conv = 0L, as.double(lambda), Niter = as.integer(Niter) )[c("u", "funValue", "gradValues", "hessValues", "maxGrad", "conv", "Niter")] ) ## Get message: message <- switch(as.character(fit$conv), "1" = "max|gradient| < tol, so current iterate is probably solution", "0" = "Non finite negative log-likelihood", "-1" = "iteration limit reached when updating the random effects", "-2" = "step factor reduced below minimum when updating the random effects") ## Check for convergence and report warning/error: if(rho$ctrl$trace > 0 && fit$conv == 1) cat("\nOptimizer converged! ", "max|grad|:", fit$maxGrad, message, fill = TRUE) if(fit$conv != 1 && rho$ctrl$innerCtrl == "warnOnly") warning(message, "\n at iteration ", rho$Niter) else if(fit$conv != 1 && rho$ctrl$innerCtrl == "giveError") stop(message, "\n at iteration ", rho$Niter) ## Store values and return: rho$Niter <- fit$Niter rho$u <- fit$u rho$D <- fit$hessValue rho$gradient <- fit$gradValue if(!is.finite(rho$negLogLik <- fit$funValue)) return(FALSE) return(TRUE) } clmm2 <- function(location, scale, nominal, random, data, weights, start, subset, na.action, contrasts, Hess = FALSE, model = TRUE, sdFixed, link = c("logistic", "probit", "cloglog", "loglog", "cauchit", "Aranda-Ordaz", "log-gamma"), lambda, doFit = TRUE, control, nAGQ = 1, threshold = c("flexible", "symmetric", "equidistant"), ...) ## Handle if model = FALSE ### Marginal fitted values? (pr | u = 0) or (pr | u = u.tilde) ? ### How can we (should we?) get u.tilde and var(u.tilde) with GHQ? ### Make safeStart function if !is.finite(negLogLik) ### Write test suite for doFit argument { R <- match.call(expand.dots = FALSE) Call <- match.call() if(missing(random)) { Call[[1]] <- as.name("clm2") return(eval.parent(Call)) } if(missing(lambda)) lambda <- NULL if(missing(contrasts)) contrasts <- NULL if(missing(control)) control <- clmm2.control(...) 
if(!setequal(names(control), c("method", "ctrl", "optCtrl"))) stop("specify 'control' via clmm2.control()") if (missing(data)) data <- environment(location) if (is.matrix(eval.parent(R$data))) R$data <- as.data.frame(data) ### Collect all variables in a single formula and evaluate to handle ### missing values correctly: m <- match(c("location", "scale", "nominal"), names(R), 0) F <- lapply(as.list(R[m]), eval.parent) ## evaluate in parent varNames <- unique(unlist(lapply(F, all.vars))) longFormula <- eval(parse(text = paste("~", paste(varNames, collapse = "+")))[1]) m <- match(c("location", "data", "subset", "weights", "random", "na.action"), names(R), 0) R <- R[c(1, m)] R$location <- longFormula R$drop.unused.levels <- TRUE R[[1]] <- as.name("model.frame") names(R)[names(R) == "location"] <- "formula" R <- eval.parent(R) nonNA <- rownames(R) ### Append nonNA index to Call$subset to get the right design matrices ### from clm2: Call$subset <- if(is.null(Call$subset)) nonNA else c(paste(deparse(Call$subset), "&"), nonNA) Call$start <- if(is.null(Call$start) || !is.null(Call$sdFixed)) Call$start else start[-length(start)] Call$random <- Call$control <- Call$nAGQ <- Call$sdFixed <- Call$innerCtrl <- NULL Call$method <- control$method Call$doFit <- Call$Hess <- FALSE Call[[1]] <- as.name("clm2") rhoM <- eval.parent(Call) if(control$method == "model.frame") return(rhoM) rhoM$call <- match.call() rhoM$randomName <- deparse(rhoM$call$random) ### Set grouping factor and stDev parameter: rhoM$grFac <- R[,"(random)"] if(!missing(sdFixed) && !is.null(sdFixed)) { stopifnot(length(sdFixed) == 1 && sdFixed > 0) rhoM$estimStDev <- FALSE rhoM$stDev <- sdFixed } else rhoM$estimStDev <- TRUE with(rhoM, { r <- nlevels(grFac) ## no. random effects grFac <- as.integer(unclass(grFac)) if(r <= 2) stop("Grouping factor must have 3 or more levels") s <- ifelse(estimStDev, 1L, 0L) ## no. variance parameters Niter <- 0L }) ### set starting values: if(missing(start)) { suppressWarnings(fitCLM(rhoM)) if(rhoM$estimStDev) rhoM$start <- rhoM$par <- c(rhoM$par, log(1)) else rhoM$start <- rhoM$par } else rhoM$start <- rhoM$par <- start rhoM$uStart <- rhoM$u <- rep(0, rhoM$r) ### Test starting values: if(length(rhoM$start) != with(rhoM, nxi + p + k + estimLambda + estimStDev)) stop("'start' is ", length(rhoM$start), " long, but should be ", with(rhoM, nxi + p + k + estimLambda + estimStDev), " long") if(rhoM$ncolXX == 0) { if(!all(diff(c(rhoM$tJac %*% rhoM$start[1:rhoM$nalpha])) > 0)) stop("Threshold starting values are not of increasing size") } ### Change the lower limit if lambda is estimated with the ### Aranda-Ordaz link and sdFixed is not supplied: if(rhoM$estimLambda > 0 && rhoM$link == "Aranda-Ordaz" && is.null(rhoM$call$sdFixed)) rhoM$limitLow <- c(rep(-Inf, length(rhoM$par)-2), 1e-5, -Inf) ### This should hardly ever be the case: .negLogLikBase(rhoM) ## set lambda, stDev, sigma, eta1Fix and eta2Fix if(!is.finite(.negLogLikMfast(rhoM))) stop("Non-finite integrand at starting values") rhoM$ctrl <- control$ctrl rhoM$optCtrl <- control$optCtrl if(rhoM$method == "nlminb") { m <- match(names(rhoM$optCtrl), c("grad","grtol"), 0) rhoM$optCtrl <- rhoM$optCtrl[!m] } ### Match doFit: if(is.logical(doFit) || is.numeric(doFit)) { if(doFit) doFit <- "C" else doFit <- "no" } else if(!is.character(doFit) || !doFit %in% c("no", "R", "C")) stop("argument 'doFit' not recognized. 
'doFit' should be\n numeric, logical or one of c('no', 'R', 'C')") ### Set ObjFun parameters: ObjFun <- getNLA2 ## same for "R" and "C" rhoM$updateU <- if(doFit == "R") update.u2 else update.u2.v3 rhoM$nAGQ <- as.integer(nAGQ) if(rhoM$nAGQ >= 2) { ghq <- gauss.hermite(rhoM$nAGQ) rhoM$ghqns <- ghq$nodes rhoM$ghqws <- ghq$weights if(doFit == "R") { ObjFun <- getNAGQinR rhoM$PRnn <- array(0, dim=c(rhoM$n, rhoM$nAGQ)) rhoM$PRrn <- array(0, dim=c(rhoM$r, rhoM$nAGQ)) rhoM$ghqws <- ghq$weights * exp(rhoM$ghqns^2) } else ObjFun <- getNAGQinC } if(rhoM$nAGQ <= -1) { ghq <- gauss.hermite(abs(rhoM$nAGQ)) rhoM$ghqns <- ghq$nodes rhoM$ghqws <- ghq$weights * exp((ghq$nodes^2)/2) if(doFit == "R"){ ObjFun <- getNGHQinR } else { ObjFun <- getNGHQinC rhoM$ghqws <- log(ghq$weights) + (ghq$nodes^2)/2 } } stopifnot(rhoM$nAGQ != 0) ## test needed? ### Fit the model: if(!doFit %in% c("C", "R")) return(rhoM) if(rhoM$nAGQ > -1) rhoM$updateU(rhoM) # Try updating the random effects rhoM$optRes <- switch(rhoM$method, "ucminf" = ucminf(rhoM$start, function(x) ObjFun(rhoM, x), control=rhoM$optCtrl), "nlminb" = nlminb(rhoM$start, function(x) ObjFun(rhoM, x), control=rhoM$optCtrl, lower = rhoM$limitLow, upper = rhoM$limitUp)) rhoM$par <- rhoM$optRes[[1]] if(Hess) { if(rhoM$link == "Aranda-Ordaz" && rhoM$estimLambda > 0 && rhoM$lambda < 1e-3) message("Cannot get Hessian because lambda = ",rhoM$lambda ," is too close to boundary.\n", " Fit model with link == 'logistic' to get Hessian") else { rhoM$Hessian <- myhess(function(x) ObjFun(rhoM, x), rhoM$par) rhoM$par <- rhoM$optRes[[1]] } } .negLogLikMfast(rhoM) ## update pr ## if(rhoM$nAGQ > -1) rhoM$updateU(rhoM) # Makes sure ranef's are evaluated at the optimum ### Post processing: res <- finalizeRhoM(rhoM) res$call <- match.call() res$na.action <- attr(R, "na.action") res$contrasts <- contrasts class(res) <- c("clmm2", "clm2") res } getNLA2 <- function(rho, par) { ### negative log-likelihood by the Laplace approximation ### (with update.u2 in C or R): if(!missing(par)) rho$par <- par if(!rho$updateU(rho)) return(Inf) if(any(rho$D < 0)) return(Inf) ## logDetD <- sum(log(rho$D/(2*pi))) logDetD <- sum(log(rho$D)) - rho$r * log(2*pi) rho$negLogLik + logDetD / 2 } getNAGQinR <- function(rho, par) { ### negative log-likelihood by adaptive Gauss-Hermite quadrature ### implemented in R: if(!missing(par)) rho$par <- par if(!rho$updateU(rho)) return(Inf) if(any(rho$D < 0)) return(Inf) with(rho, { K <- sqrt(2/D) agqws <- K %*% t(ghqws) agqns <- apply(K %*% t(ghqns), 2, function(x) x + u) ranNew <- apply(agqns, 2, function(x) x[grFac] * stDev) eta1Tmp <- (eta1Fix + o1 - ranNew) / sigma eta2Tmp <- (eta2Fix + o2 - ranNew) / sigma if(nlambda) ## PRnn <- (pfun(eta1Tmp, lambda) - pfun(eta2Tmp, lambda))^weights ## This is likely a computationally more safe solution: PRnn <- exp(weights * log(pfun(eta1Tmp, lambda) - pfun(eta2Tmp, lambda))) else ## PRnn <- (pfun(eta1Tmp) - pfun(eta2Tmp))^weights PRnn <- exp(weights * log(pfun(eta1Tmp) - pfun(eta2Tmp))) ### OPTION: The fitted values could be evaluated with getFittedC for ### better precision. for(i in 1:r) ## PRrn[i,] <- apply(PRnn[grFac == i, ], 2, prod) PRrn[i,] <- apply(PRnn[grFac == i, ,drop = FALSE], 2, prod) PRrn <- PRrn * agqws * dnorm(x=agqns, mean=0, sd=1) ### OPTION: Could this be optimized by essentially computing dnorm 'by hand'? 
    })
    -sum(log(rowSums(rho$PRrn)))
}

## tmpAGQ(rho)
tmpAGQ <- function(rho, par) {
    if(!missing(par)) rho$par <- par
    with(rho, {
        ls()
        stDev <- exp(ST[[1]][1, 1])
        nlambda <- 0
        K <- sqrt(2/D)
        agqws <- K %*% t(ghqws)
        agqns <- apply(K %*% t(ghqns), 2, function(x) x + u)
        grFac <- unclass(grFac)
        ranNew <- apply(agqns, 2, function(x) x[grFac] * stDev)
        eta1Tmp <- (eta1Fix + o1 - ranNew) / sigma
        eta2Tmp <- (eta2Fix + o2 - ranNew) / sigma
        if(nlambda)
            PRnn <- exp(weights * log(pfun(eta1Tmp, lambda) -
                                      pfun(eta2Tmp, lambda)))
        else
            PRnn <- exp(wts * log(pfun(eta1Tmp) - pfun(eta2Tmp)))
        dim(eta1Tmp)
        exp(wts[IND] * log(pfun(eta1Tmp[IND, ]) - pfun(eta2Tmp[IND, ])))
        PRrn <- do.call(rbind, lapply(1:dims$q, function(i) {
            apply(PRnn[grFac == i, ,drop = FALSE], 2, prod) }))
        head(PRrn)
        PRrn <- do.call(rbind, lapply(1:dims$q, function(i) {
            apply(PRnn[grFac == i, ,drop = FALSE], 2,
                  function(x) sum(log(x))) }))
        head(PRrn)

        ## Could we do something like
        PRnn <- wts * log(pfun(eta1Tmp) - pfun(eta2Tmp))
        PRrn <- do.call(rbind, lapply(1:dims$q, function(i) {
            apply(PRnn[grFac == i, ,drop = FALSE], 2,
                  function(x) sum(x)) }))
        head(PRrn, 20)
        ## to avoid first exp()ing and then log()ing?
        head(exp(PRrn), 20)
        range(PRrn)
        exp(range(PRrn))
        out <- PRrn + log(agqws) + log(dnorm(x=agqns, mean=0, sd=1))
        log(2 * 3)
        log(2) + log(3)

        PRnn[grFac == 12, , drop=FALSE]
        IND <- which(grFac == 12)
        cbind(IND, wts[IND], PRnn[IND, ])
        dim(PRrn)
        ## There seems to be underflow already in the computations
        ## in PRnn, which propagates to PRrn
        PRrn <- PRrn * agqws * dnorm(x=agqns, mean=0, sd=1)
    })
    -sum(log(rowSums(rho$PRrn)))
}

getNAGQinC <- function(rho, par) {
### negative log-likelihood by adaptive Gauss-Hermite quadrature
### implemented in C:
    if(!missing(par)) rho$par <- par
    if(!rho$updateU(rho)) return(Inf)
    if(any(rho$D < 0)) return(Inf)
    with(rho, {
        .C("getNAGQ",
           nll = double(1), ## nll
           as.integer(grFac), ## grFac
           as.double(stDev), ## stDev
           as.double(eta1Fix),
           as.double(eta2Fix),
           as.double(o1),
           as.double(o2),
           as.double(sigma), ## Sigma
           as.double(weights),
           length(sigma), ## nx - no. obs
           length(uStart), ## nu - no. re
           as.double(ghqns),
           as.double(log(ghqws)), ## lghqws
           as.double(ghqns^2), ## ghqns2
           as.double(u),
           as.double(D),
           as.integer(abs(nAGQ)),
           as.integer(linkInt),
           as.double(lambda))$nll
    })
}

getNGHQinR <- function(rho, par) {
### negative log-likelihood by standard Gauss-Hermite quadrature
### implemented in R:
    if(!missing(par)) rho$par <- par
    .negLogLikBase(rho) ## Update lambda, stDev, sigma and eta*Fix
    with(rho, {
        ns <- ghqns * stDev
        SS <- numeric(r) ## summed likelihood
        for(i in 1:r) {
            ind <- grFac == i
            eta1Fi <- eta1Fix[ind]
            eta2Fi <- eta2Fix[ind]
            o1i <- o1[ind]
            o2i <- o2[ind]
            si <- sigma[ind]
            wt <- weights[ind]
            for(h in 1:abs(nAGQ)) {
                eta1s <- (eta1Fi + o1i - ns[h]) / si
                eta2s <- (eta2Fi + o2i - ns[h]) / si
                ## SS[i] <- exp(sum(wt * log(pfun(eta1s) - pfun(eta2s)))) *
                ##     ghqws[h] * exp(ghqns[h]^2) * dnorm(x=ghqns[h]) + SS[i]
                SS[i] <- exp(sum(wt * log(pfun(eta1s) - pfun(eta2s)))) *
                    ghqws[h] + SS[i]
### OPTION: The fitted values could be evaluated with getFittedC for
### better precision.
} } -sum(log(SS)) + r * log(2*pi)/2 }) } getNGHQinC <- function(rho, par) { ### negative log-likelihood by standard Gauss-Hermite quadrature ### implemented in C: if(!missing(par)) rho$par <- par .negLogLikBase(rho) ## Update lambda, stDev, sigma and eta*Fix with(rho, { .C("getNGHQ_C", nll = double(1), as.integer(grFac), as.double(stDev), as.double(eta1Fix), as.double(eta2Fix), as.double(o1), as.double(o2), as.double(sigma), as.double(weights), length(sigma), length(uStart), as.double(ghqns), as.double(ghqws), as.integer(abs(nAGQ)), as.integer(linkInt), as.double(ghqns * stDev), as.double(lambda))$nll }) } finalizeRhoM <- function(rhoM) { if(rhoM$method == "ucminf") { if(rhoM$optRes$info[1] > rhoM$optCtrl[["grtol"]]) warning("clmm2 may not have converged:\n optimizer 'ucminf' terminated with max|gradient|: ", rhoM$optRes$info[1], call.=FALSE) rhoM$convergence <- ifelse(rhoM$optRes$info[1] > rhoM$optCtrl[["grtol"]], FALSE, TRUE) } if(rhoM$method == "nlminb") { rhoM$convergence <- ifelse(rhoM$optRes$convergence == 0, TRUE, FALSE) if(!rhoM$convergence) warning("clmm2 may not have converged:\n optimizer 'nlminb' terminated with message: ", rhoM$optRes$message, call.=FALSE) } if(rhoM$ctrl$gradTol < max(abs(rhoM$gradient))) warning("Inner loop did not converge at termination:\n max|gradient| = ", max(abs(rhoM$gradient))) with(rhoM, { if(nxi > 0) { xi <- par[1:nxi] names(xi) <- xiNames thetaNames <- paste(lev[-length(lev)], lev[-1], sep="|") Alpha <- Theta <- matrix(par[1:nxi], nrow=ncolXX, byrow=TRUE) Theta <- t(apply(Theta, 1, function(x) c(tJac %*% x))) if(ncolXX > 1){ dimnames(Theta) <- list(dnXX[[2]], thetaNames) dimnames(Alpha) <- list(dnXX[[2]], alphaNames) } else { Theta <- c(Theta) Alpha <- c(Alpha) names(Theta) <- thetaNames names(Alpha) <- alphaNames } coefficients <- xi } else coefficients <- numeric(0) if(p > 0) { beta <- par[nxi + 1:p] names(beta) <- dnX[[2]] coefficients <- c(coefficients, beta) } if(k > 0) { zeta <- par[nxi+p + 1:k] names(zeta) <- dnZ[[2]] coefficients <- c(coefficients, zeta) } if(estimLambda > 0) { names(lambda) <- "lambda" coefficients <- c(coefficients, lambda) } if(s > 0) { stDev <- exp(par[nxi+p+k + estimLambda + 1:s]) coefficients <- c(coefficients, stDev) } names(stDev) <- randomName if(exists("Hessian", inherits=FALSE)) dimnames(Hessian) <- list(names(coefficients), names(coefficients)) edf <- p + nxi + k + estimLambda + s nobs <- sum(weights) fitted.values <- pr df.residual = nobs - edf ranef <- u * stDev condVar <- 1/D * stDev^2 logLik <- -optRes[[2]] }) res <- as.list(rhoM) keepNames <- c("ranef", "df.residual", "fitted.values", "edf", "start", "stDev", "beta", "coefficients", "zeta", "Alpha", "Theta", "xi", "lambda", "convergence", "Hessian", "gradient", "optRes", "logLik", "Niter", "grFac", "call", "scale", "location", "nominal", "method", "y", "lev", "nobs", "threshold", "estimLambda", "link", "nAGQ", "condVar", "contrasts", "na.action") m <- match(keepNames, names(res), 0) res <- res[m] res } anova.clmm2 <- function (object, ..., test = c("Chisq", "none")) { anova.clm2(object, ..., test = c("Chisq", "none")) } print.clmm2 <- function(x, ...) 
{ if(x$nAGQ >= 2) cat(paste("Cumulative Link Mixed Model fitted with the adaptive", "Gauss-Hermite \nquadrature approximation with", x$nAGQ ,"quadrature points"), "\n\n") else if(x$nAGQ <= -1) cat(paste("Cumulative Link Mixed Model fitted with the", "Gauss-Hermite \nquadrature approximation with", abs(x$nAGQ) ,"quadrature points"), "\n\n") else cat("Cumulative Link Mixed Model fitted with the Laplace approximation\n", fill=TRUE) if(!is.null(cl <- x$call)) { cat("Call:\n") dput(cl, control=NULL) } if(length(x$stDev)) { cat("\nRandom effects:\n") varMat <- matrix(c(x$stDev^2, x$stDev), nrow = length(x$stDev), ncol=2) rownames(varMat) <- names(x$stDev) colnames(varMat) <- c("Var", "Std.Dev") print(varMat, ...) } else { cat("\nNo random effects\n") } if(length(x$beta)) { cat("\nLocation coefficients:\n") print(x$beta, ...) } else { cat("\nNo location coefficients\n") } if(length(x$zeta)) { cat("\nScale coefficients:\n") print(x$zeta, ...) } else { cat("\nNo Scale coefficients\n") } if(x$estimLambda > 0) { cat("\nLink coefficient:\n") print(x$lambda) } if(length(x$xi) > 0) { cat("\nThreshold coefficients:\n") print(x$Alpha, ...) if(x$threshold != "flexible") { cat("\nThresholds:\n") print(x$Theta, ...) } } cat("\nlog-likelihood:", format(x$logLik, nsmall=2), "\n") cat("AIC:", format(-2*x$logLik + 2*x$edf, nsmall=2), "\n") if(nzchar(mess <- naprint(x$na.action))) cat("(", mess, ")\n", sep="") invisible(x) } vcov.clmm2 <- function(object, ...) { if(is.null(object$Hessian)) { stop("Model needs to be fitted with Hess = TRUE") } dn <- names(object$coefficients) structure(solve(object$Hessian), dimnames = list(dn, dn)) } summary.clmm2 <- function(object, digits = max(3, .Options$digits - 3), correlation = FALSE, ...) { estimStDev <- !("sdFixed" %in% names(as.list(object$call))) edf <- object$edf coef <- with(object, matrix(0, edf-estimStDev, 4, dimnames = list(names(coefficients[seq_len(edf-estimStDev)]), c("Estimate", "Std. Error", "z value", "Pr(>|z|)")))) coef[, 1] <- object$coefficients[seq_len(edf-estimStDev)] if(is.null(object$Hessian)) { stop("Model needs to be fitted with Hess = TRUE") } vc <- try(vcov(object), silent = TRUE) if(inherits(vc, "try-error")) { warning("Variance-covariance matrix of the parameters is not defined") coef[, 2:4] <- NaN if(correlation) warning("Correlation matrix is unavailable") object$condHess <- NaN } else { sd <- sqrt(diag(vc)) coef[, 2] <- sd[seq_len(edf - estimStDev)] object$condHess <- with(eigen(object$Hessian, only.values = TRUE), abs(max(values) / min(values))) coef[, 3] <- coef[, 1]/coef[, 2] coef[, 4] <- 2*pnorm(abs(coef[, 3]), lower.tail=FALSE) if(correlation) object$correlation <- (vc/sd)/rep(sd, rep(object$edf, object$edf)) } object$coefficients <- coef object$digits <- digits varMat <- matrix(c(object$stDev^2, object$stDev), nrow = length(object$stDev), ncol=2) rownames(varMat) <- names(object$stDev) colnames(varMat) <- c("Var", "Std.Dev") object$varMat <- varMat class(object) <- "summary.clmm2" object } print.summary.clmm2 <- function(x, digits = x$digits, signif.stars = getOption("show.signif.stars"), ...) 
{ if(x$nAGQ >=2) cat(paste("Cumulative Link Mixed Model fitted with the adaptive", "Gauss-Hermite \nquadrature approximation with", x$nAGQ ,"quadrature points\n\n")) else if(x$nAGQ <= -1) cat(paste("Cumulative Link Mixed Model fitted with the", "Gauss-Hermite \nquadrature approximation with", abs(x$nAGQ) ,"quadrature points"), "\n\n") else cat("Cumulative Link Mixed Model fitted with the Laplace approximation\n", fill=TRUE) if(!is.null(cl <- x$call)) { cat("Call:\n") dput(cl, control=NULL) } if(length(x$stDev)) { cat("\nRandom effects:\n") print(x$varMat, ...) } else { cat("\nNo random effects\n") } ### OPTION: Should the number of obs. and the number of groups be ### displayed as in clmm? coef <- format(round(x$coefficients, digits=digits)) coef[,4] <- format.pval(x$coefficients[, 4]) p <- length(x$beta); nxi <- length(x$xi) k <- length(x$zeta); u <- x$estimLambda if(p > 0) { cat("\nLocation coefficients:\n") print(coef[nxi + 1:p, , drop=FALSE], quote = FALSE, ...) } else { cat("\nNo location coefficients\n") } if(k > 0) { cat("\nScale coefficients:\n") print(coef[(nxi+p+1):(nxi+p+k), , drop=FALSE], quote = FALSE, ...) } else { cat("\nNo scale coefficients\n") } if(x$estimLambda > 0) { cat("\nLink coefficients:\n") print(coef[(nxi+p+k+1):(nxi+p+k+u), , drop=FALSE], quote = FALSE, ...) } if(nxi > 0) { cat("\nThreshold coefficients:\n") print(coef[1:nxi, -4, drop=FALSE], quote = FALSE, ...) } cat("\nlog-likelihood:", format(x$logLik, nsmall=2), "\n") cat("AIC:", format(-2*x$logLik + 2*x$edf, nsmall=2), "\n") cat("Condition number of Hessian:", format(x$condHess, nsmall=2), "\n") if(nzchar(mess <- naprint(x$na.action))) cat("(", mess, ")\n", sep="") if(!is.null(correl <- x$correlation)) { cat("\nCorrelation of Coefficients:\n") ll <- lower.tri(correl) correl[ll] <- format(round(correl[ll], digits)) correl[!ll] <- "" print(correl[-1, -ncol(correl)], quote = FALSE, ...) } invisible(x) } ## ranef.clmm2 <- function(x) { ## x$ranef ## } ## Trace <- function(iter, stepFactor, val, maxGrad, par, first=FALSE) { ## t1 <- sprintf(" %3d: %-5e: %.3f: %1.3e: ", ## iter, stepFactor, val, maxGrad) ## t2 <- formatC(par) ## if(first) ## cat("iter: step factor: Value: max|grad|: Parameters:\n") ## cat(t1, t2, "\n") ## } gauss.hermite <- function (n) { n <- as.integer(n) if (n < 0) stop("need non-negative number of nodes") if (n == 0) return(list(nodes = numeric(0), weights = numeric(0))) i <- 1:n i1 <- i[-n] muzero <- sqrt(pi) a <- rep(0, n) b <- sqrt(i1/2) A <- rep(0, n * n) A[(n + 1) * (i1 - 1) + 2] <- b A[(n + 1) * i1] <- b dim(A) <- c(n, n) vd <- eigen(A, symmetric = TRUE) w <- rev(as.vector(vd$vectors[1, ])) w <- muzero * w^2 x <- rev(vd$values) list(nodes = x, weights = w) } profile.clmm2 <- function(fitted, alpha = 0.01, range, nSteps = 20, trace = 1, ...) 
{ estimStDev <- !("sdFixed" %in% names(as.list(fitted$call))) if(!estimStDev) ## || is.null(fitted$Hessian)) fitted <- update(fitted, Hess = TRUE, sdFixed = NULL) MLogLik <- fitted$logLik MLstDev <- fitted$stDev if(missing(range) && is.null(fitted$Hessian)) stop("'range' should be specified or model fitted with 'Hess = TRUE'") if(missing(range) && !is.null(fitted$Hessian)) { range <- log(fitted$stDev) + qnorm(1 - alpha/2) * c(-1, 1) * sqrt(vcov(fitted)[fitted$edf, fitted$edf]) range <- exp(range) pct <- paste(round(100*c(alpha/2, 1-alpha/2), 1), "%") ci <- array(NA, dim = c(1, 2), dimnames = list("stDev", pct)) ci[] <- range } stopifnot(all(range > 0)) logLik <- numeric(nSteps) stDevSeq <- seq(min(range), max(range), length.out = nSteps) if(trace) message("Now profiling stDev with ", nSteps, " steps: i =") if(trace) cat(1, "") rho <- update(fitted, Hess = FALSE, sdFixed = min(range)) logLik[1] <- rho$logLik start <- as.vector(rho$coefficients) for(i in 2:nSteps){ if(trace) cat(i, "") rho <- update(rho, sdFixed = stDevSeq[i], start = start) logLik[i] <- rho$logLik start <- as.vector(rho$coefficients) } if(trace) cat("\n") if(any(logLik > fitted$logLik)) warning("Profiling found a better optimum,", " so original fit had not converged") sgn <- 2*(stDevSeq > MLstDev) -1 Lroot <- sgn * sqrt(2) * sqrt(-logLik + MLogLik) res <- data.frame("Lroot" = c(0, Lroot), "stDev" = c(MLstDev, stDevSeq)) res <- res[order(res[,1]),] if(!all(diff(res[,2]) > 0)) warning("likelihood is not monotonically decreasing from maximum,\n", " so profile may be unreliable for stDev") val <- structure(list(stDev = res), original.fit = fitted) if(exists("ci", inherits=FALSE)) attr(val, "WaldCI") <- ci class(val) <- c("profile.clmm2", "profile") val } confint.profile.clmm2 <- function(object, parm = seq_along(Pnames), level = 0.95, ...) { Pnames <- names(object) confint.profile.clm2(object, parm = parm, level = level, ...) } plot.profile.clmm2 <- function(x, parm = seq_along(Pnames), level = c(0.95, 0.99), Log = FALSE, relative = TRUE, fig = TRUE, n = 1e3, ..., ylim = NULL) { Pnames <- names(x) plot.profile.clm2(x, parm = parm, level = level, Log = Log, relative = relative, fig = fig, n = n, ..., ylim = ylim) } update.clmm2 <- function(object, formula., location, scale, nominal, ..., evaluate = TRUE) { call <- object$call if (is.null(call)) stop("need an object with call component") extras <- match.call(expand.dots = FALSE)$... 
if (!missing(location)) call$location <- update.formula(formula(attr(object$location, "terms")), location) if (!missing(scale)) call$scale <- if(!is.null(object$scale)) update.formula(formula(attr(object$scale, "terms")), scale) else scale if (!missing(nominal)) call$nominal <- if(!is.null(object$nominal)) update.formula(formula(attr(object$nominal, "terms")), nominal) else nominal if (length(extras)) { existing <- !is.na(match(names(extras), names(call))) for (a in names(extras)[existing]) call[[a]] <- extras[[a]] if (any(!existing)) { call <- c(as.list(call), extras[!existing]) call <- as.call(call) } } if (evaluate) eval(call, parent.frame()) else call } ordinal/R/clmm.formula.R0000644000176200001440000001007713332052252014624 0ustar liggesusers############################################################################# # Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen # # This file is part of the ordinal package for R (*ordinal*) # # *ordinal* is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # *ordinal* is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # and/or # . ############################################################################# ## This file contains: ## Functions to process lmer-style mixed-model formulae. These ## functions are borrowed from the lme4 package but have later been ## modified. findbars <- function(term) ### Return the pairs of expressions that separated by vertical bars { if (is.name(term) || !is.language(term)) return(NULL) if (term[[1]] == as.name("(")) return(findbars(term[[2]])) if (!is.call(term)) stop("term must be of class call") if (term[[1]] == as.name('|')) return(term) if (length(term) == 2) return(findbars(term[[2]])) c(findbars(term[[2]]), findbars(term[[3]])) } nobars <- function(term) ### term - usually the third element of a formula object: formula[[3]] ### returns a list of terms ### Return the formula omitting the pairs of expressions that are ### separated by vertical bars { if (!('|' %in% all.names(term))) return(term) if (is.call(term) && term[[1]] == as.name('|')) return(NULL) if (length(term) == 2) { nb <- nobars(term[[2]]) if (is.null(nb)) return(NULL) term[[2]] <- nb return(term) } nb2 <- nobars(term[[2]]) nb3 <- nobars(term[[3]]) if (is.null(nb2)) return(nb3) if (is.null(nb3)) return(nb2) term[[2]] <- nb2 term[[3]] <- nb3 term } subbars <- function(term) ### Substitute the '+' function for the '|' function { if (is.name(term) || !is.language(term)) return(term) if (length(term) == 2) { term[[2]] <- subbars(term[[2]]) return(term) } stopifnot(length(term) >= 3) if (is.call(term) && term[[1]] == as.name('|')) term[[1]] <- as.name('+') for (j in 2:length(term)) term[[j]] <- subbars(term[[j]]) term } subnms <- function(term, nlist) ### Substitute any names from nlist in term with 1 { if (!is.language(term)) return(term) if (is.name(term)) { if (any(unlist(lapply(nlist, get("=="), term)))) return(1) return(term) } stopifnot(length(term) >= 2) for (j in 2:length(term)) term[[j]] <- subnms(term[[j]], nlist) term } slashTerms <- function(x) ### Return the list of '/'-separated terms in an expression that ### contains 
slashes { if (!("/" %in% all.names(x))) return(x) if (x[[1]] != as.name("/")) stop("unparseable formula for grouping factor") list(slashTerms(x[[2]]), slashTerms(x[[3]])) } makeInteraction <- function(x) ### from a list of length 2 return recursive interaction terms { if (length(x) < 2) return(x) trm1 <- makeInteraction(x[[1]]) trm11 <- if(is.list(trm1)) trm1[[1]] else trm1 list(substitute(foo:bar, list(foo=x[[2]], bar = trm11)), trm1) } expandSlash <- function(bb) ### expand any slashes in the grouping factors returned by findbars { if (!is.list(bb)) return(expandSlash(list(bb))) ## I really do mean lapply(unlist(... - unlist returns a ## flattened list in this case unlist(lapply(bb, function(x) { if (length(x) > 2 && is.list(trms <- slashTerms(x[[3]]))) return(lapply(unlist(makeInteraction(trms)), function(trm) substitute(foo|bar, list(foo = x[[2]], bar = trm)))) x })) } ordinal/R/clm.anova.R0000644000176200001440000002721013332052252014103 0ustar liggesusers############################################################################# # Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen # # This file is part of the ordinal package for R (*ordinal*) # # *ordinal* is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # *ordinal* is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # and/or # . ############################################################################# # clm.anova.R single_anova <- function(object, type = c("III", "II", "I", "3", "2", "1", "marginal", "2b")) { type <- type[1L] if(!is.character(type)) type <- as.character(type) type <- match.arg(type) if(type %in% c("I", "II", "III")) type <- as.character(as.integer(as.roman(type))) if(any(is.na(vcov(object)))) stop("anova table not available with non-finite values in vcov(object)") # Get list of contrast matrices (L) - one for each model term: L_list <- if(type == "1") { get_contrasts_type1(object) } else if(type == "2") { get_contrasts_type2_unfolded(object) } else if(type == "2b") { get_contrasts_type2(object) } else if(type == "3") { get_contrasts_type3(object) } else if(type == "marginal") { get_contrasts_marginal(object) } else { stop("'type' not recognized") } # Add cols to L for alpha, zeta and lambda params: L_list <- adjust_contrast_for_param(object, L_list) # Get F-test for each term and collect in table: table <- rbindall(lapply(L_list, function(L) contestMD(object, L))) # Format ANOVA table and return: if(length(nm <- setdiff(names(L_list), rownames(table)))) { tab <- array(NA_real_, dim=c(length(nm), ncol(table)), dimnames = list(nm, colnames(table))) table <- rbind(table, tab)[names(L_list), ] } # Format 'type': type <- if(type == "marginal") { "Marginal" } else if(grepl("b|c", type)) { alph <- gsub("[0-9]", "", type) paste0("Type ", as.roman(as.integer(gsub("b|c", "", type))), alph) } else paste("Type", as.roman(as.integer(type))) attr(table, "heading") <- paste(type, "Analysis of Deviance Table with Wald chi-square tests\n") attr(table, "hypotheses") <- L_list class(table) <- c("anova", "data.frame") table } adjust_contrast_for_param <- function(model, L) { nalpha <- length(model$alpha) nzeta <- 
if(is.null(model$zeta)) 0L else length(model$zeta) nlambda <- if(is.null(model$lambda)) 0L else length(model$lambda) nextra <- nzeta + nlambda # pre and post add extra cols to L: add <- function(L) { pre <- array(0, dim=c(nrow(L), nalpha)) post <- array(0, dim=c(nrow(L), nextra)) cbind(pre, L[, -1L, drop=FALSE], post) } if(!is.list(L)) add(L) else lapply(L, add) } model_matrix <- function(object, ...) { if(!inherits(object, "clm")) return(model.matrix(object, ...)) X <- model.matrix(object)$X if(!any(object$aliased$beta)) return(X) remove <- c(FALSE, object$aliased$beta) newX <- X[, !remove, drop=FALSE] attr(newX, "assign") <- attr(X, "assign")[!remove] contr <- attr(X, "contrasts") if(!is.null(contr)) attr(newX, "contrasts") <- contr newX } contestMD <- function(model, L, rhs=0, eps=sqrt(.Machine$double.eps), ...) { mk_Qtable <- function(Qvalue, df) { pvalue <- pchisq(q=Qvalue, df=df, lower.tail=FALSE) data.frame("Df"=df, "Chisq"=Qvalue, "Pr(>Chisq)"=pvalue, check.names = FALSE) } if(!is.matrix(L)) L <- matrix(L, ncol=length(L)) stopifnot(is.matrix(L), is.numeric(L), ncol(L) == length(coef(model, na.rm=TRUE))) if(length(rhs) == 1L) rhs <- rep(rhs, nrow(L)) stopifnot(is.numeric(rhs), length(rhs) == nrow(L)) if(nrow(L) == 0L) { # May happen if there are no fixed effects x <- numeric(0L) return(mk_Qtable(x, x)) } if(any(is.na(L))) return(mk_Qtable(NA_real_, NA_real_)) beta <- coef(model, na.rm=TRUE) vcov_beta <- vcov(model) # Adjust beta for rhs: if(!all(rhs == 0)) beta <- beta - drop(MASS::ginv(L) %*% rhs) # Compute Var(L beta) and eigen-decompose: VLbeta <- L %*% vcov_beta %*% t(L) # Var(contrast) = Var(Lbeta) eig_VLbeta <- eigen(VLbeta) P <- eig_VLbeta$vectors d <- eig_VLbeta$values tol <- max(eps * d[1], 0) pos <- d > tol q <- sum(pos) # rank(VLbeta) if(q < nrow(L) && !all(rhs == 0)) warning("Contrast is rank deficient and test may be affected") if(q <= 0) { # shouldn't happen if L is a proper contrast x <- numeric(0L) return(mk_Qtable(x, x)) } PtL <- crossprod(P, L)[1:q, ] # Compute t-squared values and Q-value: t2 <- drop(PtL %*% beta)^2 / d[1:q] Qvalue <- sum(t2) mk_Qtable(Qvalue, df=q) } ############################################## ######## get_contrasts_type3 ############################################## get_contrasts_type3 <- function(model, which=NULL) { term_names <- attr(terms(model), "term.labels") # Extract original design matrix: Xorig <- model_matrix(model) # Assumes Xorig is full (column) rank if(is.null(which)) { which <- term_names # If model has at most one term return Type I contrasts: if(ncol(Xorig) <= 1L || length(term_names) <= 1L) return(get_contrasts_type1(model)) } else stopifnot(is.character(which), all(which %in% term_names)) # Extract contrast coding in Xorig: codings <- unlist(attr(Xorig, "contrast")) # If only treatment contrasts are used we can just return the type 3 # contrasts for contr.treatment coding: if(length(codings) > 0 && all(is.character(codings)) && all(codings %in% c("contr.treatment"))) return(extract_contrasts_type3(model, X=Xorig)) # otherwise we need to map the type III contrasts to whatever contrast # coding was used: X <- get_model_matrix(model, type="remake", contrasts="contr.treatment") # Ensure that X is full (column) rank: X <- ensure_full_rank(X, silent=TRUE, test.ans=FALSE) # Extract contrasts assuming contr.treatment coding: type3ctr <- extract_contrasts_type3(model, X=X) map <- zapsmall(ginv(X) %*% Xorig) # Maps between contrast codings rownames(map) <- colnames(X) lapply(type3ctr[which], function(L) L %*% map) } 
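##############################################
######## usage sketch (illustration only)
##############################################
## A brief sketch of how the contrast machinery above is exercised
## (added for illustration -- not part of the package sources). It
## assumes the 'wine' data set shipped with the package; single_anova()
## is unexported and appears to be the workhorse behind the
## single-model anova() method in recent versions of the package:
if(FALSE) {
    fm <- clm(rating ~ temp * contact, data = wine)
    single_anova(fm, type = "III")  # Wald chi-square test per model term
    single_anova(fm, type = "II")   # type II contrasts via get_contrasts_type2()
}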
##############################################
######## get_contrasts_type1
##############################################
get_contrasts_type1 <- function(model) {
    terms <- terms(model)
    X <- model_matrix(model)
    nalpha <- length(model$alpha)
    p <- ncol(X)
    if(p == 0L) return(list(matrix(numeric(0L), nrow=0L))) # no fixef
    if(p == 1L && attr(terms, "intercept")) # intercept-only model
        return(list(matrix(numeric(0L), ncol=nalpha)))
    # Compute 'normalized' doolittle factorization of XtX:
    L <- if(p == 1L) matrix(1L) else t(doolittle(crossprod(X))$L)
    dimnames(L) <- list(colnames(X), colnames(X))
    # Determine which rows of L belong to which term:
    ind.list <- term2colX(terms, X)[attr(terms, "term.labels")]
    lapply(ind.list, function(rows) L[rows, , drop=FALSE])
}

##############################################
######## get_contrasts_type2_unfolded
##############################################
get_contrasts_type2_unfolded <- function(model, which=NULL) {
    # Computes the 'genuine type II contrast' for all terms that are
    # contained in other terms. For all terms which are not contained in other
    # terms, the simple marginal contrast is computed.
    X <- model_matrix(model)
    Terms <- terms(model)
    term_names <- attr(Terms, "term.labels")
    if(is.null(which)) {
        which <- term_names
        # If model has at most one term return Type I contrasts:
        if(ncol(X) <= 1L || length(term_names) <= 1L)
            return(get_contrasts_type1(model))
    } else stopifnot(is.character(which), all(which %in% term_names))
    is_contained <- containment(model)
    do_marginal <- names(is_contained)[sapply(is_contained, length) == 0L]
    do_type2 <- setdiff(term_names, do_marginal)

    Llist <- if(!length(do_marginal)) list() else
        get_contrasts_marginal(model, which=do_marginal)
    if(length(do_type2))
        Llist <- c(Llist, get_contrasts_type2(model, which=do_type2))
    Llist[term_names]
}

##############################################
######## get_contrasts_type2
##############################################
get_contrasts_type2 <- function(model, which=NULL) {
    # Computes the type 2 contrasts - either for all terms or for those
    # included in 'which' (a chr vector naming model terms).
    # returns a list
    X <- model_matrix(model)
    nalpha <- length(model$alpha)
    terms <- terms(model)
    data_classes <- attr(terms(model), "dataClasses")
    if(is.null(asgn <- attr(X, "assign")))
        stop("design matrix 'X' should have a non-null 'assign' attribute")
    term_names <- attr(terms, "term.labels")
    if(is.null(which)) {
        which <- term_names
        # If model has at most one term return Type I contrasts:
        if(ncol(X) <= 1L || length(term_names) <= 1L)
            return(get_contrasts_type1(model))
    } else stopifnot(is.character(which), all(which %in% term_names))
    which <- setNames(as.list(which), which)
    # Compute containment:
    is_contained <- containment(model)
    # Compute term assignment list: map from terms to columns in X
    has_intercept <- attr(terms, "intercept") > 0
    col_terms <- if(has_intercept) c("(Intercept)", term_names)[asgn + 1] else
        term_names[asgn[asgn > 0]]
    if(!length(col_terms) == ncol(X)) # should never happen.
stop("An error happended when computing Type II contrasts") term2colX <- split(seq_along(col_terms), col_terms)[unique(col_terms)] # Compute contrast for each term - return as named list: lapply(which, function(term) { # Reorder the cols in X to [, unrelated_to_term, term, contained_in_term] cols_term <- unlist(term2colX[c(term, is_contained[[term]])]) Xnew <- cbind(X[, -cols_term, drop=FALSE], X[, cols_term, drop=FALSE]) # Compute order of terms in Xnew: newXcol_terms <- c(col_terms[-cols_term], col_terms[cols_term]) # Compute Type I contrasts for the reordered X: Lc <- t(doolittle(crossprod(Xnew))$L) dimnames(Lc) <- list(colnames(Xnew), colnames(Xnew)) # Extract rows for term and get original order of columns: Lc[newXcol_terms == term, colnames(X), drop=FALSE] }) } ############################################## ######## get_contrasts_marginal ############################################## #' @importFrom stats model.matrix terms get_contrasts_marginal <- function(model, which=NULL) { # Computes marginal contrasts. # # No tests of conformity with coefficients are implemented # # returns a list X <- model_matrix(model) terms <- terms(model) term_names <- attr(terms, "term.labels") if(is.null(which)) { which <- term_names # If model has at most one term return Type I contrasts: if(ncol(X) <= 1L || length(term_names) <= 1L) return(get_contrasts_type1(model)) } else stopifnot(is.character(which), all(which %in% term_names)) # Compute map from terms to columns in X and contrasts matrix term2colX <- term2colX(terms, X) L <- structure(diag(ncol(X)), dimnames = list(colnames(X), colnames(X))) # Extract contrast for each term - return as named list: which <- setNames(as.list(which), which) lapply(which, function(term) { L[term2colX[[term]], , drop=FALSE] }) } ############################################## ######## rbindall ############################################## rbindall <- function(...) do.call(rbind, ...) cbindall <- function(...) do.call(cbind, ...) ordinal/R/clm.frames.R0000644000176200001440000002410113460411313014247 0ustar liggesusers############################################################################# # Copyright (c) 2010-2019 Rune Haubo Bojesen Christensen # # This file is part of the ordinal package for R (*ordinal*) # # *ordinal* is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # *ordinal* is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # and/or # . ############################################################################# ## This file contains: ## methods for computing, manipulating and extracting design matrices, ## weights, offsets, model.frames and things like that. 
## ################################# ## ## call sequence ## clm() { ## get_clmFormulas() ## get_clm.mf() ## get_clmTerms() # optionally ## get_clmDesign() ## ## makeThresholds() ## drop.cols() ## ## clm.fit.default() ## get_clmInfoTab() ## } ## ## get_clmFormulas() { ## getFullForm() ## } ## ## get_clm.mf() { ## model.frame() ## } ## ## get_clmTerms() { ## get_clm.mf() ## } ## ## get_clmDesign() { ## checkContrasts() ## get_clmDM() ## for formula, scale, nominal ## getWeights() ## get_clmY() ## } ## ## get_clmDM() { ## model.matrix() ## getContrasts() ## getOffset() ## } get_clmTerms <- function(mc, formulas, call.envir=parent.frame(2L)) ### Compute terms objects for each of the formulas. { ## We need this approach in order to get the predvars and ## dataClasses attributes of the terms objects. nms <- c("formula", "scale", "nominal") keep <- match(nms, names(formulas), nomatch=0) lapply(formulas[keep], function(form) { terms(get_clm.mf(mc, form, attr(formulas, "envir"), call.envir)) }) } get_clmDesign <- function(fullmf, terms.list, contrasts) { ### Compute (y, X, weights, off, S, NOM etc.) for a clm object. ### clm-internal+external ### ### terms.list: list of terms.objects. stopifnot(all(sapply(terms.list, inherits, "terms"))) ## Check that contrasts are specified only for terms in the model: checkContrasts(terms=attr(fullmf, "terms"), contrasts=contrasts) ## Extract X (design matrix for location effects) + terms, offset: res <- get_clmDM(fullmf, terms.list[["formula"]], contrasts, type="formula") res$terms <- terms.list[["formula"]] res$contrasts <- attr(res$X, "contrasts") res$xlevels <- .getXlevels(res$terms, fullmf) res$na.action <- attr(fullmf, "na.action") ## Extract weights: res$weights <- getWeights(fullmf) ## Extract model response: res <- c(get_clmY(fullmf, res$weights), res) ## Extract S (design matrix for the scale effects): if(!is.null(terms.list$scale)) { ans <- get_clmDM(fullmf, terms.list[["scale"]], contrasts, type="scale") res$S <- ans$X res$S.terms <- terms.list[["scale"]] res$S.off <- ans$offset res$S.contrasts <- attr(res$S, "contrasts") res$S.xlevels <- .getXlevels(res$S.terms, fullmf) if(attr(res$S.terms, "response") != 0) stop("response not allowed in 'scale'", call.=FALSE) } ## Extract NOM (design matrix for the nominal effects): if(!is.null(terms.list$nominal)) { ans <- get_clmDM(fullmf, terms.list[["nominal"]], contrasts, type="nominal") res$NOM <- ans$X res$nom.terms <- terms.list[["nominal"]] res$nom.contrasts <- attr(res$NOM, "contrasts") res$nom.xlevels <- .getXlevels(res$nom.terms, fullmf) if(attr(res$nom.terms, "response") != 0) stop("response not allowed in 'nominal'", call.=FALSE) if(!is.null(attr(res$nom.terms, "offset"))) stop("offset not allowed in 'nominal'", call.=FALSE) } ## Return results (list of design matrices etc.): res ### NOTE: X, S and NOM are with dimnames and intercepts are ### guaranteed. They may be column rank deficient. } get_clmDM <- function(fullmf, terms, contrasts, check.intercept=TRUE, type="formula", get.offset=TRUE) ### Get DM (=Design Matrix): { X <- model.matrix(terms, data=fullmf, contrasts.arg=getContrasts(terms, contrasts)) ## Test for intercept in X(?): Xint <- match("(Intercept)", colnames(X), nomatch = 0) if(check.intercept && Xint <= 0) { X <- cbind("(Intercept)" = rep(1, nrow(X)), X) warning(gettextf("an intercept is needed and assumed in '%s'", type), call.=FALSE) } ## Intercept in X is guaranteed. 
res <- list(X=X) if(get.offset) res$offset <- getOffset(fullmf, terms) res } get_clm.mf <- function(mc, formula, form.envir, call.envir=parent.frame(2L)) ### clm-internal ### Extract the model.frame from formula ### mc - matched call containing: data, subset, weights, na.action { ## Extract the full model.frame(mf): m <- match(c("data", "subset", "weights", "na.action"), names(mc), 0) mfcall <- mc[c(1, m)] mfcall$formula <- formula mfcall$drop.unused.levels <- TRUE mfcall[[1]] <- as.name("model.frame") if(is.null(mfcall$data)) mfcall$data <- form.envir eval(mfcall, envir=call.envir) } get_clmY <- function(fullmf, weights) { y <- model.response(fullmf, "any") ## any storage mode if(is.null(y)) stop("'formula' needs a response", call.=FALSE) if(!is.factor(y)) stop("response in 'formula' needs to be a factor", call.=FALSE) ## y.levels are the levels of y with positive weights y.levels <- levels(droplevels(y[weights > 0])) ## check that y has at least two levels: if(length(y.levels) == 1L) stop(gettextf("response has only 1 level ('%s'); expecting at least two levels", y.levels), call.=FALSE) if(!length(y.levels)) stop("response should be a factor with at least two levels") ## return: list(y=y, y.levels=y.levels) } get_clmFormulas <- function(mc, envir=parent.frame(2L)) ### clm-internal ### Extracts and returns a list of formulas needed for further processing. ### mc: matched call ### envir: environment in which mc is to be evaluated { ## Collect all variables in a full formula: ## evaluate the formulae in the enviroment in which clm was called ## (parent.frame(2)) to get them evaluated properly: forms <- list(eval(mc$formula, envir=envir)) if(!is.null(mc$scale)) forms$scale <- eval(mc$scale, envir=envir) if(!is.null(mc$nominal)) forms$nominal <- eval(mc$nominal, envir=envir) ## get the environment of the formula. If this does not have an ## enviroment (it could be a character), then use the parent frame. form.envir <- if(!is.null(env <- environment(forms[[1]]))) env else envir ## ensure formula, scale and nominal are formulas: forms[] <- lapply(forms, function(x) { # 'is.character(.)' for scale = "~ ..." tryCatch(formula(if(is.character(x)) x else deparse(x), env = form.envir), error = function(e)e) }) if(any(vapply(forms, inherits, FUN.VALUE=logical(1), what="error"))) stop("unable to interpret 'formula', 'scale' or 'nominal'") ## collect all variables in a full formula: forms$fullForm <- do.call("getFullForm", forms) ### OPTION: do we actually need to set this name? names(forms)[1] <- "formula" ## set environment of 'fullForm' to the environment of 'formula': attr(forms, "envir") <- environment(forms$fullForm) <- form.envir ## return: forms } get_clmRho <- function(object, ...) { UseMethod("get_clmRho") } get_clmRho.default <- function(object, terms.list, contrasts, link, threshold, parent=parent.frame(), start=NULL, control=clm.control(), ...) ### .default method(?) ### object: model.frame (fullmf) with all variables present ### terms.list: list of terms.objects for each of the formulas in the ### clm object. 
{ ## Get design matrices etc: design <- get_clmDesign(fullmf=object, terms.list=terms.list, contrasts=contrasts) ## Get threshold information: design <- c(design, makeThresholds(design$y.levels, threshold)) ## Drop columns for aliased coefs: design <- drop.cols(design, drop.scale=FALSE, silent=TRUE) ## Set envir rho with variables: B1, B2, o1, o2, weights, fitted: rho <- with(design, { clm.newRho(parent.frame(), y=y, X=X, NOM=design$NOM, S=design$S, weights=weights, offset=offset, S.offset=design$S.off, tJac=tJac, control=control) }) ## Set and check starting values for the parameters: start <- set.start(rho, start=start, get.start=is.null(start), threshold=threshold, link=link, frames=design) rho$par <- as.vector(start) ## remove attributes ## Set pfun, dfun and gfun in rho: setLinks(rho, link) ## Return: rho } get_clmRho.clm <- function(object, parent=parent.frame(), ...) { ### Safely generate the model environment from a model object. o <- object get_clmRho.default(object=model.frame(o), terms.list=terms(o, "all"), contrasts=o$contrasts, start=c(o$start), link=o$link, threshold=o$threshold, parent=parent, control=o$control, ...) } ## get_mfcall <- function(mc, envir=parent.frame(2)) { ## m <- match(c("data", "subset", "weights", "na.action"), ## names(mc), 0) ## mf <- mc[c(1, m)] ## ## mf$formula <- fullForm ## mf$drop.unused.levels <- TRUE ## mf[[1]] <- as.name("model.frame") ## ## if(is.null(mf$data)) mf$data <- form.envir ## list(mfcall=mf, envir=parent.frame(2)) ## } ordinal/R/control.R0000644000176200001440000000700713332052252013707 0ustar liggesusers############################################################################# # Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen # # This file is part of the ordinal package for R (*ordinal*) # # *ordinal* is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # *ordinal* is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # and/or # . ############################################################################# ## This file contains: ## Functions that set control parameters for clm() and clmm(). 
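## A minimal sketch of how these control functions are used (added for
## illustration -- not part of the package sources; the 'wine' data set
## shipped with the package is assumed):
if(FALSE) {
    ## trace Newton iterations and tighten the gradient tolerance:
    fm <- clm(rating ~ temp, data = wine,
              control = clm.control(trace = 1, gradTol = 1e-8))
    ## clmm() analogously takes clmm.control(), e.g. selecting the
    ## ucminf optimizer instead of the default nlminb:
    fmm <- clmm(rating ~ temp + (1|judge), data = wine,
                control = clmm.control(method = "ucminf"))
}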
clm.control <- function(method = c("Newton", "model.frame", "design", "ucminf", "nlminb", "optim"), sign.location = c("negative", "positive"), sign.nominal = c("positive", "negative"), ..., trace = 0L, maxIter = 100L, gradTol = 1e-6, maxLineIter = 15L, relTol = 1e-6, tol = sqrt(.Machine$double.eps), maxModIter = 5L, convergence=c("warn", "silent", "stop", "message")) { method <- match.arg(method) convergence <- match.arg(convergence) sign.location <- match.arg(sign.location) sign.nominal <- match.arg(sign.nominal) if(!all(is.numeric(c(maxIter, gradTol, maxLineIter, relTol, tol, maxModIter)))) stop("maxIter, gradTol, relTol, tol, maxModIter and maxLineIter should all be numeric") ctrl <- list(method = method, sign.location = sign.location, sign.nominal = sign.nominal, convergence = convergence, trace = as.integer(trace), maxIter = as.integer(maxIter), gradTol = as.numeric(gradTol), relTol = as.numeric(relTol), tol = as.numeric(tol), maxLineIter = as.integer(maxLineIter), maxModIter = as.integer(maxModIter)) if(method %in% c("ucminf", "nlminb", "optim")) ctrl$ctrl <- list(trace = as.integer(abs(trace)), ...) return(ctrl) } clmm.control <- function(method = c("nlminb", "ucminf", "model.frame"), ..., trace = 0, maxIter = 50, gradTol = 1e-4, maxLineIter = 50, useMatrix = FALSE, innerCtrl = c("warnOnly", "noWarn", "giveError"), checkRanef = c("warn", "error", "message")) { method <- match.arg(method) innerCtrl <- match.arg(innerCtrl) checkRanef <- match.arg(checkRanef) useMatrix <- as.logical(useMatrix) stopifnot(is.logical(useMatrix)) ctrl <- list(trace=if(trace < 0) 1 else 0, maxIter=maxIter, gradTol=gradTol, maxLineIter=maxLineIter, innerCtrl=innerCtrl) optCtrl <- list(trace = abs(trace), ...) if(!is.numeric(unlist(ctrl[-5]))) stop("maxIter, gradTol, maxLineIter and trace should all be numeric") if(any(ctrl[-c(1, 5)] <= 0)) stop("maxIter, gradTol and maxLineIter have to be > 0") if(method == "ucminf" && !"grtol" %in% names(optCtrl)) optCtrl$grtol <- 1e-5 if(method == "ucminf" && !"grad" %in% names(optCtrl)) optCtrl$grad <- "central" namedList(method, useMatrix, ctrl, optCtrl, checkRanef) } ordinal/R/clmm.start.R0000644000176200001440000000251713332052252014314 0ustar liggesusers############################################################################# # Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen # # This file is part of the ordinal package for R (*ordinal*) # # *ordinal* is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # *ordinal* is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # and/or # . ############################################################################# ## This file contains: ## Functions to compute starting values for clmm()s. 
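## Sketch of the idea implemented below (added for illustration -- not
## part of the package sources): the fixed-effects part of the starting
## vector comes from a clm() fit and each random-effects variance
## parameter starts at zero. This assumes clmm() accepts such a vector
## via its 'start' argument in the order (thresholds, beta, variance
## parameters) and that the 'wine' data set is available:
if(FALSE) {
    fm <- clm(rating ~ temp, data = wine)  # fixed-effects fit
    st <- c(coef(fm), 0)                   # one zero for the single ranef term
    fmm <- clmm(rating ~ temp + (1|judge), data = wine, start = st)
}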
clmm.start <- function(frames, link, threshold) { ## get starting values from clm: fit <- with(frames, clm.fit(y=y, X=X, weights=wts, offset=off, link=link, threshold=threshold)) ## initialize variance parameters to zero: start <- c(fit$par, rep(0, length(frames$grList))) return(start) } ordinal/R/clm.predict.R0000644000176200001440000003400413332052252014430 0ustar liggesusers############################################################################# # Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen # # This file is part of the ordinal package for R (*ordinal*) # # *ordinal* is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # *ordinal* is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # and/or # . ############################################################################# ## This file contains: ## The predict method for clm objects. predict.clm <- function(object, newdata, se.fit = FALSE, interval = FALSE, level = 0.95, type = c("prob", "class", "cum.prob", "linear.predictor"), na.action = na.pass, ...) ### result - a list of predictions (fit) ### OPTION: restore names of the fitted values ### ### Assumes object has terms, xlevels, contrasts, tJac { ## match and test arguments: type <- match.arg(type) se.fit <- as.logical(se.fit)[1] interval <- as.logical(interval)[1] stopifnot(length(level) == 1 && is.numeric(level) && level < 1 && level > 0) if(type == "class" && (se.fit || interval)) { warning("se.fit and interval set to FALSE for type = 'class'") se.fit <- interval <- FALSE } cov <- if(se.fit || interval) vcov(object) else NULL ### Get newdata object; fill in response if missing and always for ### type=="class": has.response <- TRUE if(type == "class" && missing(newdata)) ## newdata <- update(object, method="model.frame")$mf newdata <- model.frame(object) ## newdata supplied or type=="class": has.newdata <- !(missing(newdata) || is.null(newdata)) if(has.newdata || type=="class") { if(has.newdata && sum(unlist(object$aliased)) > 0) warning("predictions from column rank-deficient fit may be misleading") newdata <- as.data.frame(newdata) ## Test if response is in newdata: resp <- response.name(object$terms) ## remove response from newdata if type == "class" if(type == "class") newdata <- newdata[!names(newdata) %in% resp] has.response <- resp %in% names(newdata) ## FALSE for type == "class" if(!has.response) { ## fill in response variable in newdata if missing: ylev <- object$y.levels nlev <- length(ylev) nnd <- nrow(newdata) newdata <- cbind(newdata[rep(1:nnd, each=nlev) , , drop=FALSE], factor(rep(ylev, nnd), levels=ylev, ordered=TRUE)) names(newdata)[ncol(newdata)] <- resp } ### Set model matrices: if(is.null(attr(object$terms, "predvars"))) warning(paste0("terms object does not have a predvars attribute: ", "predictions may be misleading")) mf <- model.frame(object$terms, newdata, na.action=na.action, xlev=object$xlevels) ## model.frame will warn, but here we also throw an error: if(nrow(mf) != nrow(newdata)) stop("length of variable(s) found do not match nrow(newdata)") ## check that variables are of the right type: if (!is.null(cl <- attr(object$terms, 
"dataClasses"))) .checkMFClasses(cl, mf) ## make model.matrix: X <- model.matrix(object$terms, mf, contrasts = object$contrasts) Xint <- match("(Intercept)", colnames(X), nomatch = 0L) n <- nrow(X) if(Xint <= 0) X <- cbind("(Intercept)" = rep(1, n), X) # if(object$control$sign.location == "negative") NOM[, -1] <- -NOM[, -1] ## drop aliased columns: if(sum(object$aliased$beta) > 0) X <- X[, !c(FALSE, object$aliased$beta), drop=FALSE] ## handle offset (from predict.lm): ### NOTE: Could factor the offset handling out in its own function for ### code clarity: offset <- rep(0, nrow(X)) if(!is.null(off.num <- attr(object$terms, "offset"))) for(i in off.num) offset <- offset + eval(attr(object$terms, "variables")[[i + 1]], newdata) y <- model.response(mf) if(any(!levels(y) %in% object$y.levels)) stop(gettextf("response factor '%s' has new levels", response.name(object$terms))) ### make NOMINAL model.matrix: if(is.nom <- !is.null(object$nom.terms)) { ## allows NAs to pass through to fit, se.fit, lwr and upr: nom.mf <- model.frame(object$nom.terms, newdata, na.action=na.action, xlev=object$nom.xlevels) ## model.frame will warn, but here we also throw an error: if(nrow(nom.mf) != nrow(newdata)) stop("length of variable(s) found do not match nrow(newdata)") if (!is.null(cl <- attr(object$nom.terms, "dataClasses"))) .checkMFClasses(cl, nom.mf) NOM <- model.matrix(object$nom.terms, nom.mf, contrasts=object$nom.contrasts) NOMint <- match("(Intercept)", colnames(NOM), nomatch = 0L) if(NOMint <= 0) NOM <- cbind("(Intercept)" = rep(1, n), NOM) # if(object$control$sign.nominal == "negative") NOM[, -1] <- -NOM[, -1] alias <- t(matrix(object$aliased$alpha, nrow=length(object$y.levels) - 1))[,1] if(sum(alias) > 0) NOM <- NOM[, !c(FALSE, alias), drop=FALSE] } ### make SCALE model.matrix: if(is.scale <- !is.null(object$S.terms)) { ## allows NAs to pass through to fit, se.fit, lwr and upr: S.mf <- model.frame(object$S.terms, newdata, na.action=na.action, xlev=object$S.xlevels) ## model.frame will warn, but here we also throw an error: if(nrow(S.mf) != nrow(newdata)) stop("length of variable(s) found do not match nrow(newdata)") if (!is.null(cl <- attr(object$S.terms, "dataClasses"))) .checkMFClasses(cl, S.mf) S <- model.matrix(object$S.terms, S.mf, contrasts=object$S.contrasts) Sint <- match("(Intercept)", colnames(S), nomatch = 0L) if(Sint <= 0) S <- cbind("(Intercept)" = rep(1, n), S) if(sum(object$aliased$zeta) > 0) S <- S[, !c(FALSE, object$aliased$zeta), drop=FALSE] Soff <- rep(0, nrow(S)) if(!is.null(off.num <- attr(object$S.terms, "offset"))) for(i in off.num) Soff <- Soff + eval(attr(object$S.terms, "variables")[[i + 1]], newdata) } ### Construct model environment: tJac <- object$tJac dimnames(tJac) <- NULL env <- clm.newRho(parent.frame(), y=y, X=X, NOM=if(is.nom) NOM else NULL, S=if(is.scale) S else NULL, weights=rep(1, n), offset=offset, S.offset=if(is.scale) Soff else rep(0, n), tJac=tJac, control=object$control) setLinks(env, link=object$link) } ## end !missing(newdata) or type == "class" else { env <- get_clmRho.clm(object) ## env <- update(object, doFit=FALSE) } env$par <- as.vector(coef(object)) env$par <- env$par[!is.na(env$par)] ### OPTION: Are there better ways to handle NAs in coef? 
## if(length(env$par) != ncol(env$B1)) ## stop(gettextf("design matrix has %d columns, but expecting %d (number of parameters)", ## ncol(env$B1), length(env$par))) ## Get predictions: pred <- switch(type, "prob" = prob.predict.clm(env=env, cov=cov, se.fit=se.fit, interval=interval, level=level), "class" = prob.predict.clm(env=env, cov=cov, se.fit=se.fit, interval=interval, level=level), "cum.prob" = cum.prob.predict.clm(env=env, cov=cov, se.fit=se.fit, interval=interval, level=level), "linear.predictor" = lin.pred.predict.clm(env=env, cov=cov, se.fit=se.fit, interval=interval, level=level) ##, ## "eta" = eta.pred.predict.clm(env=env, cov=cov, ## se.fit=se.fit, interval=interval, level=level) ) ### Arrange predictions in matrices if response is missing from ### newdata arg or type=="class": if(!has.response || type == "class") { pred <- lapply(pred, function(x) { x <- matrix(unlist(x), ncol=nlev, byrow=TRUE) dimnames(x) <- list(1:nrow(x), ylev) x }) ## if(type == "eta") ## pred <- lapply(pred, function(x) { ## x <- x[, -nlev, drop=FALSE] ## colnames(x) <- names(object$alpha) ## }) if(type == "class") pred <- lapply(pred, function(x) { factor(max.col(x), levels=seq_along(ylev), labels=ylev) }) } ### Filter missing values (if relevant): if(missing(newdata) && !is.null(object$na.action)) pred <- lapply(pred, function(x) napredict(object$na.action, x)) return(pred) } prob.predict.clm <- function(env, cov, se.fit=FALSE, interval=FALSE, level=0.95) ### Works for linear and scale models: ### env - model environment with par set. ### cov - vcov for the parameters { ## evaluate nll and grad to set dpi.psi in env: clm.nll(env) pred <- list(fit = as.vector(env$fitted)) if(se.fit || interval) { se.pr <- get.se(env, cov, type="prob") if(se.fit) pred$se.fit <- se.pr if(interval) { pred.logit <- qlogis(pred$fit) ## se.logit <- dlogis(pred$fit) * se.pr se.logit <- se.pr / (pred$fit * (1 - pred$fit)) a <- (1 - level)/2 pred$lwr <- plogis(pred.logit + qnorm(a) * se.logit) pred$upr <- plogis(pred.logit - qnorm(a) * se.logit) } } return(pred) } eta.pred.predict.clm <- function(env, cov, se.fit=FALSE, interval=FALSE, level=0.95) { ## clm.nll(env) pred <- list(eta = c(with(env, B1 %*% par[1:n.psi]))) if(se.fit || interval) { se <- get.se(env, cov, type="lp") if(se.fit) { pred$se.eta <- se[[1]] } if(interval) { a <- (1 - level)/2 pred$lwr1 <- env$eta1 + qnorm(a) * se[[1]] pred$upr1 <- env$eta1 - qnorm(a) * se[[1]] } } pred } lin.pred.predict.clm <- function(env, cov, se.fit=FALSE, interval=FALSE, level=0.95) ### get predictions on the scale of the linear predictor { ## evaluate nll and grad to set dpi.psi in env: clm.nll(env) pred <- list(eta1=env$eta1, eta2=env$eta2) if(se.fit || interval) { se <- get.se(env, cov, type="lp") if(se.fit) { pred$se.eta1 <- se[[1]] pred$se.eta2 <- se[[2]] } if(interval) { a <- (1 - level)/2 pred$lwr1 <- env$eta1 + qnorm(a) * se[[1]] pred$lwr2 <- env$eta2 + qnorm(a) * se[[2]] pred$upr1 <- env$eta1 - qnorm(a) * se[[1]] pred$upr2 <- env$eta2 - qnorm(a) * se[[2]] } } return(pred) ## list with predictions. 
} cum.prob.predict.clm <- function(env, cov, se.fit=FALSE, interval=FALSE, level=0.95) { ## evaluate nll and grad to set dpi.psi in env: clm.nll(env) pred <- list(cprob1=env$pfun(env$eta1), cprob2=env$pfun(env$eta2)) if(se.fit || interval) { se <- get.se(env, cov, type="gamma") if(se.fit) { pred$se.cprob1 <- se[[1]] pred$se.cprob2 <- se[[2]] } if(interval) { a <- (1 - level)/2 pred$lwr1 <- pred$cprob1 + qnorm(a) * se[[1]] pred$lwr2 <- pred$cprob2 + qnorm(a) * se[[2]] pred$upr1 <- pred$cprob1 - qnorm(a) * se[[1]] pred$upr2 <- pred$cprob2 - qnorm(a) * se[[2]] } } return(pred) } get.se <- function(rho, cov, type=c("lp", "gamma", "prob")) { ### Computes standard errors of predicted probabilities (prob), ### cumulative probabilities (gamma) or values of the linear ### predictor (lp) for linear (k<=0) or location-scale models ### (k>0). rho$xcovtx <- function(x, chol.cov) { ## Compute 'diag(x %*% cov %*% t(x))' diag(x %*% crossprod(chol.cov) %*% t(x)) ## colSums(tcrossprod(chol.cov, x)^2) } rho$type <- match.arg(type) ind <- seq_len(rho$n.psi + rho$k) rho$chol.cov <- try(chol(cov[ind, ind]), silent=TRUE) if(inherits(rho$chol.cov, "try-error")) stop(gettext("VarCov matrix of model parameters is not positive definite:\n cannot compute standard errors of predictions"), call.=FALSE) clm.nll(rho) ## just to be safe with(rho, { ### First compute d[eta, gamma, prob] / d par; then compute variance ### covariance matrix of the observations and extract SEs as the ### square root of the diagonal elements: if(type %in% c("lp", "gamma")) { D1 <- B1 D2 <- B2 if(k > 0) { D1 <- cbind(D1/sigma, -S*eta1) D2 <- cbind(D2/sigma, -S*eta2) } if(type == "gamma") { p1 <- if(!nlambda) dfun(eta1) else dfun(eta1, lambda) p2 <- if(!nlambda) dfun(eta2) else dfun(eta2, lambda) D1 <- D1*p1 D2 <- D2*p2 } se <- list(se1=sqrt(xcovtx(D1, chol.cov)), se2=sqrt(xcovtx(D2, chol.cov))) } if(type == "prob") { p1 <- if(!nlambda) dfun(eta1) else dfun(eta1, lambda) p2 <- if(!nlambda) dfun(eta2) else dfun(eta2, lambda) C2 <- if(k <= 0) B1*p1 - B2*p2 else cbind(B1*p1/sigma - B2*p2/sigma, -(eta1 * p1 - eta2 * p2) * S) se <- sqrt(xcovtx(C2, chol.cov)) } }) rho$se } ordinal/R/clm.nominal_test.R0000644000176200001440000001702113332052252015472 0ustar liggesusers############################################################################# # Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen # # This file is part of the ordinal package for R (*ordinal*) # # *ordinal* is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # *ordinal* is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # and/or # . ############################################################################# ## This file contains: ## Implementation of of nominal_test.clm() and scale_test.clm() for ## automatic testing of nominal and scale effects in clm()s. These ## functions work in a fashion similar to add1(). nominal_test <- function(object, ...) { UseMethod("nominal_test") } scale_test <- function(object, ...) { UseMethod("scale_test") } nominal_test.clm <- function(object, scope, trace=FALSE, ...) 
### Test nominal effects for all (or selected) terms in location ### and scale formulas. { ## get scope: vector of terms names which to add to nominal: termsnm <- attr(object$terms, "term.labels") if(!is.null(object$S.terms)) termsnm <- union(termsnm, attr(object$S.terms, "term.labels")) if(!missing(scope) && !is.null(scope)) { if(!is.character(scope)) scope <- attr(terms(update.formula(object, scope)), "term.labels") if(!all(match(scope, termsnm, 0L) > 0L)) stop("scope is not a subset of term labels") } else { scope <- termsnm } if(!is.null(object$nom.terms)) { scope <- scope[!scope %in% attr(object$nom.terms, "term.labels")] } if(!length(scope)) message("\nno additional terms to add to nominal\n") env <- environment(formula(object)) ## get list of (updated) nominal formulas: nomforms <- if(!is.null(object$call$nominal)) lapply(scope, function(tm) { update.formula(old=formula(object$nom.terms), new=as.formula(paste("~. + ", tm))) }) else lapply(scope, function(tm) { as.formula(paste("~", tm), env=env) }) ns <- length(scope) ## results matrix: ans <- matrix(nrow = ns + 1L, ncol = 3L, dimnames = list(c("", scope), c("df", "logLik", "AIC"))) ans[1L, ] <- c(object$edf, object$logLik, AIC(object)) n0 <- nobs(object) ## for all terms in scope: i <- 1 for(i in seq(ns)) { if(trace) { cat("trying +", scope[i], "\n", sep = " ") utils::flush.console() } ## update and fit model with nominal effect added: nfit <- try(update(object, nominal=nomforms[[i]], convergence="silent"), silent=TRUE) ## model may not be identifiable or converge: if(!inherits(nfit, "try-error") && ### NOTE: non-negative convergence codes indicate that the likelihood ### is correctly determined: nfit$convergence$code >= 0) { ans[i + 1L, ] <- c(nfit$edf, nfit$logLik, AIC(nfit)) nnew <- nobs(nfit) if(all(is.finite(c(n0, nnew))) && nnew != n0) stop("number of rows in use has changed: remove missing values?") } } dfs <- ans[, 1L] - ans[1L, 1L] dfs[1L] <- NA aod <- data.frame(Df = dfs, logLik = ans[, 2L], AIC = ans[, 3L]) rownames(aod) <- rownames(ans) ## compute likelihood ratio statistic and p-values: LR <- 2*(ans[, 2L] - ans[1L, 2L]) LR[1L] <- NA nas <- !is.na(LR) P <- LR P[nas] <- pchisq(LR[nas], dfs[nas], lower.tail = FALSE) aod[, c("LRT", "Pr(>Chi)")] <- list(LR, P) head <- c("Tests of nominal effects", paste("\nformula:", Deparse(formula(object$terms)))) if(!is.null(object$call$scale)) head <- c(head, paste("scale: ", Deparse(formula(object$S.terms)))) if(!is.null(object$call$nominal)) head <- c(head, paste("nominal:", Deparse(formula(object$nom.terms)))) class(aod) <- c("anova", "data.frame") attr(aod, "heading") <- head aod } scale_test.clm <- function(object, scope, trace=FALSE, ...) 
### Test scale effects for all (or selected) terms in formula { ## get scope: vector of terms names which to add to scale: termsnm <- attr(object$terms, "term.labels") if(!missing(scope) && !is.null(scope)) { if(!is.character(scope)) scope <- attr(terms(update.formula(object, scope)), "term.labels") if(!all(match(scope, termsnm, 0L) > 0L)) stop("scope is not a subset of term labels") } else { scope <- termsnm } ## if(!is.null(object$nom.terms)) { ## scope <- scope[!scope %in% attr(object$nom.terms, ## "term.labels")] ## } if(!is.null(object$S.terms)) { scope <- scope[!scope %in% attr(object$S.terms, "term.labels")] } if(!length(scope)) message("\nno relevant terms to add to scale\n") env <- environment(formula(object)) ## get list of (updated) scale formulas: scaleforms <- if(!is.null(object$call$scale)) lapply(scope, function(tm) { update.formula(old=formula(object$S.terms), new=as.formula(paste("~. + ", tm))) }) else lapply(scope, function(tm) as.formula(paste("~", tm), env=env)) ns <- length(scope) ## results matrix: ans <- matrix(nrow = ns + 1L, ncol = 3L, dimnames = list(c("", scope), c("df", "logLik", "AIC"))) ans[1L, ] <- c(object$edf, object$logLik, AIC(object)) n0 <- nobs(object) ## for all terms in scope: for(i in seq(ns)) { if(trace) { cat("trying +", scope[i], "\n", sep = " ") utils::flush.console() } ## update and fit model with scale effect added: nfit <- try(update(object, scale=scaleforms[[i]]), silent=TRUE) ## model may not be identifiable or converge: if(!inherits(nfit, "try-error") && nfit$convergence$code >= 0) { ans[i + 1L, ] <- c(nfit$edf, nfit$logLik, AIC(nfit)) nnew <- nobs(nfit) if (all(is.finite(c(n0, nnew))) && nnew != n0) stop("number of rows in use has changed: remove missing values?") } } dfs <- ans[, 1L] - ans[1L, 1L] dfs[1L] <- NA aod <- data.frame(Df = dfs, logLik = ans[, 2L], AIC = ans[, 3L]) rownames(aod) <- rownames(ans) ## compute likelihood ratio statistic and p-values: LR <- 2*(ans[, 2L] - ans[1L, 2L]) LR[1L] <- NA nas <- !is.na(LR) P <- LR P[nas] <- pchisq(LR[nas], dfs[nas], lower.tail = FALSE) aod[, c("LRT", "Pr(>Chi)")] <- list(LR, P) head <- c("Tests of scale effects", paste("\nformula:", Deparse(formula(object$terms)))) if(!is.null(object$call$scale)) head <- c(head, paste("scale: ", Deparse(formula(object$S.terms)))) if(!is.null(object$call$nominal)) head <- c(head, paste("nominal:", Deparse(formula(object$nom.terms)))) class(aod) <- c("anova", "data.frame") attr(aod, "heading") <- head aod } ordinal/R/clmm2.utils.R0000755000176200001440000003274313332052252014410 0ustar liggesusers############################################################################# # Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen # # This file is part of the ordinal package for R (*ordinal*) # # *ordinal* is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # *ordinal* is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # and/or # . ############################################################################# ## This file contains: ## Utility functions for fitting CLMMs with clmm2(). ### OPTION: Could make use of getFittedC throughout this file... 
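## The functions below evaluate the *penalized* negative
## log-likelihood underlying the Laplace approximation; conceptually,
## with case weights w_i, cell probabilities pr_i and standard normal
## random effects u_j:
##
##   -l(theta; u) = -sum_i w_i * log(pr_i) - sum_j log(dnorm(u_j))
##
## cf. the '-sum(weights * log(pr)) - sum(dnorm(x=u, ..., log=TRUE))'
## terms in .negLogLikMfastR() and .negLogLikM() below.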
.negLogLikMfastR <- function(rho) { ## negative log-likelihood ### .negLogLikMfast in R with(rho, { o21 <- u[grFac] * stDev o11 <- o1 - o21 o21 <- o2 - o21 eta1 <- (eta1Fix + o11)/sigma eta2 <- (eta2Fix + o21)/sigma pr <- if(nlambda) pfun(eta1, lambda) - pfun(eta2, lambda) else pfun(eta1) - pfun(eta2) if(any(is.na(pr)) || any(pr <= 0)) nll <- Inf else nll <- -sum(weights * log(pr)) - sum(dnorm(x=u, mean=0, sd=1, log=TRUE)) nll }) } .negLogLikM <- function(rho) { ## negative log-likelihood with(rho, { if(estimStDev) stDev <- exp(par[p+nxi+k+estimLambda+ 1:s]) o21 <- u[grFac] * stDev o11 <- o1 - o21 o21 <- o2 - o21 if(estimLambda > 0) lambda <- par[nxi + p + k + 1:estimLambda] sigma <- if(k > 0) expSoffset * exp(drop(Z %*% par[nxi+p + 1:k])) else expSoffset eta1Fix <- drop(B1 %*% par[1:(nxi + p)]) eta2Fix <- drop(B2 %*% par[1:(nxi + p)]) eta1 <- (eta1Fix + o11)/sigma eta2 <- (eta2Fix + o21)/sigma pr <- if(nlambda) pfun(eta1, lambda) - pfun(eta2, lambda) else pfun(eta1) - pfun(eta2) if(any(is.na(pr)) || any(pr <= 0)) nll <- Inf else nll <- -sum(weights * log(pr)) - sum(dnorm(x=u, mean=0, sd=1, log=TRUE)) nll }) } .gradM <- function(rho) { ## gradient of the negative log-likelihood with(rho, { if(nlambda) { p1 <- dfun(eta1, lambda) p2 <- dfun(eta2, lambda) } else { p1 <- dfun(eta1) p2 <- dfun(eta2) } wtprSig <- weights/pr/sigma .C("gradC", as.double(stDev), as.double(p1), as.double(p2), as.double(wtprSig), as.integer(grFac), length(wtprSig), u = as.double(u), length(u))$u ## tapply(stDev * wtprSig * (p1 - p2), grFac, sum) + u }) } .gradC <- function(rho) { tmp <- with(rho, { .C("grad_C", as.double(stDev), p1 = double(length(pr)), p2 = double(length(pr)), as.double(pr), as.double(weights), as.double(sigma), wtprSig = double(length(pr)), as.double(eta1), as.double(eta2), gradValues = double(length(u)), as.double(u), as.integer(grFac), length(pr), length(u), as.double(lambda), as.integer(linkInt))[c("p1", "p2", "wtprSig", "gradValues")] }) rho$wtprSig <- tmp$wtprSig rho$p1 <- tmp$p1 rho$p2 <- tmp$p2 tmp$gradValues } .hessC <- function(rho) { with(rho, { .C("hess", as.double(stDev), as.double(p1), as.double(p2), as.double(pr), as.double(wtprSig), as.double(eta1), as.double(eta2), as.integer(linkInt), as.integer(grFac), length(pr), hessValues = double(length(u)), as.double(lambda), length(u))$hessValues }) } .hessianM <- function(rho) ## hessian of the negative log-likelihood with(rho,{ if(nlambda) { g1 <- gfun(eta1, lambda) g2 <- gfun(eta2, lambda) } else { g1 <- gfun(eta1) g2 <- gfun(eta2) } .C("hessC", as.double(stDev), as.double(p1), as.double(p2), as.double(pr), as.double(g1), as.double(g2), as.double(wtprSig), as.integer(grFac), length(pr), z = double(length(u)), length(u))$z ## tapply(((p1 - p2)^2 / pr - g1 + g2) * wtprSig, grFac, sum) * ## stDev^2 + 1 }) update.u2.v2 <- function(rho) { ### second version: C-implementation of NR-algorithm. 
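### The inner iteration maximizes the penalized log-likelihood over
### the random-effect modes u with diagonal Newton-Raphson steps;
### conceptually (mirroring the pure-R version in update.u2 below):
###   u <- u - stepFactor * gradient / D
### where D holds the per-group Hessian terms and stepFactor is
### halved until the objective improves. Here the iterations are
### delegated to the C routine "NRalg".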
.negLogLikBase(rho) ## update: par, stDev, eta1Fix, eta2Fix eta2Fix, sigma fit <- with(rho, .C("NRalg", as.integer(ctrl$trace), as.integer(ctrl$maxIter), as.double(ctrl$gradTol), as.integer(ctrl$maxLineIter), as.integer(grFac), as.double(stDev), as.double(o1), as.double(o2), as.double(eta1Fix), as.double(eta2Fix), as.double(eta1), as.double(eta2), as.double(sigma), as.integer(linkInt), as.double(weights), u = as.double(uStart), pr = as.double(pr), funValue = as.double(nll), gradValues = as.double(uStart), hessValues = as.double(uStart), length(pr), length(uStart), maxGrad = double(1), conv = 0L, double(length(pr)), # p1 double(length(pr)), # p2 double(length(pr)), # wtprSig as.double(lambda), Niter = as.integer(Niter) )[c("u", "pr", "funValue", "gradValues", "hessValues", "maxGrad", "conv", "Niter")] ) ## Get message: message <- switch(as.character(fit$conv), "1" = "max|gradient| < tol, so current iterate is probably solution", "0" = "Non finite negative log-likelihood", "-1" = "iteration limit reached when updating the random effects", "-2" = "step factor reduced below minimum when updating the random effects") ## check for convergence and report warning/error: if(rho$ctrl$trace > 0 && fit$conv == 1) cat("\nOptimizer converged! ", "max|grad|:", fit$maxGrad, message, fill = TRUE) if(fit$conv != 1 && rho$ctrl$innerCtrl == "warnOnly") warning(message, "\n at iteration ", rho$Niter) else if(fit$conv != 1 && rho$ctrl$innerCtrl == "giveError") stop(message, "\n at iteration ", rho$Niter) ## Store values and return: rho$Niter <- fit$Niter rho$u <- fit$u rho$D <- fit$hessValue rho$gradient <- fit$gradValue if(!is.finite(rho$negLogLik <- fit$funValue)) return(FALSE) return(TRUE) } update.u2 <- function(rho) { stepFactor <- 1 innerIter <- 0 rho$u <- rho$uStart rho$negLogLik <- .negLogLikM(rho) if(!is.finite(rho$negLogLik)) return(FALSE) rho$gradient <- .gradC(rho) maxGrad <- max(abs(rho$gradient)) conv <- -1 ## Convergence flag message <- "iteration limit reached when updating the random effects" if(rho$ctrl$trace > 0) Trace(iter=0, stepFactor, rho$negLogLik, maxGrad, rho$u, first=TRUE) ## Newton-Raphson algorithm: for(i in 1:rho$ctrl$maxIter) { if(maxGrad < rho$ctrl$gradTol) { message <- "max|gradient| < tol, so current iterate is probably solution" if(rho$ctrl$trace > 0) cat("\nOptimizer converged! ", "max|grad|:", maxGrad, message, fill = TRUE) conv <- 0 break } rho$D <- .hessC(rho) ## rho$D <- .hessianM(rho) step <- rho$gradient / rho$D rho$u <- rho$u - stepFactor * step negLogLikTry <- .negLogLikMfast(rho) lineIter <- 0 ## simple line search, i.e. 
## step halving:
      while(negLogLikTry > rho$negLogLik) {
        stepFactor <- stepFactor/2
        rho$u <- rho$u + stepFactor * step
        negLogLikTry <- .negLogLikMfast(rho)
        lineIter <- lineIter + 1
        if(rho$ctrl$trace > 0)
          Trace(i+innerIter, stepFactor, rho$negLogLik, maxGrad,
                rho$u, first=FALSE)
        if(lineIter > rho$ctrl$maxLineIter){
          message <- "step factor reduced below minimum when updating the random effects"
          conv <- 1
          break
        }
        innerIter <- innerIter + 1
      }
      rho$negLogLik <- negLogLikTry
      rho$gradient <- .gradC(rho)
      maxGrad <- max(abs(rho$gradient))
      if(rho$ctrl$trace > 0)
        Trace(i+innerIter, stepFactor, rho$negLogLik, maxGrad, rho$u,
              first=FALSE)
      stepFactor <- min(1, 2 * stepFactor)
    }
    if(conv != 0 && rho$ctrl$innerCtrl == "warnOnly") {
      warning(message, "\n at iteration ", rho$Niter)
      utils::flush.console()
    } else if(conv != 0 && rho$ctrl$innerCtrl == "giveError")
      stop(message, "\n at iteration ", rho$Niter)
    rho$Niter <- rho$Niter + i
    rho$D <- .hessC(rho)
    if(!is.finite(rho$negLogLik)) return(FALSE)
    else return(TRUE)
}

.hessMinC <- function(rho) {
    with(rho, {
        if(nlambda) {
            g1 <- gfun(eta1, lambda)
            g2 <- gfun(eta2, lambda)
        } else {
            g1 <- gfun(eta1)
            g2 <- gfun(eta2)
        }
        .C("hessC",
           as.double(stDev),
           as.double(p1),
           as.double(p2),
           as.double(pr),
           as.double(g1),
           as.double(g2),
           as.double(wtprSig),
           as.integer(grFac),
           length(pr),
           z = double(length(u)),
           length(u))$z
    })
}

.gradMinC <- function(stDev, p1, p2, wtprSig, grFac, u)
    .C("gradC",
       as.double(stDev),
       as.double(p1),
       as.double(p2),
       as.double(wtprSig),
       as.integer(unclass(grFac)),
       as.integer(length(wtprSig)),
       u = as.double(u),
       as.integer(length(u)))$u

.gradMinC <- function(rho) {
    with(rho, {
        if(nlambda) {
            p1 <- dfun(eta1, lambda)
            p2 <- dfun(eta2, lambda)
        } else {
            p1 <- dfun(eta1)
            p2 <- dfun(eta2)
        }
        wtprSig <- weights/pr/sigma
        .C("gradC",
           as.double(stDev),
           as.double(p1),
           as.double(p2),
           as.double(wtprSig),
           as.integer(grFac),
           length(wtprSig),
           u = as.double(u),
           length(u))$u
    })
}

grFacSumC <- function(x, grFac, u)
    .C("grFacSum_C",
       as.double(x),
       as.integer(grFac),
       as.integer(length(x)),
       u = as.double(u),
       as.integer(length(u)))$u

grFacSum <- function(x, grFac, n.x, u, n.u) { ## i, j, z
    z <- 0
    for (i in 1:n.u) {
        for (j in 1:n.x)
            if(grFac[j] == i) z <- z + x[j]
        u[i] <- z + u[i]
        z <- 0
    }
    u
}

getNAGQ2 <- function(rho, par) {
### Not in use
    if(!missing(par)) rho$par <- par
    if(!update.u2(rho)) return(Inf)
    if(any(rho$D < 0)) return(Inf)
    with(rho, {
        K <- sqrt(2/D)
        agqws <- K %*% t(ghqws)
        agqns <- apply(K %*% t(ghqns), 2, function(x) x + u)
        ranNew <- apply(agqns, 2, function(x) x[grFac] * stDev)
        eta1Tmp <- (eta1Fix + o1 - ranNew) / sigma
        eta2Tmp <- (eta2Fix + o2 - ranNew) / sigma
        if(nlambda)
            ## PRnn <- (pfun(eta1Tmp, lambda) - pfun(eta2Tmp, lambda))^weights
            ## This is likely a computationally more safe solution:
            PRnn <- exp(weights * log(pfun(eta1Tmp, lambda) - pfun(eta2Tmp, lambda)))
        else
            ## PRnn <- (pfun(eta1Tmp) - pfun(eta2Tmp))^weights
            PRnn <- exp(weights * log(pfun(eta1Tmp) - pfun(eta2Tmp)))
        for(i in 1:r)
            ## PRrn[i,] <- apply(PRnn[grFac == i, ], 2, prod)
            PRrn[i,] <- apply(PRnn[grFac == i, , drop = FALSE], 2, prod)
        PRrn <- PRrn * agqws * dnorm(x=agqns, mean=0, sd=1)
### OPTION: Could this be optimized by essentially computing dnorm 'by hand'?
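### For reference, dnorm 'by hand' is exp(-agqns^2/2) / sqrt(2*pi),
### which could be folded into a log-scale computation of PRrn and
### would save the dnorm() call.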
}) -sum(log(rowSums(rho$PRrn))) } getNGHQ <- function(rho, par) { ### Not in use if(!missing(par)) rho$par <- par .negLogLikM(rho) ## Update lambda, stDev, sigma and eta*Fix with(rho, { eta1Tmp <- (eta1Fix + o1 - ranNew * stDev) / sigma eta2Tmp <- (eta2Fix + o2 - ranNew * stDev) / sigma if(nlambda) PRnn <- exp(weights * log(pfun(eta1Tmp, lambda) - pfun(eta2Tmp, lambda))) else PRnn <- exp(weights * log(pfun(eta1Tmp) - pfun(eta2Tmp))) for(i in 1:r) PRrn[i,] <- apply(PRnn[grFac == i, ,drop = FALSE], 2, prod) PRrn <- PRrn * agqws * dnorm(x=agqns, mean=0, sd=1) }) -sum(log(rowSums(rho$PRrn))) } ordinal/R/clm.Thetamat.R0000644000176200001440000001221413332052252014544 0ustar liggesusers############################################################################# # Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen # # This file is part of the ordinal package for R (*ordinal*) # # *ordinal* is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # *ordinal* is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # and/or # . ############################################################################# ## This file contains: ## Functions (getThetamat) to compute a table of threshold ## coefficients from model fits (clm()s) with nominal effects. getThetamat <- function(terms, alpha, assign, contrasts, tJac, xlevels, sign.nominal) ### Compute matrix of thresholds for all combinations of levels of ### factors in the nominal formula. ### ### Input: ### terms: nominal terms object ### alpha: vector of threshold parameters ### assign: attr(NOM, "assign"), where NOM is the design matrix for ### the nominal effects ### contrasts: list of contrasts for the nominal effects ### tJac: threshold Jacobian with appropriate dimnames. ### xlevels: names of levels of factors among the nominal effects. ### sign.nominal: "positive" or "negative" ### ### Output: ### Theta: data.frame of thresholds ### mf.basic: if nrow(Theta) > 1 a data.frame with factors in columns ### and all combinations of the factor levels in rows. { ## Make matrix of thresholds; Theta: Theta <- matrix(alpha, ncol=ncol(tJac), byrow=TRUE) ## Matrix with variables-by-terms: factor.table <- attr(terms, "factors") all.varnm <- rownames(factor.table) ### NOTE: need to index with all.varnm not to include (weights) and ### possibly other stuff. var.classes <- attr(terms, "dataClasses")[all.varnm] numeric.var <- which(var.classes != "factor") ### NOTE: Logical variables are treated as numeric variables. 
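### (A logical covariate enters the nominal design matrix as a single
### 0/1 column, so terms involving it are dropped from the Theta table
### below together with the numeric terms.)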
numeric.terms <- factor.terms <- numeric(0) if(length(factor.table)) { ## Terms associated with numerical variables: numeric.terms <- which(colSums(factor.table[numeric.var, , drop=FALSE]) > 0) ## Terms only involving factor variables: factor.terms <- which(colSums(factor.table[numeric.var, , drop=FALSE]) == 0) } ## Remove rows in Theta for numeric variables: if(length(numeric.terms)) { ### NOTE: ncol(NOM) == length(asgn) == nrow(Theta) ### length(attr(terms, "term.labels")) == ncol(factor.table) ### NOTE: length(var.classes) == nrow(factor.table) numeric.rows <- which(assign %in% numeric.terms) Theta <- Theta[-numeric.rows, , drop=FALSE] ## Drop terms so the design matrix, X for the factors does not ## include numeric variables: if(length(factor.terms)) terms <- drop.terms(terms, dropx=numeric.terms, keep.response=FALSE) } ## if some nominal effects are factors: if(length(factor.terms)) { ## get xlevels for factors, not ordered (factors) factor.var <- which(var.classes == "factor") factor.varnm <- names(var.classes)[factor.var] xlev <- xlevels[factor.varnm] ## minimal complete model frame: mf.basic <- do.call(expand.grid, xlev) ## minimal complete design matrix: X <- model.matrix(terms, data=mf.basic, contrasts=contrasts[factor.varnm]) ### NOTE: get_clmDesign adds an intercept if its not there, so we need ### to do that as well here. Otherwise 'X[, keep, drop=FALSE]' will ### fail: if(!"(Intercept)" %in% colnames(X)) X <- cbind("(Intercept)" = rep(1, nrow(X)), X) if(sign.nominal == "negative") X[, -1] <- -X[, -1] ### NOTE: There are no contrasts for numerical variables, but there ### may be for ordered factors. ## From threshold parameters to thresholds: ### NOTE: some rows of Theta may contain NAs due to rank deficiency of ### the NOM design matrix. keep <- apply(Theta, 1, function(x) sum(is.na(x)) == 0) ## Theta <- apply(Theta, 2, function(th) X %*% th) tmp <- lapply(1:NCOL(Theta), function(i) { X[, keep, drop=FALSE] %*% Theta[keep, i] }) Theta <- do.call(cbind, tmp) } ## Adjust each row in Theta for threshold functions: tmp <- lapply(seq_len(nrow(Theta)), function(i) c(tJac %*% Theta[i, ])) Theta <- do.call(rbind, tmp) ### NOTE: apply returns a vector and not a matrix when ncol(Theta) == ### 1, so we need to avoid it here. ## Theta <- t(apply(Theta, 1, function(th) tJac %*% th)) colnames(Theta) <- rownames(tJac) res <- list(Theta = as.data.frame(Theta)) ## add factor information if any: if(NROW(Theta) > 1) res$mf.basic <- mf.basic ## return: res } ordinal/R/clm.profile.R0000644000176200001440000007334613332052252014452 0ustar liggesusers############################################################################# # Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen # # This file is part of the ordinal package for R (*ordinal*) # # *ordinal* is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # *ordinal* is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # and/or # . ############################################################################# ## This file contains: ## profile and confint methods for clm objects. 
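## Typical use -- a sketch kept as a comment ('fm' and 'pr' are
## hypothetical names; the 'wine' data ships with this package):
## fm <- clm(rating ~ temp + contact, data = wine)
## pr <- profile(fm)         # profile the regression parameters
## confint(pr, level = 0.95) # profile-likelihood intervals
## plot(pr)                  # profile traces with confidence limits
## The profiles are built from the signed likelihood root,
##   lroot = sign * sqrt(2 * (logLik(full) - logLik(profile))),
## traced out until |lroot| exceeds qnorm(1 - alpha/2).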
profile.clm <- function(fitted, which.beta = seq_len(nbeta), which.zeta = seq_len(nzeta), alpha = 0.001, max.steps = 50, nsteps = 8, trace = FALSE, step.warn = 5, control = list(), ...) { ### match and tests arguments and dispatch to .zeta and .beta ### functions for the actual profiling. ### which.[beta, zeta] - numeric or character vectors. ### Works for models with nominal and scale effects and for any number ### of aliased coefs. ## match and test arguments: if(fitted$link %in% c("Aranda-Ordaz", "log-gamma")) stop("Profiling not implemented for models with flexible link function") if(any(is.na(diag(vcov(fitted))))) stop("Cannot get profile when vcov(fitted) contains NAs", call.=FALSE) stopifnot(is.numeric(alpha) && length(alpha) == 1 && alpha > 0 && alpha < 1) stopifnot(round(max.steps) > round(nsteps)) stopifnot(round(nsteps) > round(step.warn)) stopifnot(round(nsteps) > 0 && round(step.warn) >= 0) max.steps <- round(max.steps) nsteps <- round(nsteps) step.warn <- round(step.warn) trace <- as.logical(trace)[1] ### BETA: beta.names <- names(fitted$beta) ## possible beta nbeta <- length(fitted$beta) if(is.character(which.beta)) which.beta <- match(which.beta, beta.names, nomatch = 0) ## which.beta is a numeric vector if(!all(which.beta %in% seq_len(nbeta))) stop("invalid 'parm' argument") ### ZETA: zeta.names <- names(fitted$zeta) ## possible zeta nzeta <- length(fitted$zeta) if(is.character(which.zeta)) which.zeta <- match(which.zeta, zeta.names, nomatch = 0) ## which.zeta is a numeric vector if(!all(which.zeta %in% seq_len(nzeta))) stop("invalid 'parm' argument") ## the actual profiling for beta and zeta par: prof.beta <- if(nbeta) profile.clm.beta(fitted, which.beta, alpha, max.steps, nsteps, trace, step.warn, control, ...) else NULL prof.zeta <- if(nzeta) profile.clm.zeta(fitted, which.zeta, alpha, max.steps, nsteps, trace, step.warn, control, ...) else NULL ## collect and return results: val <- structure(c(prof.beta, prof.zeta), original.fit = fitted) class(val) <- c("profile.clm") return(val) } profile.clm.beta <- function(fitted, which.beta, alpha = 0.001, max.steps = 50, nsteps = 8, trace = FALSE, step.warn = 5, control = list(), ...) ### which.beta is assumed to be a numeric vector { lroot.max <- qnorm(1 - alpha/2) delta = lroot.max/nsteps nbeta <- length(fitted$beta) beta.names <- names(fitted$beta) nalpha <- length(fitted$alpha) orig.par <- c(fitted$alpha, fitted$beta) if(!is.null(zeta <- fitted$zeta)) { names(zeta) <- paste("sca", names(fitted$zeta), sep=".") orig.par <- c(orig.par, zeta) } if(!is.null(lambda <- fitted$lambda)) { orig.par <- c(orig.par, lambda) } ### NOTE: we need to update zeta.names to make names(orig.par) ### unique. This is needed to correctly construct the resulting ### par.vals matrix and to extract from it again. std.err <- coef(summary(fitted))[nalpha + 1:nbeta, "Std. 
Error"] if(any(is.na(std.err))) stop("Cannot profile model where standard errors are NA", call.=FALSE) ## results list: prof.list <- vector("list", length = length(which.beta)) names(prof.list) <- beta.names[which.beta] ## get model matrices and model environment: ### NOTE: Fixing the fragile update approach: ## mf <- update(fitted, method = "model.frame") ## Need to subset by wts to make nrow(X) == nrow(B1) ## X <- with(mf, X[wts > 0, , drop=FALSE]) ## containing alias cols wts <- getWeights(model.frame(fitted)) X <- model.matrix(fitted)$X[wts > 0, , drop=FALSE] if(fitted$control$sign.location == "positive") X <- -X rho <- get_clmRho(fitted) ## rho <- update(fitted, doFit = FALSE) orig <- as.list(rho)[c("B1", "B2", "o1", "o2")] rho$n.psi <- rho$n.psi - 1 ## needed for models with scale nalpha.clean <- sum(!fitted$aliased$alpha) par.clean <- orig.par[!is.na(orig.par)] ## which of which.beta are NA: alias.wb <- fitted$aliased$beta[which.beta] ## For each which.beta move up or down, fit the model and store the ## signed likelihood root statistic and parameter values: for(wb in which.beta) { if(alias.wb[wb == which.beta]) next ## ignore aliased coef rem <- nalpha.clean + (which.beta - cumsum(alias.wb))[wb == which.beta] par.wb <- matrix(coef(fitted), nrow = 1) ## MLE wb.name <- beta.names[wb] lroot.wb <- 0 ## lroot at MLE ## set variables in fitting environment: rho$B1 <- orig$B1[, -rem, drop=FALSE] rho$B2 <- orig$B2[, -rem, drop=FALSE] for(direction in c(-1, 1)) { ## move down or up if(trace) { message("\nParameter: ", wb.name, c(" down", " up")[(direction + 1)/2 + 1]) utils::flush.console() } ## reset starting values: rho$par <- par.clean[-rem] for(step in seq_len(max.steps)) { ## increment beta.i, offset and refit model without wb parameter: beta.i <- fitted$beta[wb] + direction * step * delta * std.err[wb] new.off <- X[, 1+wb, drop=TRUE] * beta.i rho$o1 <- orig$o1 - new.off rho$o2 <- orig$o2 - new.off fit <- clm.fit.NR(rho, control) ## save likelihood root statistic: lroot <- -direction * sqrt(2*(fitted$logLik - fit$logLik)) ## save lroot and pararameter values: lroot.wb <- c(lroot.wb, lroot) temp.par <- orig.par temp.par[names(fit$par)] <- fit$par temp.par[wb.name] <- beta.i par.wb <- rbind(par.wb, temp.par) ## break for loop if profile is far enough: if(abs(lroot) > lroot.max) break } ## end 'step in seq_len(max.steps)' ## test that lroot.max is reached and enough steps are taken: if(abs(lroot) < lroot.max) warning("profile may be unreliable for ", wb.name, " because lroot.max was not reached for ", wb, c(" down", " up")[(direction + 1)/2 + 1]) if(step <= step.warn) warning("profile may be unreliable for ", wb.name, " because only ", step, "\n steps were taken ", c("down", "up")[(direction + 1)/2 + 1]) } ## end 'direction in c(-1, 1)' ## order lroot and par values and collect in a data.frame: lroot.order <- order(lroot.wb, decreasing = TRUE) prof.list[[wb.name]] <- structure(data.frame(lroot.wb[lroot.order]), names = "lroot") prof.list[[wb.name]]$par.vals <- par.wb[lroot.order, ] if(!all(diff(par.wb[lroot.order, wb.name]) > 0)) warning("likelihood is not monotonically decreasing from maximum,\n", " so profile may be unreliable for ", wb.name) } ## end 'wb in which.beta' prof.list } profile.clm.zeta <- function(fitted, which.zeta, alpha = 0.001, max.steps = 50, nsteps = 8, trace = FALSE, step.warn = 5, control = list(), ...) 
### which.zeta is assumed to be a numeric vector { lroot.max <- qnorm(1 - alpha/2) delta = lroot.max/nsteps nzeta <- length(fitted$zeta) nbeta <- length(fitted$beta) zeta <- fitted$zeta names(zeta) <- zeta.names <- paste("sca", names(fitted$zeta), sep=".") ### NOTE: we need to update zeta.names to make names(orig.par) ### unique. This is needed to correctly construct the resulting ### par.vals matrix and to extract from it again. orig.par <- c(fitted$alpha, fitted$beta, zeta) nalpha <- length(fitted$alpha) std.err <- coef(summary(fitted))[nalpha+nbeta+1:nzeta, "Std. Error"] if(any(is.na(std.err))) stop("Cannot profile model where standard errors are NA", call.=FALSE) ## results list: prof.list <- vector("list", length = length(which.zeta)) names(prof.list) <- names(zeta)[which.zeta] ## get model environment: rho <- get_clmRho(fitted) ## rho <- update(fitted, doFit = FALSE) S <- rho$S ## S without intercept Soff <- rho$Soff rho$k <- max(0, rho$k - 1) ab <- c(fitted$alpha, fitted$beta) ab.clean <- ab[!is.na(ab)] zeta.clean <- zeta[!fitted$aliased$zeta] ## which of which.zeta are NA: alias.wz <- fitted$aliased$zeta[which.zeta] ## For each which.zeta move up or down, fit the model and store the ## signed likelihood root statistic and parameter values: for(wz in which.zeta) { if(alias.wz[wz]) next ## ignore aliased coef ## rem: which columns of S to remove rem <- (which.zeta - cumsum(alias.wz))[wz] par.wz <- matrix(coef(fitted), nrow = 1) ## MLE wz.name <- zeta.names[wz] lroot.wz <- 0 ## lroot at MLE ## set variables in fitting environment: rho$S <- S[, -rem, drop=FALSE] for(direction in c(-1, 1)) { ## move down or up if(trace) { message("\nParameter: ", wz.name, c(" down", " up")[(direction + 1)/2 + 1]) utils::flush.console() } ## reset starting values: rho$par <- c(ab.clean, zeta.clean[-rem]) ## rho$par <- coef(fitted, na.rm = TRUE)[-rem] for(step in seq_len(max.steps)) { ## increment zeta.i, offset and refit model without wz parameter: zeta.i <- zeta[wz] + direction * step * delta * std.err[wz] rho$Soff <- rho$sigma <- Soff * exp(S[, wz, drop=TRUE] * zeta.i) ### NOTE: Need to update sigma in addition to Soff since otherwise ### sigma isn't updated when k=0 (single scale par) fit <- clm.fit.NR(rho, control) ## save likelihood root statistic: lroot <- -direction * sqrt(2*(fitted$logLik - fit$logLik)) ## save lroot and pararameter values: lroot.wz <- c(lroot.wz, lroot) temp.par <- orig.par temp.par[names(fit$par)] <- fit$par temp.par[wz.name] <- zeta.i par.wz <- rbind(par.wz, temp.par) ## break for loop if profile is far enough: if(abs(lroot) > lroot.max) break } ## end 'step in seq_len(max.steps)' ## test that lroot.max is reached and enough steps are taken: if(abs(lroot) < lroot.max) warning("profile may be unreliable for ", wz.name, " because qnorm(1 - alpha/2) was not reached when profiling ", c(" down", " up")[(direction + 1)/2 + 1]) if(step <= step.warn) warning("profile may be unreliable for ", wz.name, " because only ", step, "\n steps were taken ", c("down", "up")[(direction + 1)/2 + 1]) } ## end 'direction in c(-1, 1)' ## order lroot and par values and collect in a data.frame: ## lroot.order <- order(lroot.wz, decreasing = TRUE) lroot.order <- order(par.wz[, wz.name], decreasing = FALSE) ### NOTE: Need to change how values are ordered here. We should order ### with par.wz[, wz.name] instead of lroot.wz since if lroot.wz is ### flat, the order may be incorrect. 
prof.list[[wz.name]] <- structure(data.frame(lroot.wz[lroot.order]), names = "lroot") prof.list[[wz.name]]$par.vals <- par.wz[lroot.order, ] if(!all(diff(lroot.wz[lroot.order]) <= sqrt(.Machine$double.eps))) warning("likelihood is not monotonically decreasing from maximum,\n", " so profile may be unreliable for ", wz.name) } ## end 'wz in which.zeta' prof.list } ## profile.sclm <- ## using clm.fit.env() ## function(fitted, which.beta = seq_len(nbeta), alpha = 0.001, ## max.steps = 50, nsteps = 8, trace = FALSE, ## step.warn = 5, control = list(), ...) ## ### NOTE: seq_len(nbeta) works for nbeta = 0: numeric(0), while ## ### 1:nbeta gives c(1, 0). ## ## ### This is almost a copy of profile.clm2, which use clm.fit rather ## ### than clm.fit.env. The current implementation is the fastest, but ## ### possibly less readable. ## { ## ## match and test arguments: ## stopifnot(is.numeric(alpha) && length(alpha) == 1 && ## alpha > 0 && alpha < 1) ## stopifnot(round(max.steps) > round(nsteps)) ## stopifnot(round(nsteps) > round(step.warn)) ## stopifnot(round(nsteps) > 0 && round(step.warn) >= 0) ## max.steps <- round(max.steps) ## nsteps <- round(nsteps) ## step.warn <- round(step.warn) ## trace <- as.logical(trace)[1] ## ## possible parameters on which to profile (including aliased coef): ## beta.names <- names(fitted$beta) ## nbeta <- length(fitted$beta) ## if(is.character(which.beta)) ## which.beta <- match(which.beta, beta.names, nomatch = 0) ## ## which.beta is a numeric vector ## if(!all(which.beta %in% seq_len(nbeta))) ## stop("invalid 'parm' argument") ## stopifnot(length(which.beta) > 0) ## std.err <- coef(summary(fitted))[-(1:length(fitted$alpha)), ## "Std. Error"] ## ## profile limit: ## lroot.max <- qnorm(1 - alpha/2) ## ## profile step length: ## delta <- lroot.max / nsteps ## ## results list: ## prof.list <- vector("list", length = length(which.beta)) ## names(prof.list) <- beta.names[which.beta] ## ## get model.frame: ## X <- update(fitted, method = "model.frame")$X ## containing alias cols ## rho <- update(fitted, doFit = FALSE) ## orig <- as.list(rho)[c("B1", "B2", "o1", "o2")] ## rho$n.psi <- rho$n.psi - 1 ## nalpha.clean <- sum(!fitted$aliased$alpha) ## ## which of which.beta are NA: ## alias.wb <- fitted$aliased$beta[which.beta] ## ## For each which.beta move up or down, fit the model and store the ## ## signed likelihood root statistic and parameter values: ## for(wb in which.beta) { ## if(alias.wb[wb]) next ## ignore aliased coef ## rem <- nalpha.clean + (which.beta - cumsum(alias.wb))[wb] ## par.wb <- matrix(coef(fitted), nrow = 1) ## MLE ## wb.name <- beta.names[wb] ## lroot.wb <- 0 ## lroot at MLE ## ## set variables in fitting environment: ## rho$B1 <- orig$B1[, -rem, drop=FALSE] ## rho$B2 <- orig$B2[, -rem, drop=FALSE] ## for(direction in c(-1, 1)) { ## move down or up ## if(trace) { ## message("\nParameter: ", wb.name, ## c(" down", " up")[(direction + 1)/2 + 1]) ## utils::flush.console() ## } ## ## reset starting values: ## rho$par <- coef(fitted, na.rm = TRUE)[-rem] ## ## rho$par <- orig.par[-wb.name] ## for(step in seq_len(max.steps)) { ## ## increment beta.i, offset and refit model without wb parameter: ## beta.i <- fitted$beta[wb] + ## direction * step * delta * std.err[wb] ## new.off <- X[, 1+wb, drop=TRUE] * beta.i ## rho$o1 <- orig$o1 - new.off ## rho$o2 <- orig$o2 - new.off ## fit <- clm.fit.env(rho, control) ## ## save likelihood root statistic: ## lroot <- -direction * sqrt(2*(fitted$logLik - fit$logLik)) ## ## save lroot and pararameter values: ## lroot.wb <- 
c(lroot.wb, lroot) ## temp.par <- coef(fitted) ## temp.par[names(fit$par)] <- fit$par ## temp.par[wb.name] <- beta.i ## par.wb <- rbind(par.wb, temp.par) ## ## break for loop if profile is far enough: ## if(abs(lroot) > lroot.max) break ## } ## end 'step in seq_len(max.steps)' ## ## test that lroot.max is reached and enough steps are taken: ## if(abs(lroot) < lroot.max) ## warning("profile may be unreliable for ", wb.name, ## " because lroot.max was not reached for ", ## wb, c(" down", " up")[(direction + 1)/2 + 1]) ## if(step <= step.warn) ## warning("profile may be unreliable for ", wb.name, ## " because only ", step, "\n steps were taken ", ## c("down", "up")[(direction + 1)/2 + 1]) ## } ## end 'direction in c(-1, 1)' ## ## order lroot and par. values and collect in a data.frame: ## lroot.order <- order(lroot.wb, decreasing = TRUE) ## prof.list[[wb.name]] <- ## structure(data.frame(lroot.wb[lroot.order]), names = "lroot") ## prof.list[[wb.name]]$par.vals <- par.wb[lroot.order, ] ## ## if(!all(diff(par.wb[lroot.order, wb.name]) > 0)) ## warning("likelihood is not monotonically decreasing from maximum,\n", ## " so profile may be unreliable for ", wb.name) ## } ## end 'wb in which.beta' ## val <- structure(prof.list, original.fit = fitted) ## class(val) <- c("profile.clm") ## return(val) ## } format.perc <- function(probs, digits) ### function lifted from stats:::format.perc to avoid using ':::' paste(format(100 * probs, trim = TRUE, scientific = FALSE, digits = digits), "%") confint.clm <- function(object, parm, level = 0.95, type = c("profile", "Wald"), trace = FALSE, ...) ### parm argument is ignored - use confint.profile for finer control. { ## match and test arguments type <- match.arg(type) if(object$link %in% c("Aranda-Ordaz", "log-gamma") && type == "profile") { message(paste("Profile intervals not available for models with flexible", "link function:\n reporting Wald intervals instead")) type <- "Wald" } stopifnot(is.numeric(level) && length(level) == 1 && level > 0 && level < 1) trace <- as.logical(trace)[1] if(!(missing(parm) || is.null(parm))) message("argument 'parm' ignored") ## Wald CI: if(type == "Wald") { a <- (1 - level)/2 a <- c(a, 1 - a) pct <- format.perc(a, 3) fac <- qnorm(a) coefs <- coef(object) ses <- coef(summary(object))[, 2] ci <- array(NA, dim = c(length(coefs), 2L), dimnames = list(names(coefs), pct)) ci[] <- coefs + ses %o% fac return(ci) } ## profile likelhood CI: if(trace) { message("Wait for profiling to be done...") utils::flush.console() } ## get profile: object <- profile(object, alpha = (1 - level)/4, trace = trace, ...) ## get and return CIs: confint(object, level = level, ...) } ## confint.clm <- ## function(object, parm = seq_len(npar), level = 0.95, ## type = c("profile", "Wald"), trace = FALSE, ...) ## ### parm: a 2-list with beta and zeta? ## ### or args which.beta, which.zeta while parm is redundant? ## ## ### make waldci.clm(object, which.alpha, which.beta, which.zeta, level ## ### = 0.95) ?? 
## { ## ## match and test arguments ## type <- match.arg(type) ## stopifnot(is.numeric(level) && length(level) == 1 && ## level > 0 && level < 1) ## trace <- as.logical(trace)[1] ## mle <- object$beta ## if(!is.null(zeta <- object$zeta)) { ## names(zeta) <- paste("sca", names(zeta), sep=".") ## mle <- c(mle, zeta) ## } ## npar <- length(mle) ## beta.names <- names(mle) ## if(is.character(parm)) stop("parm should be numeric") ## ## parm <- match(parm, names(c(object$beta, object$zeta))), nomatch = 0) ## if(!all(parm %in% seq_len(npar))) stop("invalid 'parm' argument") ## stopifnot(length(parm) > 0) ## ## Wald CI: ## if(type == "Wald") ## return(waldci.clm(object, parm, level)) ## ## return(confint.default(object = object, parm = beta.names[parm], ## ## level = level)) ## ## profile likelhood CI: ## if(trace) { ## message("Waiting for profiling to be done...") ## utils::flush.console() ## } ## ## get profile: ## ### Edit these calls: ## object <- profile(object, which.beta = beta.names[parm], ## alpha = (1 - level)/4, trace = trace, ...) ## ## get and return CIs: ## confint(object, parm = beta.names[parm], level = level, ...) ## } confint.profile.clm <- function(object, parm = seq_len(nprofiles), level = 0.95, ...) ### parm index elements of object (the list of profiles) ### each par.vals matrix of each profile will have ### sum(!unlist(of$aliased)) columns. { ## match and test arguments: stopifnot(is.numeric(level) && length(level) == 1 && level > 0 && level < 1) of <- attr(object, "original.fit") prof.names <- names(object) nprofiles <- length(prof.names) if(is.character(parm)) ### Allow character here? parm <- match(parm, prof.names, nomatch = 0) if(!all(parm %in% seq_len(nprofiles))) stop("invalid 'parm' argument") stopifnot(length(parm) > 0) ## prepare CI: a <- (1-level)/2 a <- c(a, 1-a) pct <- paste(round(100*a, 1), "%") ci <- array(NA, dim = c(length(parm), 2), dimnames = list(prof.names[parm], pct)) cutoff <- qnorm(a) ## compute CI from spline interpolation of the likelihood profile: for(pr.name in prof.names[parm]) { if(is.null(pro <- object[[ pr.name ]])) next sp <- spline(x = pro[, "par.vals"][, pr.name], y = pro[, 1]) ## OBS ci[pr.name, ] <- approx(sp$y, sp$x, xout = rev(cutoff))$y } ## do not drop(ci) because rownames are lost for single coef cases: return(ci) } plot.profile.clm <- function(x, which.par = seq_len(nprofiles), level = c(0.95, 0.99), Log = FALSE, relative = TRUE, root = FALSE, fig = TRUE, approx = root, n = 1e3, ask = prod(par("mfcol")) < length(which.par) && dev.interactive(), ..., ylim = NULL) { ## match and test arguments: stopifnot(is.numeric(level) && all(level > 0) && all(level < 1)) stopifnot(n == round(n) && n > 0) Log <- as.logical(Log)[1] relative <- as.logical(relative)[1] root <- as.logical(root)[1] fig <- as.logical(fig)[1] approx <- as.logical(approx)[1] of <- attr(x, "original.fit") mle <- of$beta if(!is.null(zeta <- of$zeta)) { names(zeta) <- paste("sca", names(zeta), sep=".") mle <- c(mle, zeta) } prof.names <- names(x) nprofiles <- length(prof.names) if(is.character(which.par)) which.par <- match(which.par, prof.names, nomatch = 0) if(!all(which.par %in% seq_len(nprofiles))) stop("invalid 'which.par' argument") stopifnot(length(which.par) > 0) ML <- of$logLik ## prepare return value: which.names <- prof.names[which.par] spline.list <- vector("list", length(which.par)) names(spline.list) <- which.names if(approx) { std.err <- coef(summary(of))[-(1:length(of$alpha)), 2] names(std.err) <- names(mle) } ## aks before "over writing" the plot? 
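  ## (When more profiles are requested than fit in the current mfcol
  ## layout on an interactive device, devAskNewPage(TRUE) makes the
  ## device prompt before each new page; on.exit restores the
  ## previous setting.)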
if(ask) { oask <- devAskNewPage(TRUE) on.exit(devAskNewPage(oask)) } ## for each pm make the appropriate plot: for(pr.name in prof.names[which.par]) { ## confidence limits: lim <- sapply(level, function(x) exp(-qchisq(x, df=1)/2) ) if(is.null(pro <- x[[ pr.name ]])) next sp <- spline(x=pro[, "par.vals"][, pr.name], y=pro[, 1], n=n) if(approx) y.approx <- (mle[pr.name] - sp$x) / std.err[pr.name] if(root) { ylab <- "profile trace" lim <- c(-1, 1) %o% sqrt(-2 * log(lim)) sp$y <- -sp$y if(approx) y.approx <- -y.approx } else { ## !root: sp$y <- -sp$y^2/2 if(approx) y.approx <- -y.approx^2/2 if(relative && !Log) { sp$y <- exp(sp$y) if(approx) y.approx <- exp(y.approx) ylab <- "Relative profile likelihood" if(missing(ylim)) ylim <- c(0, 1) } if(relative && Log) { ylab <- "Relative profile log-likelihood" lim <- log(lim) } if(!relative && Log) { sp$y <- sp$y + ML if(approx) y.approx <- y.approx + ML ylab <- "Profile log-likelihood" lim <- ML + log(lim) } if(!relative && !Log) { sp$y <- exp(sp$y + ML) if(approx) y.approx <- exp(y.approx + ML) ylab <- "Profile likelihood" lim <- exp(ML + log(lim)) } } spline.list[[ pr.name ]] <- sp if(fig) { ## do the plotting: plot(sp$x, sp$y, type = "l", ylim = ylim, xlab = pr.name, ylab = ylab, ...) abline(h = lim) if(approx) lines(sp$x, y.approx, lty = 2) if(root) points(mle[pr.name], 0, pch = 3) } } attr(spline.list, "limits") <- lim invisible(spline.list) } profileAlt.clm <- ## using clm.fit() function(fitted, which.beta = seq_len(nbeta), alpha = 0.01, max.steps = 50, nsteps = 8, trace = FALSE, step.warn = 5, control = list(), ...) ### NOTE: seq_len(nbeta) works for nbeta = 0: numeric(0), while ### 1:nbeta gives c(1, 0). ### args: ### alpha - The likelihood is profiled in the 100*(1-alpha)% ### confidence region as determined by the profile likelihood ### max.steps - the maximum number of profile steps in each direction ### nsteps - the approximate no. steps determined by the quadratic ### approximation to the log-likelihood function ### trace - if trace > 0 information of progress is printed ### step.warn - a warning is issued if the profile in each direction ### contains less than step.warn steps (due to lack of precision). { ## match and test arguments: stopifnot(is.numeric(alpha) && length(alpha) == 1 && alpha > 0 && alpha < 1) stopifnot(round(max.steps) > round(nsteps)) stopifnot(round(nsteps) > round(step.warn)) stopifnot(round(nsteps) > 0 && round(step.warn) >= 0) max.steps <- round(max.steps) nsteps <- round(nsteps) step.warn <- round(step.warn) trace <- as.logical(trace)[1] beta.names <- names(fitted$beta) nbeta <- length(fitted$beta) if(is.character(which.beta)) which.beta <- match(which.beta, beta.names, nomatch = 0) if(!all(which.beta %in% seq_len(nbeta))) stop("invalid 'parm' argument") stopifnot(length(which.beta) > 0) ## Extract various things from the original fit: orig.par <- coef(fitted) ## c(alpha, beta) beta0 <- fitted$beta ## regression coef. nalpha <- length(fitted$alpha) ## no. threshold coef. nbeta <- length(beta0) beta.names <- names(beta0) orig.logLik <- fitted$logLik std.err <- coef(summary(fitted))[-(1:nalpha), "Std. 
Error"] link <- fitted$link threshold <- fitted$threshold ## profile limit: lroot.max <- qnorm(1 - alpha/2) ## profile step length: delta <- lroot.max / nsteps ## results list: prof.list <- vector("list", length = length(which.beta)) names(prof.list) <- beta.names[which.beta] ## get model.frame: ### NOTE: Attempting the following fix for a safer extraction of ### model-design-objects: ## mf <- update(fitted, method = "model.frame") contr <- c(fitted$contrasts, fitted$S.contrasts, fitted$nom.contrasts) mf <- get_clmDesign(fitted$model, fitted$terms.list, contr) y <- mf$y X <- mf$X wts <- mf$wts orig.off <- mf$off ## For each which.beta move up or down, fit the model and store the ## signed likelihood root statistic and parameter values: for(wb in which.beta) { par.wb <- matrix(orig.par, nrow = 1) ## MLE wb.name <- beta.names[wb] lroot.wb <- 0 ## lroot at MLE X.wb <- X[, -(1+wb), drop=FALSE] for(direction in c(-1, 1)) { ## move down or up if(trace) { message("\nParameter: ", wb.name, c(" down", " up")[(direction + 1)/2 + 1]) utils::flush.console() } ## (re)set starting values: start <- orig.par[-(nalpha + wb)] for(step in seq_len(max.steps)) { ## increment offset and refit model without wb parameter: beta.i <- beta0[wb] + direction * step * delta * std.err[wb] new.off <- orig.off + X[, 1+wb, drop=TRUE] * beta.i fit <- clm.fit(y=y, X=X.wb, weights=wts, offset=new.off, control=control, start=start, link=link, threshold=threshold) ## save likelihood root statistic: lroot <- -direction * sqrt(2*(fitted$logLik - fit$logLik)) ## save lroot and pararameter values: lroot.wb <- c(lroot.wb, lroot) temp.par <- orig.par temp.par[names(fit$par)] <- fit$par temp.par[wb.name] <- beta.i par.wb <- rbind(par.wb, temp.par) ## update starting values: start <- fit$par ## break for loop if profile is far enough: if(abs(lroot) > lroot.max) break } ## end 'step in seq_len(max.steps)' ## test that lroot.max is reached and enough steps are taken: if(abs(lroot) < lroot.max) warning("profile may be unreliable for ", wb.name, " because lroot.max was not reached for ", wb, c(" down", " up")[(direction + 1)/2 + 1]) if(step <= step.warn) warning("profile may be unreliable for ", wb.name, " because only ", step, "\n steps were taken ", c("down", "up")[(direction + 1)/2 + 1]) } ## end 'direction in c(-1, 1)' ## order lroot and par. values and collect in a data.frame: lroot.order <- order(lroot.wb, decreasing = TRUE) prof.list[[wb.name]] <- structure(data.frame(lroot.wb[lroot.order]), names = "lroot") prof.list[[wb.name]]$par.vals <- par.wb[lroot.order, ] if(!all(diff(par.wb[lroot.order, wb.name]) > 0)) warning("likelihood is not monotonically decreasing from maximum,\n", " so profile may be unreliable for ", wb.name) } ## end 'wb in which.beta' val <- structure(prof.list, original.fit = fitted) class(val) <- c("profile.clm") return(val) } ordinal/R/clmm.methods.R0000644000176200001440000002270613573721223014634 0ustar liggesusers############################################################################# # Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen # # This file is part of the ordinal package for R (*ordinal*) # # *ordinal* is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. 
# # *ordinal* is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # and/or # . ############################################################################# ## This file contains: ## Implementation of various methods for clmm objects. formatVC <- function(varc, digits = max(3, getOption("digits") - 2)) ### "format()" the 'VarCorr' matrix of the random effects -- for ### show()ing ### Borrowed from lme4/R/lmer.R with minor modifications. { recorr <- lapply(varc, attr, "correlation") reStdDev <- lapply(varc, attr, "stddev") reLens <- unlist(lapply(reStdDev, length)) nr <- sum(reLens) reMat <- array('', c(nr, 4), list(rep.int('', nr), c("Groups", "Name", "Variance", "Std.Dev."))) reMat[1+cumsum(reLens)-reLens, 1] <- names(reLens) reMat[,2] <- unlist(lapply(varc, colnames)) reMat[,3] <- format(unlist(reStdDev)^2, digits = digits) reMat[,4] <- format(unlist(reStdDev), digits = digits) if(any(reLens > 1)) { maxlen <- max(reLens) corr <- do.call("rbind", lapply(recorr, function(x, maxlen) { if(is.null(x)) return("") x <- as(x, "matrix") cc <- format(round(x, 3), nsmall = 3) cc[!lower.tri(cc)] <- "" nr <- dim(cc)[1] if (nr >= maxlen) return(cc) cbind(cc, matrix("", nr, maxlen-nr)) }, maxlen)) colnames(corr) <- c("Corr", rep.int("", maxlen - 1)) cbind(reMat, corr) } else reMat } varcov <- function(object, format=FALSE, digits=max(3, getOption("digits") - 2), ...) ### VarCorr method for model environments - should be the same for ### fitted model objects. { ## Compute variance-covariance matrices of the random effects. res <- lapply(object$ST, function(st) { ## Variance-covariance matrix for the random effects: VC <- tcrossprod(st) ## Standard deviations: stddev <- sqrt(diag(VC)) corr <- t(VC / stddev)/stddev attr(VC, "stddev") <- stddev ## correlation: if(NCOL(st) > 1) { diag(corr) <- 1 attr(VC, "correlation") <- corr } VC }) names(res) <- names(object$dims$nlev.re) if(format) noquote(formatVC(res, digits=digits)) else res } VarCorr <- function(x, ...) UseMethod("VarCorr") VarCorr.clmm <- function(x, ...) varcov(x, ...) print.clmm <- function(x, digits = max(3, getOption("digits") - 3), ...) { if(x$nAGQ >= 2) cat(paste("Cumulative Link Mixed Model fitted with the adaptive", "Gauss-Hermite \nquadrature approximation with", x$nAGQ ,"quadrature points"), "\n\n") else if(x$nAGQ <= -1) cat(paste("Cumulative Link Mixed Model fitted with the", "Gauss-Hermite \nquadrature approximation with", abs(x$nAGQ) ,"quadrature points"), "\n\n") else cat("Cumulative Link Mixed Model fitted with the Laplace approximation\n", fill=TRUE) cat("formula:", deparse(x$formula), fill=TRUE) if(!is.null(data.name <- x$call$data)) cat("data: ", deparse(data.name), fill=TRUE) if(!is.null(x$call$subset)) cat("subset: ", deparse(x$call$subset), fill=TRUE) cat("\n") print(x$info, row.names=FALSE, right=FALSE) cat("\nRandom effects:\n") print(formatVC(varcov(x), digits=digits), quote=FALSE, ...) nlev.char <- paste(names(x$dims$nlev.gf), " ", x$dims$nlev.gf, sep="", collapse=", ") cat("Number of groups: ", nlev.char, "\n") if(length(x$beta)) { cat("\nCoefficients:\n") print(x$beta, digits=digits, ...) } else { cat("\nNo Coefficients\n") } if(length(x$alpha) > 0) { cat("\nThresholds:\n") print(x$alpha, digits=digits, ...) 
} if(nzchar(mess <- naprint(x$na.action))) cat("(", mess, ")\n", sep="") return(invisible(x)) } vcov.clmm <- function(object, ...) vcov.clm(object, method="Cholesky") summary.clmm <- function(object, correlation = FALSE, ...) { if(is.null(object$Hessian)) stop("Model needs to be fitted with Hess = TRUE") nfepar <- object$dims$nfepar coef <- matrix(0, nfepar, 4, dimnames = list(names(object$coefficients[1:nfepar]), c("Estimate", "Std. Error", "z value", "Pr(>|z|)"))) coef[, 1] <- object$coefficients[1:nfepar] vc <- try(vcov(object), silent = TRUE) if(inherits(vc, "try-error")) { warning("Variance-covariance matrix of the parameters is not defined") coef[, 2:4] <- NaN if(correlation) warning("Correlation matrix is unavailable") object$condHess <- NaN } else { coef[, 2] <- sd <- sqrt(diag(vc)[1:nfepar]) ## Cond is Inf if Hessian contains NaNs: object$condHess <- if(any(is.na(object$Hessian))) Inf else with(eigen(object$Hessian, only.values = TRUE), abs(max(values) / min(values))) coef[, 3] <- coef[, 1]/coef[, 2] coef[, 4] <- 2 * pnorm(abs(coef[, 3]), lower.tail=FALSE) if(correlation) ## { ## sd <- sqrt(diag(vc)) object$correlation <- cov2cor(vc) ## (vc / sd) / rep(sd, rep(object$edf, object$edf)) } object$info$cond.H <- formatC(object$condHess, digits=1, format="e") object$coefficients <- coef class(object) <- "summary.clmm" return(object) } print.summary.clmm <- function(x, digits = max(3, getOption("digits") - 3), signif.stars = getOption("show.signif.stars"), ...) { if(x$nAGQ >= 2) cat(paste("Cumulative Link Mixed Model fitted with the adaptive", "Gauss-Hermite \nquadrature approximation with", x$nAGQ ,"quadrature points"), "\n\n") else if(x$nAGQ <= -1) cat(paste("Cumulative Link Mixed Model fitted with the", "Gauss-Hermite \nquadrature approximation with", abs(x$nAGQ) ,"quadrature points"), "\n\n") else cat("Cumulative Link Mixed Model fitted with the Laplace approximation\n", fill=TRUE) cat("formula:", deparse(x$formula), fill=TRUE) if(!is.null(data.name <- x$call$data)) cat("data: ", deparse(data.name), fill=TRUE) if(!is.null(x$call$subset)) cat("subset: ", deparse(x$call$subset), fill=TRUE) cat("\n") print(x$info, row.names=FALSE, right=FALSE) cat("\nRandom effects:\n") print(formatVC(varcov(x), digits=digits), quote=FALSE, ...) nlev.char <- paste(names(x$dims$nlev.gf), " ", x$dims$nlev.gf, sep="", collapse=", ") cat("Number of groups: ", nlev.char, "\n") nbeta <- length(x$beta) nalpha <- length(x$alpha) if(nbeta > 0) { cat("\nCoefficients:\n") printCoefmat(x$coefficients[nalpha + 1:nbeta, , drop=FALSE], digits=digits, signif.stars=signif.stars, has.Pvalue=TRUE, ...) } else { cat("\nNo Coefficients\n") } if(nalpha > 0) { ## always true cat("\nThreshold coefficients:\n") printCoefmat(x$coefficients[seq_len(nalpha), -4, drop=FALSE], digits=digits, has.Pvalue=FALSE, signif.stars=FALSE, ...) } if(nzchar(mess <- naprint(x$na.action))) cat("(", mess, ")\n", sep="") if(!is.null(correl <- x$correlation)) { cat("\nCorrelation of Coefficients:\n") ll <- lower.tri(correl) correl[ll] <- format(round(correl[ll], digits)) correl[!ll] <- "" print(correl[-1, -ncol(correl)], quote = FALSE, ...) } return(invisible(x)) } ## anova.clmm <- function(object, ...) ## anova.clm(object, ...) anova.clmm <- function(object, ...) { ### This essentially calls anova.clm(object, ...), but the names of ### the models were not displayed correctly in the printed output ### unless the following dodge is enforced. 
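## Illustrative use of this anova() method (added sketch; assumes the
## wine data shipped with this package, not run):
## fmm0 <- clmm(rating ~ contact + (1|judge), data = wine)
## fmm1 <- clmm(rating ~ temp + contact + (1|judge), data = wine)
## anova(fmm0, fmm1) # likelihood-ratio test of the temp effect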
mc <- match.call() arg.list <- as.list(mc) arg.list[[1]] <- NULL return(do.call(anova.clm, arg.list)) } logLik.clmm <- function(object, ...) structure(object$logLik, df = object$edf, class = "logLik") extractAIC.clmm <- function(fit, scale = 0, k = 2, ...) { edf <- fit$edf c(edf, -2*fit$logLik + k * edf) } nobs.clmm <- function(object, ...) object$dims$nobs edf.clmm <- function(object, ...) object$dims$edf ordinal/R/AO.R0000644000176200001440000000472713332052252012534 0ustar liggesusers############################################################################# # Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen # # This file is part of the ordinal package for R (*ordinal*) # # *ordinal* is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # *ordinal* is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # and/or # . ############################################################################# ## This file contains: ## [pdg]AO functions for the Aranda-Ordaz distribution. Here gAO is ## the gradient of the density function, dAO. The AO distribution is ## used as a flexible link function in clm2() and clmm2(). pAOR <- function(q, lambda, lower.tail = TRUE) { if(lambda < 1e-6) stop("'lambda' has to be positive. lambda = ", lambda, " was supplied") p <- 1 - (lambda * exp(q) + 1)^(-1/lambda) if(!lower.tail) 1 - p else p } pAO <- function(q, lambda, lower.tail = TRUE) .C("pAO_C", q = as.double(q), length(q), as.double(lambda[1]), as.integer(lower.tail), NAOK = TRUE)$q dAOR <- function(eta, lambda, log = FALSE) { ### exp(eta) * (lambda * exp(eta) + 1)^(-1-1/lambda) stopifnot(length(lambda) == 1 && length(log) == 1) if(lambda < 1e-6) stop("'lambda' has to be positive. 
lambda = ", lambda, " was supplied") log.d <- eta - (1 + 1/lambda) * log(lambda * exp(eta) + 1) if(!log) exp(log.d) else log.d } dAO <- function(eta, lambda, log = FALSE) { stopifnot(length(lambda) == 1 && length(log) == 1) .C("dAO_C", eta = as.double(eta), length(eta), as.double(lambda), as.integer(log), NAOK = TRUE)$eta } gAOR <- function(eta, lambda) { stopifnot(length(lambda) == 1) lex <- lambda * exp(eta) dAO(eta, lambda) * (1 - (1 + 1/lambda) * lex/(1 + lex)) } gAO <- function(eta, lambda) { stopifnot(length(lambda) == 1) .C("gAO_C", eta = as.double(eta), length(eta), as.double(lambda[1]), NAOK = TRUE)$eta } ordinal/R/clm2.R0000644000176200001440000015521113573721112013072 0ustar liggesusers############################################################################# # Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen # # This file is part of the ordinal package for R (*ordinal*) # # *ordinal* is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # *ordinal* is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # and/or # . ############################################################################# ## This file contains: ## An alternate (and older) implementation of CLMs in clm2(). The new ## and recommended implementation is available in clm(), cf. ./R/clm.R clm2.control <- function(method = c("ucminf", "Newton", "nlminb", "optim", "model.frame"), ..., convTol = 1e-4, trace = 0, maxIter = 100, gradTol = 1e-5, maxLineIter = 10) { method <- match.arg(method) ctrl <- if(method == "Newton") list(convTol=convTol, trace=trace, maxIter=maxIter, gradTol=gradTol, maxLineIter=maxLineIter) else list(trace = abs(trace), ...) if(!all(is.numeric(c(maxIter, gradTol, maxLineIter, convTol)))) stop("maxIter, gradTol, maxLineIter, convTol should all be numeric") if(convTol <= 0) stop("convTol should be > 0") if(method == "ucminf" && !"grtol" %in% names(ctrl)) ctrl$grtol <- gradTol ## if(method == "ucminf" && convTol > ctrl$grtol) ## stop("convTol should be <= grtol/gradTol") ## if(method == "Newton" && convTol > gradTol) ## stop("convTol should be <= gradTol") list(method = method, convTol = convTol, ctrl = ctrl) } newRho <- function(parent, XX, X, Z, y, weights, Loffset, Soffset, ## OK link, lambda, theta, threshold, Hess, control) ### OPTION: Could we remove the theta argument? 
{ rho <- new.env(parent = parent) rho$X <- X rho$dnX <- dimnames(X) dimnames(rho$X) <- NULL rho$Z <- Z rho$dnZ <- dimnames(Z) dimnames(rho$Z) <- NULL rho$weights <- weights rho$Loffset <- Loffset rho$expSoffset <- rho$sigma <- exp(Soffset) rho$Hess <- ifelse(Hess, 1L, 0L) rho$method <- control$method rho$convTol <- control$convTol rho$ctrl <- control$ctrl rho$pfun <- switch(link, logistic = plogis, probit = pnorm, cloglog = function(x) pgumbel(x, max=FALSE), cauchit = pcauchy, loglog = pgumbel, "Aranda-Ordaz" = function(x, lambda) pAO(x, lambda), "log-gamma" = function(x, lambda) plgamma(x, lambda)) rho$dfun <- switch(link, logistic = dlogis, probit = dnorm, cloglog = function(x) dgumbel(x, max=FALSE), cauchit = dcauchy, loglog = dgumbel, "Aranda-Ordaz" = function(x, lambda) dAO(x, lambda), "log-gamma" = function(x, lambda) dlgamma(x, lambda)) rho$gfun <- switch(link, logistic = glogis, probit = function(x) -x * dnorm(x), cloglog = function(x) ggumbel(x, max=FALSE), cloglog = ggumbel, cauchit = gcauchy, "Aranda-Ordaz" = function(x, lambda) gAO(x, lambda), ## shouldn't happen "log-gamma" = function(x, lambda) glgamma(x, lambda) ) rho$link <- link rho$linkInt <- switch(link, logistic = 1L, probit = 2L, cloglog = 3L, loglog = 4L, cauchit = 5L, "Aranda-Ordaz" = 6L, "log-gamma" = 7L) rho$estimLambda <- ifelse(link %in% c("Aranda-Ordaz", "log-gamma") && is.null(lambda), 1L, 0L) rho$nlambda <- 0L rho$lambda <- if(!is.null(lambda)) lambda else 1 if(link %in% c("Aranda-Ordaz", "log-gamma")) rho$nlambda <- 1L if(rho$estimLambda > 0 & rho$link == "Aranda-Ordaz" & rho$method != "nlminb"){ message("Changing to nlminb optimizer to accommodate optimization with bounds") m <- match( names(rho$ctrl), "grtol", 0) rho$ctrl <- rho$ctrl[!m] rho$method <- "nlminb" } if(rho$method == "nlminb") { rho$limitUp <- Inf rho$limitLow <- -Inf } rho$n <- n <- length(y) rho$p <- ifelse(missing(X), 0, ncol(X)) rho$k <- ifelse(missing(Z), 0, ncol(Z)) rho$y <- y rho$threshold <- threshold rho$ncolXX <- ncol(XX) rho$dnXX <- dimnames(XX) rho$lev <- levels(y) rho$ntheta <- nlevels(y) - 1 rho$B2 <- 1 * (col(matrix(0, n, rho$ntheta + 1)) == c(unclass(y))) ### Setting elements of o[12] to [+-]Inf cause problems in ### getGnll and clmm-related functions because 1) 0*Inf = NaN, while ### 0*large.value = 0, so several computations have to be handled ### specially and 2) Inf-values are not by default allowed in .C calls ### and all specials would have to be handled separately. 
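## Note (added): for the reasons given above, the code below substitutes
## the large finite value 1e5 for +/-Inf in the end-category offsets, so
## that products like 0 * o1 remain 0 and o1/o2 can be passed to .C()
## without NAOK complications.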
## o1 <- B2[, rho$ntheta + 1, drop = TRUE] ## o1[o1 == 1] <- Inf ## rho$o1 <- o1 - rho$Loffset ## o2 <- B2[,1, drop = TRUE] ## o2[o2 == 1] <- -Inf ## rho$o2 <- o2 - rho$Loffset inf.value <- 1e5 rho$o1 <- c(inf.value * rho$B2[, rho$ntheta + 1]) - rho$Loffset rho$o2 <- c(-inf.value * rho$B2[,1]) - rho$Loffset rho$B1 <- rho$B2[,-(rho$ntheta + 1), drop = FALSE] rho$B2 <- rho$B2[,-1, drop = FALSE] makeThresholds2(rho, threshold) rho$B1 <- rho$B1 %*% rho$tJac rho$B2 <- rho$B2 %*% rho$tJac rho$xiNames <- rho$alphaNames rho$nxi <- rho$nalpha * rho$ncolXX if(rho$ncolXX > 1) { ## test actually not needed rho$xiNames <- paste(rep(rho$alphaNames, rho$ncolXX), ".", rep(colnames(XX), each=rho$nalpha), sep="") LL1 <- lapply(1:rho$ncolXX, function(x) rho$B1 * XX[,x]) rho$B1 <- do.call(cbind, LL1) LL2 <- lapply(1:rho$ncolXX, function(x) rho$B2 * XX[,x]) rho$B2 <- do.call(cbind, LL2) } if(rho$p > 0) { rho$B1 <- cbind(rho$B1, -X) rho$B2 <- cbind(rho$B2, -X) } dimnames(rho$B1) <- NULL dimnames(rho$B2) <- NULL return(rho) } # populates the rho environment setStart <- function(rho) ## Ok { ## set starting values in the rho environment ## try logistic/probit regression on 'middle' cut q1 <- max(1, rho$ntheta %/% 2) y1 <- (c(unclass(rho$y)) > q1) x <- cbind(Intercept = rep(1, rho$n), rho$X) fit <- switch(rho$link, "logistic"= glm.fit(x, y1, rho$weights, family = binomial(), offset = rho$Loffset), "probit" = glm.fit(x, y1, rho$weights, family = binomial("probit"), offset = rho$Loffset), ## this is deliberate, a better starting point "cloglog" = glm.fit(x, y1, rho$weights, family = binomial("probit"), offset = rho$Loffset), "loglog" = glm.fit(x, y1, rho$weights, family = binomial("probit"), offset = rho$Loffset), "cauchit" = glm.fit(x, y1, rho$weights, family = binomial("cauchit"), offset = rho$Loffset), "Aranda-Ordaz" = glm.fit(x, y1, rho$weights, family = binomial("probit"), offset = rho$Loffset), "log-gamma" = glm.fit(x, y1, rho$weights, family = binomial("probit"), offset = rho$Loffset)) if(!fit$converged) stop("attempt to find suitable starting values failed") coefs <- fit$coefficients if(any(is.na(coefs))) { warning("design appears to be rank-deficient, so dropping some coefs") keep <- !is.na(coefs) coefs <- coefs[keep] rho$X <- rho$X[, keep[-1], drop = FALSE] rho$dnX[[2]] <- rho$dnX[[2]][keep[-1]] rho$B1 <- rho$B1[, c(rep(TRUE, rho$nxi), keep[-1]), drop = FALSE] rho$B2 <- rho$B2[, c(rep(TRUE, rho$nxi), keep[-1]), drop = FALSE] rho$p <- ncol(rho$X) } ## Intercepts: spacing <- qlogis((1:rho$ntheta)/(rho$ntheta+1)) # just a guess if(rho$link != "logit") spacing <- spacing/1.7 ## if(rho$threshold == "flexible") # default alphas <- -coefs[1] + spacing - spacing[q1] if(rho$threshold == "symmetric" && rho$ntheta %% 2) ## ntheta odd alphas <- c(alphas[q1+1],cumsum(rep(spacing[q1+2], rho$nalpha-1))) if(rho$threshold == "symmetric" && !rho$ntheta %% 2) ## ntheta even alphas <- c(alphas[q1:(q1+1)], cumsum(rep(spacing[q1+1], rho$nalpha-2))) if(rho$threshold == "symmetric2" && rho$ntheta %% 2) ## ntheta odd alphas <- cumsum(rep(spacing[q1+2], rho$nalpha-1)) if(rho$threshold == "symmetric2" && !rho$ntheta %% 2) ## ntheta even alphas <- cumsum(rep(spacing[q1+1], rho$nalpha-2)) if(rho$threshold == "equidistant") alphas <- c(alphas[1], mean(diff(spacing))) ## initialize nominal effects to zero: if(rho$ncolXX > 1) { xi <- c(alphas, rep(rep(0, rho$nalpha), rho$ncolXX-1)) stopifnot(length(xi) == rho$nalpha * rho$ncolXX)} else xi <- alphas if(rho$estimLambda > 0){ rho$lambda <- 1 names(rho$lambda) <- "lambda" } start <- c(xi, 
coefs[-1], rep(0, rho$k), rep(1, rho$estimLambda)) names(start) <- NULL rho$start <- rho$par <- start } getPar <- function(rho) rho$par ## OK getNll <- function(rho, par) { ## ok if(!missing(par)) rho$par <- par with(rho, { if(estimLambda > 0) lambda <- par[nxi + p + k + 1:estimLambda] sigma <- if(k > 0) expSoffset * exp(drop(Z %*% par[nxi+p + 1:k])) else expSoffset eta1 <- (drop(B1 %*% par[1:(nxi + p)]) + o1)/sigma eta2 <- (drop(B2 %*% par[1:(nxi + p)]) + o2)/sigma pr <- if(nlambda) pfun(eta1, lambda) - pfun(eta2, lambda) else pfun(eta1) - pfun(eta2) if(all(is.finite(pr)) && all(pr > 0)) -sum(weights * log(pr)) else Inf }) } getGnll <- function(rho, par) { ## ok if(!missing(par)) rho$par <- par with(rho, { if(estimLambda > 0) lambda <- par[nxi + p + k + 1:estimLambda] sigma <- if(k > 0) expSoffset * exp(drop(Z %*% par[nxi+p + 1:k])) else expSoffset eta1 <- (drop(B1 %*% par[1:(nxi + p)]) + o1)/sigma eta2 <- (drop(B2 %*% par[1:(nxi + p)]) + o2)/sigma if(nlambda) { pr <- pfun(eta1, lambda) - pfun(eta2, lambda) p1 <- dfun(eta1, lambda) p2 <- dfun(eta2, lambda) } else { pr <- pfun(eta1) - pfun(eta2) p1 <- dfun(eta1) p2 <- dfun(eta2) } prSig <- pr * sigma ## eta1 * p1 is complicated because in theory eta1 contains ## Inf(-Inf) where p1 contains 0 and 0 * Inf = NaN... ## eta.p1 <- ifelse(p1 == 0, 0, eta1 * p1) ## eta.p2 <- ifelse(p2 == 0, 0, eta2 * p2) gradSigma <- ## if(k > 0) crossprod(Z, weights * (eta.p1 - eta.p2)/pr) if(k > 0) crossprod(Z, weights * (eta1 * p1 - eta2 * p2)/pr) else numeric(0) gradThetaBeta <- if(nxi > 0) -crossprod((B1*p1 - B2*p2), weights/prSig) else -crossprod((X * (p2 - p1)), weights/prSig) grad <- ## if (all(is.finite(pr)) && all(pr > 0)) ## c(gradThetaBeta, gradSigma) ## else rep(Inf, nxi + p + k) c(gradThetaBeta, gradSigma) }) if(rho$estimLambda > 0) c(rho$grad, grad.lambda(rho, rho$lambda, rho$link)) else rho$grad } getHnll <- function(rho, par) { ## ok if(!missing(par)) rho$par <- par with(rho, { eta1 <- drop(B1 %*% par[1:(nxi + p)]) + o1 eta2 <- drop(B2 %*% par[1:(nxi + p)]) + o2 pr <- pfun(eta1) - pfun(eta2) p1 <- dfun(eta1) p2 <- dfun(eta2) g1 <- gfun(eta1) g2 <- gfun(eta2) wtpr <- weights/pr dS.psi <- -crossprod(B1 * g1*wtpr, B1) + crossprod(B2 * g2*wtpr, B2) dpi.psi <- B1 * p1 - B2 * p2 ### dS.pi <- dpi.psi * wtpr/pr if (all(pr > 0)) dS.psi + crossprod(dpi.psi, (dpi.psi * wtpr/pr)) else array(NA, dim = c(nxi + p, nxi + p)) }) } .negLogLik <- function(rho) { ## negative log-likelihood ## OK with(rho, { eta1 <- drop(B1 %*% par[1:(nxi + p)]) + o1 eta2 <- drop(B2 %*% par[1:(nxi + p)]) + o2 pr <- pfun(eta1) - pfun(eta2) if (all(pr > 0)) -sum(weights * log(pr)) else Inf }) } .grad <- function(rho) { ## gradient of the negative log-likelihood ## OK with(rho, { p1 <- dfun(eta1) p2 <- dfun(eta2) wtpr <- weights/pr if (all(pr > 0)) -crossprod((B1 * p1 - B2 * p2), wtpr) else rep(NA, nalpha + p) }) } .hessian <- function(rho) { ## hessian of the negative log-likelihood ## OK with(rho, { dS.psi <- crossprod(B1 * gfun(eta1)*wtpr, B1) - crossprod(B2 * gfun(eta2)*wtpr, B2) dpi.psi <- B1 * p1 - B2 * p2 if (all(pr > 0)) -dS.psi + crossprod(dpi.psi, (dpi.psi * wtpr/pr)) else array(NA, dim = c(nxi+p, nxi+p)) }) } fitNR <- function(rho) ## OK { ctrl <- rho$ctrl stepFactor <- 1 innerIter <- 0 conv <- 1 ## Convergence flag message <- "iteration limit reached" rho$negLogLik <- .negLogLik(rho) if(rho$negLogLik == Inf) stop("Non-finite log-likelihood at starting value") rho$gradient <- .grad(rho) maxGrad <- max(abs(rho$gradient)) if(ctrl$trace > 0) Trace(iter=0, stepFactor, 
rho$negLogLik, maxGrad, rho$par, first=TRUE) ## Newton-Raphson algorithm: for(i in 1:ctrl$maxIter) { if(maxGrad < ctrl$gradTol) { message <- "max|gradient| < tol, so current iterate is probably solution" if(ctrl$trace > 0) cat("\nOptimizer converged! ", "max|grad|:", maxGrad, message, fill = TRUE) conv <- 0 break } rho$Hessian <- .hessian(rho) ## step <- .Call("La_dgesv", rho$Hessian, rho$gradient, .Machine$double.eps, ## PACKAGE = "base") ## solve H*step = g for 'step' step <- as.vector(solve(rho$Hessian, rho$gradient)) rho$par <- rho$par - stepFactor * step negLogLikTry <- .negLogLik(rho) lineIter <- 0 ## simple line search, i.e. step halfing: while(negLogLikTry > rho$negLogLik) { stepFactor <- stepFactor/2 rho$par <- rho$par + stepFactor * step negLogLikTry <- .negLogLik(rho) lineIter <- lineIter + 1 if(ctrl$trace > 0) Trace(i+innerIter, stepFactor, rho$negLogLik, maxGrad, rho$par, first=FALSE) if(lineIter > ctrl$maxLineIter){ message <- "step factor reduced below minimum" conv <- 2 break } innerIter <- innerIter + 1 } rho$negLogLik <- negLogLikTry rho$gradient <- .grad(rho) maxGrad <- max(abs(rho$gradient)) if(ctrl$trace > 0) Trace(iter=i+innerIter, stepFactor, rho$negLogLik, maxGrad, rho$par, first=FALSE) stepFactor <- min(1, 2 * stepFactor) } if(conv > 0) if(ctrl$trace > 0) cat(message, fill = TRUE) ## Save info rho$optRes$niter <- c(outer = i, inner = innerIter) rho$logLik <- -rho$negLogLik rho$maxGradient <- maxGrad rho$gradient <- as.vector(rho$gradient) rho$Hessian <- .hessian(rho) rho$optRes$message <- message rho$optRes$convergence <- conv } fitCLM <- function(rho) { ## OK if(rho$method == "Newton") { if(rho$k != 0) stop("Newton scheme not implemented for models with scale") if(rho$ncolXX > 1) stop("Newton scheme not implemented for models with nominal effects") if(rho$link %in% c("Aranda-Ordaz", "log-gamma")) stop("Newton scheme not implemented for models with", rho$link, "link function") fitNR(rho) return(invisible()) } optRes <- switch(rho$method, "nlminb" = nlminb(getPar(rho), function(par) getNll(rho, par), function(par) getGnll(rho, par), control=rho$ctrl, lower = rho$limitLow, upper = rho$limitUp), "ucminf" = ucminf(getPar(rho), function(par) getNll(rho, par), function(par) getGnll(rho, par), control=rho$ctrl), "optim" = optim(getPar(rho), function(par) getNll(rho, par), function(par) getGnll(rho, par), method="BFGS", control=rho$ctrl), ) rho$par <- optRes[[1]] rho$logLik <- - getNll(rho, optRes[[1]]) rho$optRes <- optRes rho$gradient <- c(getGnll(rho)) rho$maxGradient <- max(abs(rho$gradient)) if(rho$maxGradient > rho$convTol) warning("clm2 may not have converged:\n optimizer ", rho$method, " terminated with max|gradient|: ", rho$maxGradient, call.=FALSE) return(invisible()) } finalizeRho <- function(rho) { ## OK if(rho$method != "Newton") { rho$gradient <- c(getGnll(rho)) rho$maxGradient <- max(abs(rho$gradient)) rho$par <- rho$optRes[[1]] if(rho$Hess) { if(rho$k > 0 || rho$threshold != "flexible" || rho$ncolXX > 1 || rho$nlambda > 0) { if(rho$link == "Aranda-Ordaz" && rho$estimLambda > 0 && rho$lambda < 1e-3) message("Cannot get Hessian because lambda = ",rho$lambda ," is too close to boundary.\n", " Fit model with link == 'logistic' to get Hessian") else { rho$Hessian <- myhess(function(par) getNll(rho, par), rho$par) getNll(rho, rho$optRes[[1]]) # to reset the variables: # (par, pr) } } else rho$Hessian <- getHnll(rho, rho$optRes[[1]]) } } rho$convergence <- ifelse(rho$maxGradient > rho$convTol, FALSE, TRUE) with(rho, { if(nxi > 0) { xi <- par[seq_len(nxi)] 
names(xi) <- xiNames thetaNames <- paste(lev[-length(lev)], lev[-1], sep="|") Alpha <- Theta <- matrix(par[1:nxi], nrow=ncolXX, byrow=TRUE) Theta <- t(apply(Theta, 1, function(x) c(tJac %*% x))) if(ncolXX > 1){ dimnames(Theta) <- list(dnXX[[2]], thetaNames) dimnames(Alpha) <- list(dnXX[[2]], alphaNames) } else { Theta <- c(Theta) Alpha <- c(Alpha) names(Theta) <- thetaNames names(Alpha) <- alphaNames } coefficients <- xi } else coefficients <- numeric(0) if(p > 0) { beta <- par[nxi + 1:p] names(beta) <- dnX[[2]] coefficients <- c(coefficients, beta) } if(k > 0) { zeta <- par[nxi+p + 1:k] names(zeta) <- dnZ[[2]] coefficients <- c(coefficients, zeta) } if(estimLambda > 0) { names(lambda) <- "lambda" coefficients <- c(coefficients, lambda) } names(gradient) <- names(coefficients) edf <- p + nxi + k + estimLambda nobs <- sum(weights) fitted.values <- pr df.residual <- nobs - edf if(exists("Hessian", inherits=FALSE)) { dimnames(Hessian) <- list(names(coefficients), names(coefficients)) } }) res <- as.list(rho) keepNames <- c("df.residual", "fitted.values", "edf", "start", "beta", "coefficients", "zeta", "Alpha", "Theta", "xi", "lambda", "convergence", "Hessian", "convTol", "gradient", "optRes", "logLik", "call", "scale", "location", "nominal", "method", "y", "lev", "nobs", "threshold", "estimLambda", "link", "contrasts", "na.action") m <- match(keepNames, names(res), 0) res <- res[m] res } clm2 <- ## OK function(location, scale, nominal, data, weights, start, subset, na.action, contrasts, Hess = TRUE, model = TRUE, link = c("logistic", "probit", "cloglog", "loglog", "cauchit", "Aranda-Ordaz", "log-gamma"), lambda, doFit = TRUE, control, threshold = c("flexible", "symmetric", "equidistant"), ...) { L <- match.call(expand.dots = FALSE) if(missing(location)) stop("Model needs a specification of the location") if(missing(lambda)) lambda <- NULL if(missing(contrasts)) contrasts <- NULL link <- match.arg(link) if(!(link %in% c("Aranda-Ordaz", "log-gamma")) & !is.null(lambda)){ warning("lambda ignored with link ", link) lambda <- NULL } if(!is.null(lambda) & length(lambda) > 1) { lambda <- lambda[1] warning("lambda is ", length(lambda), " long. Only the first element ", lambda[1], " is used") } if(!is.null(lambda) & link == "Aranda-Ordaz") if(lambda < 1e-6) stop("lambda has to be positive and lambda < 1e-6 not allowed for numerical reasons. lambda = ", lambda, " was supplied.") if (missing(control)) control <- clm2.control(...) 
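## Note (added): 'control' must be constructed by clm2.control(), which
## is checked next.  A sketch of an explicit call (tolerance values are
## purely illustrative, not run):
## clm2(SURENESS ~ PROD, data = soup,
##      control = clm2.control(method = "ucminf", convTol = 1e-4))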
if(!setequal(names(control), c("method", "convTol", "ctrl"))) stop("specify 'control' via clm2.control()") if (missing(data)) L$data <- environment(location) if (is.matrix(eval.parent(L$data))) L$data <- as.data.frame(L$data) ### Collect variables in location, scale and nominal formulae in a ### single formula, evaluate the model.frame and get index of row ### names for the rows to keep in the individual model.frames: m <- match(c("location", "scale", "nominal"), names(L), 0) F <- lapply(as.list(L[m]), eval.parent) ## evaluate in parent ## frame to allow 'f <- formula(sureness ~ prod); clm2(f, ...)' varNames <- unique(unlist(lapply(F, all.vars))) longFormula <- eval(parse(text = paste("~", paste(varNames, collapse = "+")))[1]) m <- match(c("location", "data", "subset", "weights", "na.action"), names(L), 0) L0 <- L[c(1, m)] if(!missing(scale) || !missing(nominal)) L0$location <- longFormula L0$drop.unused.levels <- TRUE L0[[1]] <- as.name("model.frame") names(L0)[names(L0) == "location"] <- "formula" L0 <- eval.parent(L0) m <- match(c("location", "scale", "nominal", "data", "subset", "weights", "na.action"), names(L), 0) L <- L[c(1, m)] L$drop.unused.levels <- TRUE L[[1]] <- as.name("model.frame") S <- L ## L: Location, S: Scale L$scale <- L$nominal <- NULL names(L)[names(L) == "location"] <- "formula" L <- eval.parent(L) keep <- match(rownames(L0), rownames(L)) L <- L[keep, , drop = FALSE] TermsL <- attr(L, "terms") ### format response: y <- model.response(L) if(!is.factor(y)) stop("response needs to be a factor") ### format thresholds: threshold <- match.arg(threshold) ### format location: X <- model.matrix(TermsL, L, contrasts) Xint <- match("(Intercept)", colnames(X), nomatch = 0) if (Xint > 0) X <- X[, -Xint, drop = FALSE] else warning("an intercept is needed and assumed in the location") n <- nrow(X) if(is.null(wt <- model.weights(L))) wt <- rep(1, n) if(is.null(Loffset <- model.offset(L))) Loffset <- rep(0, n) ### Format nominal: if(!missing(nominal)) { Nom <- S Nom$location <- Nom$scale <- NULL names(Nom)[names(Nom) == "nominal"] <- "formula" Nom <- eval.parent(Nom) Nom <- Nom[match(rownames(L0), rownames(Nom)), ,drop=FALSE] TermsNom <- attr(Nom, "terms") XX <- model.matrix(TermsNom, Nom)## , contrasts) ### Not allowing other than treatment contrasts in nominal if(is.null(Noffset <- model.offset(Nom))) Noffset <- rep(0, n) Nint <- match("(Intercept)", colnames(XX), nomatch = 0) if(Nint != 1) stop("An intercept is needed in the nominal formula") ### Are there any requirements about the presence of an ### intercept in the nominal formula? } else XX <- array(1, dim=c(n, 1)) ### format scale: if(!missing(scale)) { S$location <- S$nominal <- NULL names(S)[names(S) == "scale"] <- "formula" S <- eval.parent(S) S <- S[match(rownames(L0), rownames(S)), ,drop=FALSE] TermsS <- attr(S, "terms") ### Should contrasts be allowed for the scale? Z <- model.matrix(TermsS, S, contrasts) Zint <- match("(Intercept)", colnames(Z), nomatch = 0) if(Zint > 0) Z <- Z[, -Zint, drop = FALSE] else warning("an intercept is needed and assumed in the scale") if(is.null(Soffset <- model.offset(S))) Soffset <- rep(0, n) if(ncol(Z) > 0 && n != nrow(Z)) # This shouldn't happen stop("Model needs same dataset in location and scale") } else if(missing(scale) && !is.factor(y)){ Z <- array(1, dim = c(n, 1)) Soffset <- rep(0, n) } else { Z <- array(dim = c(n, 0)) Soffset <- rep(0, n) } ### return model.frame? 
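## Note (added): with control$method == "model.frame" no model is
## fitted; a list holding the location (and, if supplied, scale and
## nominal) model frames is returned instead.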
if(control$method == "model.frame") { mf <- list(location = L) if(!missing(scale)) mf$scale <- S if(!missing(nominal)) mf$nominal <- Nom return(mf) } ### initialize and populate rho environment: rho <- newRho(parent.frame(), XX = XX, X=X, Z=Z, y=y, weights=wt, Loffset=Loffset, Soffset=Soffset, link=link, lambda = lambda, threshold=threshold, Hess = Hess, control = control) ### get starting values: if(missing(start)) setStart(rho) else rho$start <- rho$par <- start if(rho$estimLambda > 0 & rho$link == "Aranda-Ordaz") rho$limitLow <- c(rep(-Inf, length(rho$par)-1), 1e-5) if(length(rho$start) != with(rho, nxi + p + k + estimLambda)) stop("'start' is not of the correct length") ### OPTION: Could consider better check of increasing thresholds when ### ncol(XX) > 0 if(ncol(XX) == 0) { if(!all(diff(c(rho$tJac %*% rho$start[1:rho$nalpha])) > 0)) stop("Threshold starting values are not of increasing size") } if(!getNll(rho) < Inf) stop("Non-finite log-likelihood at starting values") if(model) { rho$location <- L if(!missing(scale)) rho$scale <- S if(!missing(nominal)) rho$nominal <- Nom } ### fit the model: if(!doFit) return(rho) fitCLM(rho) res <- finalizeRho(rho) ### add to output: res$call <- match.call() res$na.action <- attr(L0, "na.action") res$contrasts <- contrasts class(res) <- "clm2" res } print.clm2 <- function(x, ...) { if(!is.null(cl <- x$call)) { cat("Call:\n") dput(cl, control=NULL) } if(length(x$beta)) { cat("\nLocation coefficients:\n") print(x$beta, ...) } else { cat("\nNo location coefficients\n") } if(length(x$zeta)) { cat("\nScale coefficients:\n") print(x$zeta, ...) } else { cat("\nNo Scale coefficients\n") } if(x$estimLambda > 0) { cat("\nLink coefficient:\n") print(x$lambda) } if(length(x$xi) > 0) { cat("\nThreshold coefficients:\n") print(x$Alpha, ...) if(x$threshold != "flexible") { cat("\nThresholds:\n") print(x$Theta, ...) } } cat("\nlog-likelihood:", format(x$logLik, nsmall=2), "\n") cat("AIC:", format(-2*x$logLik + 2*x$edf, nsmall=2), "\n") if(nzchar(mess <- naprint(x$na.action))) cat("(", mess, ")\n", sep="") invisible(x) } vcov.clm2 <- function(object, ...) { if(is.null(object$Hessian)) { message("\nRe-fitting to get Hessian\n") utils::flush.console() object <- update(object, Hess=TRUE, start=object$coefficients) } dn <- names(object$coefficients) H <- object$Hessian ## To handle NaNs in the Hessian resulting from parameter ## unidentifiability: if(any(His.na <- !is.finite(H))) { H[His.na] <- 0 VCOV <- ginv(H) VCOV[His.na] <- NaN } else VCOV <- ginv(H) structure(VCOV, dimnames = list(dn, dn)) } summary.clm2 <- function(object, digits = max(3, .Options$digits - 3), correlation = FALSE, ...) { if(is.null(object$Hessian)) stop("Model needs to be fitted with Hess = TRUE") coef <- matrix(0, object$edf, 4, dimnames = list(names(object$coefficients), c("Estimate", "Std. 
Error", "z value", "Pr(>|z|)"))) coef[, 1] <- object$coefficients vc <- try(vcov(object), silent = TRUE) if(inherits(vc, "try-error")) { warning("Variance-covariance matrix of the parameters is not defined") coef[, 2:4] <- NaN if(correlation) warning("Correlation matrix is unavailable") object$condHess <- NaN } else { coef[, 2] <- sd <- sqrt(diag(vc)) ## Cond is Inf if Hessian contains NaNs: object$condHess <- if(any(is.na(object$Hessian))) Inf else with(eigen(object$Hessian, only.values = TRUE), abs(max(values) / min(values))) coef[, 3] <- coef[, 1]/coef[, 2] coef[, 4] <- 2*pnorm(abs(coef[, 3]), lower.tail=FALSE) if(correlation) object$correlation <- (vc/sd)/rep(sd, rep(object$edf, object$edf)) } object$coefficients <- coef object$digits <- digits class(object) <- "summary.clm2" object } print.summary.clm2 <- function(x, digits = x$digits, signif.stars = getOption("show.signif.stars"), ...) { if(!is.null(cl <- x$call)) { cat("Call:\n") dput(cl, control=NULL) } coef <- format(round(x$coefficients, digits=digits)) coef[,4] <- format.pval(x$coefficients[, 4]) p <- length(x$beta); nxi <- length(x$xi) k <- length(x$zeta); u <- x$estimLambda if(p > 0) { cat("\nLocation coefficients:\n") print(coef[nxi + 1:p, , drop=FALSE], quote = FALSE, ...) } else { cat("\nNo location coefficients\n") } if(k > 0) { cat("\nScale coefficients:\n") print(coef[(nxi+p+1):(nxi+p+k), , drop=FALSE], quote = FALSE, ...) } else { cat("\nNo scale coefficients\n") } if(x$estimLambda > 0) { cat("\nLink coefficients:\n") print(coef[(nxi+p+k+1):(nxi+p+k+u), , drop=FALSE], quote = FALSE, ...) } if(nxi > 0) { cat("\nThreshold coefficients:\n") print(coef[seq_len(nxi), -4, drop=FALSE], quote = FALSE, ...) } cat("\nlog-likelihood:", format(x$logLik, nsmall=2), "\n") cat("AIC:", format(-2*x$logLik + 2*x$edf, nsmall=2), "\n") cat("Condition number of Hessian:", format(x$condHess, nsmall=2), "\n") if(nzchar(mess <- naprint(x$na.action))) cat("(", mess, ")\n", sep="") if(!is.null(correl <- x$correlation)) { cat("\nCorrelation of Coefficients:\n") ll <- lower.tri(correl) correl[ll] <- format(round(correl[ll], digits)) correl[!ll] <- "" print(correl[-1, -ncol(correl)], quote = FALSE, ...) } invisible(x) } anova.clm2 <- function (object, ..., test = c("Chisq", "none")) { test <- match.arg(test) dots <- list(...) if (length(dots) == 0) stop('anova is not implemented for a single "clm2" object') mlist <- list(object, ...) 
nt <- length(mlist) dflis <- sapply(mlist, function(x) x$df.residual) s <- order(dflis, decreasing = TRUE) mlist <- mlist[s] if (any(!sapply(mlist, inherits, "clm2"))) stop('not all objects are of class "clm2"') ns <- sapply(mlist, function(x) length(x$fitted.values)) if(any(ns != ns[1])) stop("models were not all fitted to the same size of dataset") rsp <- unique(sapply(mlist, function(x) { tmp <- attr(x$location, "terms") class(tmp) <- "formula" paste(tmp[2]) } )) mds <- sapply(mlist, function(x) { tmp1 <- attr(x$location, "terms") class(tmp1) <- "formula" if(!is.null(x$scale)) { tmp2 <- attr(x$scale, "terms") class(tmp2) <- "formula" tmp2 <- tmp2[2] } else tmp2 <- "" if(!is.null(x$nominal)) { tmp3 <- attr(x$nominal, "terms") class(tmp3) <- "formula" tmp3 <- tmp3[2] } else tmp3 <- "" paste(tmp1[3], "|", tmp2, "|", tmp3) } ) dfs <- dflis[s] lls <- sapply(mlist, function(x) -2*x$logLik) tss <- c("", paste(1:(nt - 1), 2:nt, sep = " vs ")) df <- c(NA, -diff(dfs)) x2 <- c(NA, -diff(lls)) pr <- c(NA, 1 - pchisq(x2[-1], df[-1])) out <- data.frame(Model = mds, Resid.df = dfs, '-2logLik' = lls, Test = tss, Df = df, LRtest = x2, Prob = pr) names(out) <- c("Model", "Resid. df", "-2logLik", "Test", " Df", "LR stat.", "Pr(Chi)") if (test == "none") out <- out[, 1:6] class(out) <- c("Anova", "data.frame") attr(out, "heading") <- c("Likelihood ratio tests of cumulative link models\n", paste("Response:", rsp)) out } predict.clm2 <- function(object, newdata, ...) { if(!inherits(object, "clm2")) stop("not a \"clm2\" object") if(missing(newdata)) pr <- object$fitted else { newdata <- as.data.frame(newdata) Terms <- attr(object$location, "terms") m <- model.frame(Terms, newdata, na.action = function(x) x)#, if (!is.null(cl <- attr(Terms, "dataClasses"))) .checkMFClasses(cl, m) X <- model.matrix(Terms, m, contrasts = object$contrasts) xint <- match("(Intercept)", colnames(X), nomatch=0) if(xint > 0) X <- X[, -xint, drop=FALSE] n <- nrow(X) y <- m[,names(cl)[attr(Terms, "response")]] if(length(object$zeta) > 0) { Terms <- attr(object$scale, "terms") m <- model.frame(Terms, newdata, na.action = function(x) x)#, if (!is.null(cl <- attr(Terms, "dataClasses"))) .checkMFClasses(cl, m) Z <- model.matrix(Terms, m, contrasts = object$contrasts) zint <- match("(Intercept)", colnames(Z), nomatch=0) if(zint > 0) Z <- Z[, -zint, drop=FALSE] } if(!is.null(object$nominal)) { Terms <- attr(object$nominal, "terms") m <- model.frame(Terms, newdata, na.action = function(x) x)#, if (!is.null(cl <- attr(Terms, "dataClasses"))) .checkMFClasses(cl, m) XX <- model.matrix(Terms, m, contrasts = object$contrasts) namC <- colnames(XX) } B2 <- 1 * (col(matrix(0, n, nlevels(y))) == unclass(y)) o1 <- c(100 * B2[, nlevels(y)]) o2 <- c(-100 * B2[,1]) B1 <- B2[,-nlevels(y), drop=FALSE] B2 <- B2[,-1, drop=FALSE] locationPar <- c(t(object$Theta)) if(!is.null(object$nominal)) { ncolXX <- ncol(XX) LL1 <- lapply(1:ncolXX, function(x) B1 * XX[,x]) B1 <- do.call(cbind, LL1) LL2 <- lapply(1:ncolXX, function(x) B2 * XX[,x]) B2 <- do.call(cbind, LL2) } if(ncol(X) > 0) { B1 <- cbind(B1, -X) B2 <- cbind(B2, -X) locationPar <- c(locationPar, object$beta) } pfun <- switch(object$link, logistic = plogis, probit = pnorm, cloglog = function(x) pgumbel(x, max=FALSE), ## cloglog = pgumbel, cauchit = pcauchy, loglog = pgumbel, "Aranda-Ordaz" = function(x, lambda) pAO(x, lambda), "log-gamma" = function(x, lambda) plgamma(x, lambda)) sigma <- 1 if(length(object$zeta) > 0) sigma <- sigma * exp(drop(Z %*% object$zeta)) eta1 <- (drop(B1 %*% locationPar) + o1) / 
sigma eta2 <- (drop(B2 %*% locationPar) + o2) / sigma if(object$link %in% c("Aranda-Ordaz", "log-gamma")) pr <- pfun(eta1, object$lambda) - pfun(eta2, object$lambda) else pr <- pfun(eta1) - pfun(eta2) } if(missing(newdata) && !is.null(object$na.action)) pr <- napredict(object$na.action, pr) as.vector(pr) } profile.clm2 <- function(fitted, whichL = seq_len(p), whichS = seq_len(k), lambda = TRUE, alpha = 0.01, maxSteps = 50, delta = LrootMax/10, trace = 0, stepWarn = 8, ...) { rho <- update(fitted, doFit=FALSE) if(rho$estimLambda > 0 & rho$link == "Aranda-Ordaz") rho$limitLow <- c(rep(-Inf, length(rho$par)-2), 1e-5) nxi <- rho$nxi; k <- rho$k; p <- rho$p; X <- rho$X; Z <- rho$Z B1 <- rho$B1; B2 <- rho$B2 sO <- rho$expSoffset; O1 <- rho$o1; O2 <- rho$o2 beta0 <- with(fitted, coefficients[nxi + seq_len(p+k)]) Lnames <- names(beta0[seq_len(p)]) Snames <- names(beta0[p + seq_len(k)]) Pnames <- c(Lnames, Snames) if(is.character(whichL)) whichL <- match(whichL, Lnames) if(is.character(whichS)) whichS <- match(whichS, Snames) nL <- length(whichL); nS <- length(whichS) summ <- summary(fitted) std.err <- summ$coefficients[nxi + seq_len(p+k), "Std. Error"] if(trace < 0) rho$ctrl$trace <- trace <- 1 origLogLik <- fitted$logLik LrootMax <- qnorm(1 - alpha/2) prof <- vector("list", length = nL + nS) names(prof) <- c(paste("loc", Lnames[whichL], sep=".")[seq_len(nL)], paste("scale", Snames[whichS], sep=".")[seq_len(nS)]) for(where in c("loc", "scale")[c(nL>0, nS>0)]) { if(where == "loc") { rho$p <- max(0, p - 1) which <- whichL } if(where == "scale") { which <- whichS rho$o1 <- O1 rho$o2 <- O2 rho$p <- p rho$k <- max(0, k - 1) rho$X <- X if(rho$nxi > 0) { rho$B1 <- B1 rho$B2 <- B2 } } for(i in which) { if(where == "loc") { rho$X <- X[, -i, drop=FALSE] if(nxi > 0) { rho$B1 <- B1[, -(nxi+i), drop=FALSE] rho$B2 <- B2[, -(nxi+i), drop=FALSE] } } else { rho$Z <- Z[, -i, drop=FALSE] i <- i + p } res.i <- c(0, beta0[i]) for(sgn in c(-1, 1)) { if(trace) { message("\nParameter: ", where, ".", c(Lnames, Snames)[i], c(" down", " up")[(sgn + 1)/2 + 1]) utils::flush.console() } rho$par <- fitted$coefficients[-(nxi+i)] step <- 0; Lroot <- 0 while((step <- step + 1) < maxSteps && abs(Lroot) < LrootMax) { beta.i <- beta0[i] + sgn * step * delta * std.err[i] if(where=="loc") { rho$o1 <- O1 - X[, i] * beta.i rho$o2 <- O2 - X[, i] * beta.i } else rho$expSoffset <- exp(sO + Z[, (i - p)] * beta.i) fitCLM(rho) Lroot <- sgn * sqrt(2*(-rho$logLik + origLogLik)) res.i <- rbind(res.i, c(Lroot, beta.i)) } if(step - 1 < stepWarn) warning("profile may be unreliable for ", where, ".", c(Lnames, Snames)[i], " because only ", step - 1, "\n steps were taken ", c("downwards", "upwards")[(sgn + 1)/2 + 1]) } rownames(res.i) <- NULL prof[[paste(where, c(Lnames, Snames)[i], sep=".")]] <- # -p+nL structure(data.frame(res.i[order(res.i[,1]),]), names = c("Lroot", c(Lnames, Snames)[i])) if(!all(diff(prof[[length(prof)]][,2]) > 0)) warning("likelihood is not monotonically decreasing from maximum,\n", " so profile may be unreliable for ", names(prof)[length(prof)]) } } if(lambda & rho$nlambda) prof$lambda <- profileLambda(fitted, trace = trace, ...) val <- structure(prof, original.fit = fitted, summary = summ) class(val) <- c("profile.clm2", "profile") val } profileLambda <- function(fitted, link = fitted$link, range, nSteps = 20, trace = 0, ...) 
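## Note (added): profileLambda() traces the profile likelihood of the
## link parameter lambda over a grid.  Illustrative use through
## profile() (a sketch; assumes the soup data and a "log-gamma" fit,
## with 'range' and 'nSteps' passed on via '...', not run):
## fm <- clm2(SURENESS ~ PROD, data = soup, link = "log-gamma")
## pr <- profile(fm, lambda = TRUE, range = c(-4, 4), nSteps = 20)
## plot(pr)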
{ if(link == "log-gamma" & missing(range)) range <- c(-4, 4) if(link == "Aranda-Ordaz" & missing(range)) range <- c(1e-4, 4) if(!link %in% c("log-gamma", "Aranda-Ordaz")) stop("link needs to be 'log-gamma' or 'Aranda-Ordaz';", link, "not recognized") if(link == "Aranda-Ordaz" & min(range) <= 0) stop("range should be > 0 for the 'Aranda-Ordaz' link") if(fitted$estimLambda == 0) fitted <- update(fitted, Hess = FALSE, link = link, lambda = NULL) MLogLik <- fitted$logLik MLlambda <- fitted$lambda logLik <- numeric(nSteps) rho <- update(fitted, Hess = FALSE, link = link, lambda = min(range)) logLik[1] <- rho$logLik rho <- update(rho, doFit = FALSE) lambdaSeq <- seq(min(range), max(range), length.out = nSteps) if(trace) message("\nNow profiling lambda with ", nSteps - 1, " steps: i =") for(i in 2:nSteps){ if(trace) cat(i-1, " ") rho$lambda <- lambdaSeq[i] fitCLM(rho) logLik[i] <- rho$logLik } if(trace) cat("\n\n") if(any(logLik > fitted$logLik)) warning("Profiling found a better optimum,", " so original fit had not converged") sgn <- 2*(lambdaSeq > MLlambda) -1 Lroot <- sgn * sqrt(2) * sqrt(-logLik + MLogLik) res <- data.frame("Lroot" = c(0, Lroot), "lambda" = c(MLlambda, lambdaSeq)) res <- res[order(res[,1]),] if(!all(diff(res[,2]) > 0)) warning("likelihood is not monotonically decreasing from maximum,\n", " so profile may be unreliable for lambda") res } confint.clm2 <- function(object, parm, level = 0.95, whichL = seq_len(p), whichS = seq_len(k), lambda = TRUE, trace = 0, ...) { p <- length(object$beta); k <- length(object$zeta) if(trace) { message("Waiting for profiling to be done...") utils::flush.console() } object <- profile(object, whichL = whichL, whichS = whichS, alpha = (1. - level)/4., lambda = lambda, trace = trace) confint(object, level=level, ...) } confint.profile.clm2 <- function(object, parm = seq_along(Pnames), level = 0.95, ...) { of <- attr(object, "original.fit") Pnames <- names(object) if(is.character(parm)) parm <- match(parm, Pnames, nomatch = 0) a <- (1-level)/2 a <- c(a, 1-a) pct <- paste(round(100*a, 1), "%") ci <- array(NA, dim = c(length(parm), 2), dimnames = list(Pnames[parm], pct)) cutoff <- qnorm(a) for(pm in parm) { pro <- object[[ Pnames[pm] ]] sp <- spline(x = pro[, 2], y = pro[, 1]) ci[Pnames[pm], ] <- approx(sp$y, sp$x, xout = cutoff)$y } ci } plot.profile.clm2 <- function(x, parm = seq_along(Pnames), level = c(0.95, 0.99), Log = FALSE, relative = TRUE, fig = TRUE, n = 1e3, ..., ylim = NULL) ### Should this function have a 'root' argument to display the ### likelihood root statistic (approximate straight line)? { Pnames <- names(x) ML <- attr(x, "original.fit")$logLik for(pm in parm) { lim <- sapply(level, function(x) exp(-qchisq(x, df=1)/2) ) pro <- x[[ Pnames[pm] ]] sp <- spline(x = pro[, 2], y = pro[, 1], n=n) sp$y <- -sp$y^2/2 if(relative & !Log) { sp$y <- exp(sp$y) ylab <- "Relative likelihood" dots <- list(...) if(missing(ylim)) ylim <- c(0, 1) } if(relative & Log) { ylab <- "Relative log-likelihood" lim <- log(lim) } if(!relative & Log) { sp$y <- sp$y + ML ylab <- "Log-likelihood" lim <- ML + log(lim) } if(!relative & !Log) { stop("Not supported: at least one of 'Log' and 'relative' ", "have to be TRUE") sp$y <- exp(sp$y + ML) ylab <- "Likelihood" lim <- exp(ML + log(lim)) } x[[ Pnames[pm] ]] <- sp if(fig) { plot(sp$x, sp$y, type = "l", ylim = ylim, xlab = Pnames[pm], ylab = ylab, ...) abline(h = lim) } } attr(x, "limits") <- lim invisible(x) } logLik.clm2 <- function(object, ...) 
structure(object$logLik, df = object$edf, class = "logLik") extractAIC.clm2 <- function(fit, scale = 0, k = 2, ...) { edf <- fit$edf c(edf, -2*fit$logLik + k * edf) } update.clm2 <- function(object, formula., location, scale, nominal, ..., evaluate = TRUE) ### This method makes it possible to use the update.formula features ### for location and scale formulas in clm2 objects. This includes the ### possibility of using e.g. ### update(obj, loc = ~ . - var1, sca = ~ . + var2) { call <- object$call if (is.null(call)) stop("need an object with call component") extras <- match.call(expand.dots = FALSE)$... if (!missing(location)) call$location <- update.formula(formula(attr(object$location, "terms")), location) if (!missing(scale)) call$scale <- if(!is.null(object$scale)) update.formula(formula(attr(object$scale, "terms")), scale) else scale if (!missing(nominal)) call$nominal <- if(!is.null(object$nominal)) update.formula(formula(attr(object$nominal, "terms")), nominal) else nominal if (length(extras)) { existing <- !is.na(match(names(extras), names(call))) for (a in names(extras)[existing]) call[[a]] <- extras[[a]] if (any(!existing)) { call <- c(as.list(call), extras[!existing]) call <- as.call(call) } } if (evaluate) eval(call, parent.frame()) else call } dropterm.clm2 <- function(object, scope, scale = 0, test = c("none", "Chisq"), k = 2, sorted = FALSE, trace = FALSE, which = c("location", "scale"), ...) ### Most of this is lifted from MASS::dropterm.default, but adapted to ### the two formulas (location and scale) in the model. { which <- match.arg(which) Terms <- if(which == "location") attr(object$location, "terms") else attr(object$scale, "terms") tl <- attr(Terms, "term.labels") if(missing(scope)) scope <- drop.scope(Terms) else { if(!is.character(scope)) scope <- attr(terms(update.formula(Terms, scope)), "term.labels") if(!all(match(scope, tl, FALSE))) stop("scope is not a subset of term labels") } ns <- length(scope) ans <- matrix(nrow = ns + 1, ncol = 2, dimnames = list(c("", scope), c("df", "AIC"))) ans[1, ] <- extractAIC(object, scale, k = k, ...) n0 <- length(object$fitted) for(i in seq(ns)) { tt <- scope[i] if(trace) { message("trying -", tt) utils::flush.console() } Call <- as.list(object$call) Call[[which]] <- update.formula(Terms, as.formula(paste("~ . -", tt))) nfit <- eval.parent(as.call(Call)) ans[i+1, ] <- extractAIC(nfit, scale, k = k, ...) if(length(nfit$fitted) != n0) stop("number of rows in use has changed: remove missing values?") } dfs <- ans[1,1] - ans[,1] dfs[1] <- NA aod <- data.frame(Df = dfs, AIC = ans[,2]) o <- if(sorted) order(aod$AIC) else seq_along(aod$AIC) test <- match.arg(test) if(test == "Chisq") { dev <- ans[, 2] - k*ans[, 1] dev <- dev - dev[1] ; dev[1] <- NA nas <- !is.na(dev) P <- dev P[nas] <- pchisq(dev[nas], dfs[nas], lower.tail = FALSE) aod[, c("LRT", "Pr(Chi)")] <- list(dev, P) } aod <- aod[o, ] Call <- as.list(object$call) Call <- Call[names(Call) %in% c("location", "scale")] head <- c("Single term deletions", "\nModel:", paste(names(Call), ":", Call)) if(scale > 0) head <- c(head, paste("\nscale: ", format(scale), "\n")) class(aod) <- c("anova", "data.frame") attr(aod, "heading") <- head aod } addterm.clm2 <- function(object, scope, scale = 0, test = c("none", "Chisq"), k = 2, sorted = FALSE, trace = FALSE, which = c("location", "scale"), ...) ### Most of this is lifted from MASS::addterm.default, but adapted to ### the two formulas (location and scale) in the model. 
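## Illustrative use (added sketch; assumes the soup data shipped with
## this package, not run):
## fm <- clm2(SURENESS ~ PROD, data = soup)
## addterm(fm, scope = ~ PROD + DAY, test = "Chisq", which = "location")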
{ which <- match.arg(which) if (which == "location") Terms <- attr(object$location, "terms") else if(!is.null(object$scale)) Terms <- attr(object$scale, "terms") else Terms <- as.formula(" ~ 1") if(missing(scope) || is.null(scope)) stop("no terms in scope") if(!is.character(scope)) scope <- add.scope(Terms, update.formula(Terms, scope)) if(!length(scope)) stop("no terms in scope for adding to object") ns <- length(scope) ans <- matrix(nrow = ns + 1, ncol = 2, dimnames = list(c("", scope), c("df", "AIC"))) ans[1, ] <- extractAIC(object, scale, k = k, ...) n0 <- length(object$fitted) for(i in seq(ns)) { tt <- scope[i] if(trace) { message("trying +", tt) utils::flush.console() } Call <- as.list(object$call) Call[[which]] <- update.formula(Terms, as.formula(paste("~ . +", tt))) nfit <- eval.parent(as.call(Call)) ans[i+1, ] <- extractAIC(nfit, scale, k = k, ...) if(length(nfit$fitted) != n0) stop("number of rows in use has changed: remove missing values?") } dfs <- ans[,1] - ans[1,1] dfs[1] <- NA aod <- data.frame(Df = dfs, AIC = ans[,2]) o <- if(sorted) order(aod$AIC) else seq_along(aod$AIC) test <- match.arg(test) if(test == "Chisq") { dev <- ans[,2] - k*ans[, 1] dev <- dev[1] - dev; dev[1] <- NA nas <- !is.na(dev) P <- dev P[nas] <- pchisq(dev[nas], dfs[nas], lower.tail=FALSE) aod[, c("LRT", "Pr(Chi)")] <- list(dev, P) } aod <- aod[o, ] Call <- as.list(object$call) Call <- Call[names(Call) %in% c("location", "scale")] head <- c("Single term additions", "\nModel:", paste(names(Call), ":", Call)) if(scale > 0) head <- c(head, paste("\nscale: ", format(scale), "\n")) class(aod) <- c("anova", "data.frame") attr(aod, "heading") <- head aod } ## addterm <- function(object, ...) UseMethod("addterm") ## dropterm <- function(object, ...) UseMethod("dropterm") ################################################################## ## Additional utility functions: grad.lambda <- function(rho, lambda, link, delta = 1e-6) { ll <- lambda + c(-delta, delta) if(link == "Aranda-Ordaz") ll[ll < 0] <- 0 par <- rho$par f <- sapply(ll, function(x) getNll(rho, c(par[-length(par)], x))) rho$lambda <- lambda rho$par <- par diff(f) / diff(ll) } TraceR <- function(iter, stepFactor, val, maxGrad, par, first=FALSE) { t1 <- sprintf(" %3d: %.2e: %.3f: %1.3e: ", iter, stepFactor, val, maxGrad) t2 <- formatC(par) if(first) cat("iter: step factor: Value: max|grad|: Parameters:\n") cat(t1, t2, "\n") } print.Anova <- function (x, ...) ## Lifted from package MASS: { heading <- attr(x, "heading") if (!is.null(heading)) cat(heading, sep = "\n") attr(x, "heading") <- NULL res <- format.data.frame(x, ...) 
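## Note (added): NA cells (e.g. the test columns of the first row) are
## blanked below so the printed table shows empty fields rather than
## literal "NA" strings.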
nas <- is.na(x) res[] <- sapply(seq_len(ncol(res)), function(i) { x <- as.character(res[[i]]) x[nas[, i]] <- "" x }) print.data.frame(res) invisible(x) } fixed <- function(theta, eps = 1e-3) { res <- vector("list") res$name <- "fixed" if(!missing(theta) && length(theta) > 1) { if(length(theta) < 3) stop("'length(theta) = ", length(theta), ", but has to be 1 or >= 3") res$eps <- NULL res$theta <- theta res$getTheta <- function(y, theta, eps) theta } else if(!missing(theta) && length(theta) == 1) { if(as.integer(theta) < 3) stop("'as.integer(theta)' was ", as.integer(theta), ", but has to be > 2") res$eps <- NULL res$theta <- theta res$getTheta <- function(y, theta, eps) { eps <- diff(range(y)) / (theta - 1) seq(min(y) - eps/2, max(y) + eps/2, len = theta + 1) } } else if(missing(theta) && length(eps) == 1) { res$eps <- eps res$theta <- NULL res$getTheta <- function(y, theta, eps) { J <- diff(range(y))/eps + 1 seq(min(y) - eps/2, max(y) + eps/2, len = J) } } else stop("inappropriate arguments") class(res) <- "threshold" res } makeThresholds2 <- function(rho, threshold, ...) { if(threshold == "flexible") { rho$tJac <- diag(rho$ntheta) rho$nalpha <- rho$ntheta rho$alphaNames <- paste(rho$lev[-length(rho$lev)], rho$lev[-1], sep="|") } if(threshold == "symmetric") { if(!rho$ntheta >=2) stop("symmetric thresholds are only meaningful for responses with 3 or more levels") if(rho$ntheta %% 2) { ## ntheta is odd rho$nalpha <- (rho$ntheta + 1)/2 ## No. threshold parameters rho$tJac <- t(cbind(diag(-1, rho$nalpha)[rho$nalpha:1, 1:(rho$nalpha-1)], diag(rho$nalpha))) rho$tJac[,1] <- 1 rho$alphaNames <- c("central", paste("spacing.", 1:(rho$nalpha-1), sep="")) } else { ## ntheta is even rho$nalpha <- (rho$ntheta + 2)/2 rho$tJac <- cbind(rep(1:0, each=rho$ntheta/2), rbind(diag(-1, rho$ntheta/2)[(rho$ntheta/2):1,], diag(rho$ntheta/2))) rho$tJac[,2] <- rep(0:1, each=rho$ntheta/2) rho$alphaNames <- c("central.1", "central.2", paste("spacing.", 1:(rho$nalpha-2), sep="")) } } if(threshold == "equidistant") { if(!rho$ntheta >=2) stop("symmetric thresholds are only meaningful for responses with 3 or more levels") rho$tJac <- cbind(1, 0:(rho$ntheta-1)) rho$nalpha <- 2 rho$alphaNames <- c("threshold.1", "spacing") } } ordinal/R/terms_utils.R0000644000176200001440000001766713332052253014617 0ustar liggesusers############################################################################# # Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen # # This file is part of the ordinal package for R (*ordinal*) # # *ordinal* is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # *ordinal* is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # and/or # . 
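## Example (added): a minimal, self-contained sketch of what term2colX()
## (defined below) computes.  It is wrapped in if(FALSE) so it never runs
## at package build/load time; the data frame is made up for illustration.
if(FALSE) {
    dat <- data.frame(f = gl(3, 4), x = rnorm(12)) # hypothetical data
    X <- model.matrix(~ f * x, data = dat)         # carries an 'assign' attribute
    term2colX(terms(~ f * x), X)
    ## -> named list mapping "(Intercept)", "f", "x" and "f:x"
    ##    to the column indices of X belonging to each term
}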
# terms_utils.R - utilities for computing on terms objects and friends

# ------- Contents: --------
#
# --- utility functions: ---
#
# term2colX
# need_yates
# no_yates
# numeric_terms
# get_model_matrix
# get_contrast_coding
# get_min_data
# get_var_list
# get_fac_list
# get_num_list
# get_pairs
# get_trts
#

##############################################
######## term2colX()
##############################################
term2colX <- function(terms, X) {
  # Compute map from terms to columns in X using the assign attribute of X.
  # Returns a list with one element for each term containing indices of columns
  # in X belonging to that term.
  if(is.null(asgn <- attr(X, "assign")))
    stop("Invalid design matrix: ",
         "design matrix 'X' should have a non-null 'assign' attribute",
         call. = FALSE)
  term_names <- attr(terms, "term.labels")
  has_intercept <- attr(terms, "intercept") > 0
  col_terms <- if(has_intercept) c("(Intercept)", term_names)[asgn + 1] else
    term_names[asgn[asgn > 0]]
  if(!length(col_terms) == ncol(X)) # should never happen.
    stop("An error happened when mapping terms to columns of X")
  # get names of terms (including aliased terms)
  nm <- union(unique(col_terms), term_names)
  res <- lapply(setNames(as.list(nm), nm), function(x) numeric(0L))
  map <- split(seq_along(col_terms), col_terms)
  res[names(map)] <- map
  res[nm] # order appropriately
}

##############################################
######## need_yates()
##############################################
need_yates <- function(model) {
  ## Do not need yates for:
  ## - continuous variables
  ## - factors that are not contained in other factors
  ## Need yates for all other terms, i.e. terms which are:
  ## - contained in other terms, AND
  ## - which are not numeric/continuous
  term_names <- attr(terms(model), "term.labels")
  cont <- containment(model)
  is_contained <- names(cont[sapply(cont, function(x) length(x) > 0)])
  nmt <- numeric_terms(model)
  num_terms <- names(nmt[nmt])
  term_names[!term_names %in% num_terms &
               term_names %in% is_contained]
}

##############################################
######## no_yates()
##############################################
no_yates <- function(model) {
  setdiff(attr(terms(model), "term.labels"), need_yates(model))
}

##############################################
######## numeric_terms()
##############################################
#' @importFrom stats delete.response terms
numeric_terms <- function(model) {
  ## Determines for all terms (not just all variables) if the 'dataClass'
  ## is numeric
  ## (interactions involving one or more numeric variables are numeric).
  Terms <- delete.response(terms(model))
  all_vars <- all.vars(attr(Terms, "variables"))
  data_classes <- attr(terms(model, fixed.only=FALSE), "dataClasses")
  var_class <- data_classes[names(data_classes) %in% all_vars]
  factor_vars <- names(var_class[var_class %in% c("factor", "ordered")])
  num_vars <- setdiff(all_vars, factor_vars)
  term_names <- attr(terms(model), "term.labels")
  # term_names <- setNames(as.list(term_names), term_names)
  sapply(term_names, function(term) {
    vars <- unlist(strsplit(term, ":"))
    any(vars %in% num_vars)
  })
}

##############################################
######## get_model_matrix()
##############################################
#' Extract or remake model matrix from model
#'
#' Extract or remake model matrix from model and potentially change the
#' contrast coding
#'
#' @param model an \code{lm} or \code{lmerMod} model object.
#' @param type extract or remake model matrix? #' @param contrasts contrasts settings. These may be restored to those in the #' model or they may be changed. If a length one character vector (e.g. #' \code{"contr.SAS"}) this is applied to all factors in the model, but it can #' also be a list naming factors for which the contrasts should be set as specified. #' #' @return the model (or 'design') matrix. #' @keywords internal #' @author Rune Haubo B Christensen get_model_matrix <- function(model, type=c("extract", "remake"), contrasts="restore") { type <- match.arg(type) # stopifnot(inherits(model, "lm") || inherits(model, "lmerMod")) if(type == "extract") return(model_matrix(model)) # Set appropriate contrasts: Contrasts <- get_contrast_coding(model, contrasts=contrasts) model.matrix(terms(model), data=model.frame(model), contrasts.arg = Contrasts) } ############################################## ######## get_contrast_coding() ############################################## get_contrast_coding <- function(model, contrasts="restore") { # Compute a list of contrasts for all factors in model Contrasts <- contrasts if(length(contrasts) == 1 && is.character(contrasts) && contrasts == "restore") { Contrasts <- attr(model_matrix(model), "contrasts") } else if(length(contrasts) == 1 && is.character(contrasts) && contrasts != "restore") { Contrasts <- .getXlevels(terms(model), model.frame(model)) Contrasts[] <- contrasts Contrasts } Contrasts } #' # #' get_min_data <- function(model, FUN=mean) #' # Get a minimum complete model.frame based on the variables in the model #' do.call(expand.grid, get_var_list(model, FUN=FUN)) #' #' get_var_list <- function(model, FUN=mean) #' # Extract a named list of variables in the model containing the levels of #' # factors and the mean value of numeric variables #' c(get_fac_list(model), get_num_list(model, FUN=FUN)) #' #' #' @importFrom stats .getXlevels #' get_fac_list <- function(model) { #' # Extract a named list of factor levels for each factor in the model #' res <- .getXlevels(Terms=terms(model), m=model.frame(model)) #' if(is.null(res)) list() else res #' } #' #' get_num_list <- function(model, FUN=mean) { # FUN=function(x) mean(x, na.rm=TRUE)) { #' # Extract named list of mean/FUN values of numeric variables in model #' deparse2 <- function(x) paste(safeDeparse(x), collapse = " ") #' Terms <- terms(model) #' mf <- model.frame(model) #' xvars <- sapply(attr(Terms, "variables"), deparse2)[-1L] #' if((yvar <- attr(Terms, "response")) > 0) #' xvars <- xvars[-yvar] #' if(!length(xvars)) return(list()) #' xlev <- lapply(mf[xvars], function(x) { #' if (is.numeric(x)) FUN(x) else NULL #' }) #' res <- xlev[!vapply(xlev, is.null, NA)] #' if(is.null(res)) list() else res #' } #' #' #' @importFrom utils combn #' get_pairs <- function(levs) { #' stopifnot(is.character(levs), length(levs) > 1) #' combs <- combn(seq_along(levs), 2) #' ind <- seq_len(ncombs <- ncol(combs)) #' A <- as.data.frame(array(0, dim=c(length(levs), ncombs))) #' dimnames(A) <- list(levs, paste(levs[combs[1, ]], levs[combs[2, ]], sep=" - ")) #' A[cbind(combs[1, ], ind)] <- 1 #' A[cbind(combs[2, ], ind)] <- -1 #' A #' } #' #' get_trts <- function(levs) { #' nlevs <- length(levs) #' ans <- t(cbind(-1, diag(nlevs - 1))) #' rownames(ans) <- levs #' colnames(ans) <- paste(levs[-1], levs[1], sep=" - ") #' ans #' } # get_trts(letters[1:5]) # get_pairs(letters[1:5]) ordinal/R/convergence.R0000644000176200001440000002560413332052252014530 0ustar 
liggesusers#############################################################################
#    Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen
#
#    This file is part of the ordinal package for R (*ordinal*)
#
#    *ordinal* is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 2 of the License, or
#    (at your option) any later version.
#
#    *ordinal* is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    A copy of the GNU General Public License is available at
#    <https://www.r-project.org/Licenses/> and/or
#    <http://www.gnu.org/licenses/>.
#############################################################################
## This file contains:
## Functions to assess and check convergence of CLMs. Some
## functions/methods are exported and some are used internally in
## clm().

convergence <- function(object, ...) {
  UseMethod("convergence")
}

convergence.clm <-
  function(object, digits = max(3, getOption("digits") - 3),
           tol = sqrt(.Machine$double.eps), ...)
### Results: data.frame with columns:
### Estimate
### Std. Error
### Gradient - gradient of the coefficients at optimizer termination
### Error - the signed error in the coefficients at termination
### Rel. Error - the relative error in the coefficients at termination
###
### The (signed) Error is determined as the Newton step, so this is
### only valid close to the optimum where the likelihood function is
### quadratic.
###
### The relative error equals step/Estimate.
{
  ## get info table and coef-table:
  info <- object$info[c("nobs", "logLik", "niter", "max.grad", "cond.H")]
  ## Initialize coef-table with NAs:
  coefs <- coef(object, na.rm=TRUE)
  g <- object$gradient
  H <- object$Hessian
  tab <- matrix(NA_real_, nrow=length(coefs), ncol=6L,
                dimnames=list(names(coef(object, na.rm=TRUE)),
                              c("Estimate", "Std.Err", "Gradient",
                                "Error", "Cor.Dec", "Sig.Dig")))
  tab[, c(1L, 3L)] <- cbind(coefs, g)
  res <- list(info=info, coefficients=tab, original.fit=object)
  class(res) <- "convergence.clm"
  if(!all(is.finite(H))) {
    warning("non-finite values in Hessian: illegitimate model fit")
    return(res)
  }
  ## Get eigen values of Hessian:
  res$eigen.values <-
    e.val <- eigen(H, symmetric=TRUE, only.values=TRUE)$values
  ## Compute Cholesky factor of Hessian:
  ch <- try(chol(H), silent=TRUE)
  if(any(abs(e.val) <= tol) || inherits(ch, "try-error")) {
    return(res)
  }
  ## Hessian is positive definite:
  ## Compute approximate error in the coefficients:
  step <- c(backsolve(ch, backsolve(ch, g, transpose=TRUE)))
  if(max(abs(step)) > 1e-2)
    warning("convergence assessment may be unreliable ",
            "due to large numerical error")
  ## Compute approximate error in the log-likelihood function:
  env <- get_clmRho(object)
  ## Note: safer to get env this way.
  ## env <- update(object, doFit=FALSE)
  env$par <- coef(object, na.rm=TRUE) - step
  new.logLik <- -env$clm.nll(env)
  new.max.grad <- max(abs(env$clm.grad(env)))
  if(new.max.grad > max(abs(g)) && max(abs(step)) > tol)
    warning("Convergence assessment may be unreliable: ",
            "please assess the likelihood with slice()")
### NOTE: we only warn if step is larger than a tolerance, since if
### step \sim 1e-16, the max(abs(grad)) may increase though stay
### essentially zero.
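  ## Format the log-likelihood error at termination; errors below 1e-10
  ## are displayed as "<1e-10" by format.pval(), larger ones in
  ## scientific notation: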
logLik.err <- object$logLik - new.logLik err <- format.pval(logLik.err, digits=2, eps=1e-10) if(!length(grep("<", err))) err <- formatC(as.numeric(err), digits=2, format="e") res$info$logLik.Error <- err ## Fill in the coef-table: se <- sqrt(diag(chol2inv(ch))) res$coefficients[, c(2, 4:6)] <- cbind(se, step, cor.dec(step), signif.digits(coefs, step)) res } print.convergence.clm <- function(x, digits = max(3, getOption("digits") - 3), ...) { ## Prepare for printing: print(x$info, row.names=FALSE, right=FALSE) cat("\n") tab.print <- coef(x) for(i in 1:2) tab.print[,i] <- format(c(coef(x)[,i]), digits=digits) for(i in 3:4) tab.print[,i] <- format(c(coef(x)[,i]), digits=max(1, digits - 1)) print(tab.print, quote=FALSE, right=TRUE, ...) ## Print eigen values: cat("\nEigen values of Hessian:\n") cat(format(x$eigen.values, digits=digits), "\n") conv <- x$original.fit$convergence cat("\nConvergence message from clm:\n") for(i in seq_along(conv$code)) { Text <- paste("(", conv$code[i], ") ", conv$messages[i], sep="") cat(Text, "\n") } if(!is.null(alg.text <- conv$alg.message)) cat(paste("In addition:", alg.text), "\n") cat("\n") ## for(i in seq_along(conv$code)) { ## cat("Code: Message:\n", fill=TRUE) ## cat(conv$code[i], " ", conv$message[i], "\n", fill=TRUE) ## } ## if(!is.null(alg.text <- conv$alg.message)) { ## cat("\nIn addition: ", alg.text, "\n\n", fill=TRUE) ## } return(invisible(x)) } cor.dec <- function(error) { ### computes the no. correct decimals in a number if 'error' is the ### error in the number. ### The function is vectorized. xx <- -log10(abs(error)) lead <- floor(xx) res <- ifelse(xx < lead - log10(.5), lead-1, lead) res[abs(error) >= .05] <- 0 as.integer(round(res)) } signif.digits <- function(value, error) { ### Determines the number of significant digits in 'value' if the ### absolute error in 'value' is 'error'. ### The function is vectorized. res <- cor.dec(error) + ceiling(log10(abs(value))) res[res < 0] <- 0 as.integer(round(res)) } conv.check <- function(fit, control=NULL, Theta.ok=NULL, tol=sqrt(.Machine$double.eps), ...) ## function(gr, Hess, conv, method, gradTol, relTol, ## tol=sqrt(.Machine$double.eps), ...) ### Compute variance-covariance matrix and check convergence along the ### way. ### fit: clm-object or the result of clm.fit.NR() | gradient, Hessian, ### (control), convergence ### control: (tol), (method), gradTol, relTol ### ### Return: list with elements ### vcov, conv, cond.H, messages and { if(missing(control)) control <- fit$control if(is.null(control)) stop("'control' not supplied - cannot check convergence") if(!is.null(control$tol)) tol <- control$tol if(tol < 0) stop(gettextf("numerical tolerance is %g, expecting non-negative value", tol), call.=FALSE) ### OPTION: test this. H <- fit$Hessian g <- fit$gradient max.grad <- max(abs(g)) cov <- array(NA_real_, dim=dim(H), dimnames=dimnames(H)) cond.H <- NA_real_ res <- list(vcov=cov, code=integer(0L), cond.H=cond.H, messages=character(0L)) class(res) <- "conv.check" if(is.list(code <- fit$convergence)) code <- code[[1L]] mess <- switch(as.character(code), "0" = "Absolute and relative convergence criteria were met", "1" = "Absolute convergence criterion was met, but relative criterion was not met", "2" = "iteration limit reached", "3" = "step factor reduced below minimum", "4" = "maximum number of consecutive Newton modifications reached") if(control$method != "Newton") mess <- NULL ### OPTION: get proper convergence message from optim, nlminb, ucminf etc. 
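  ## Append the optimizer's own message (if any) and inspect the Hessian:
  ## eigenvalues below -tol mean the solution is not even a local optimum,
  ## in which case a variance-covariance matrix would be meaningless: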
  res <- c(res, alg.message=mess) ## }

  evd <- eigen(H, symmetric=TRUE, only.values=TRUE)$values
  negative <- sum(evd < -tol)
  if(negative) {
    res$code <- -2L
    res$messages <-
      gettextf(paste("Model failed to converge:",
                     "degenerate Hessian with %d negative eigenvalues"),
               negative)
    return(res)
  }

  ## Add condition number to res:
  res$cond.H <- max(evd) / min(evd)
  ## Compute Newton step:
  ch <- try(chol(H), silent=TRUE)
  if(max.grad > control$gradTol) {
    res$code <- -1L
    res$messages <-
      gettextf("Model failed to converge with max|grad| = %g (tol = %g)",
               max.grad, control$gradTol)
    ## Compute var-cov:
    vcov <- try(chol2inv(ch), silent=TRUE)
    if(!inherits(vcov, "try-error")) res$vcov[] <- vcov
    return(res)
  }
  if(!is.null(Theta.ok) && !Theta.ok) {
    res$code <- -3L
    res$messages <-
      "not all thresholds are increasing: fit is invalid"
    ## Compute var-cov:
    vcov <- try(chol2inv(ch), silent=TRUE)
    if(!inherits(vcov, "try-error")) res$vcov[] <- vcov
    return(res)
  }
  zero <- sum(abs(evd) < tol)
  if(zero || inherits(ch, "try-error")) {
    res$code <- 1L
    res$messages <-
      "Hessian is numerically singular: parameters are not uniquely determined"
    return(res)
  }
### NOTE: Only do the following if 'ch <- try(chol(H), silent=TRUE)'
### actually succeeded:
  step <- c(backsolve(ch, backsolve(ch, g, transpose=TRUE)))
  ## Compute var-cov:
  res$vcov[] <- chol2inv(ch)
### NOTE: we want res$vcov to be present in all of the situations
### below.
  if(max(abs(step)) > control$relTol) {
    res$code <- c(res$code, 1L)
    corDec <- as.integer(min(cor.dec(step)))
    res$messages <-
      c(res$messages,
        gettextf("some parameters may have only %d correct decimals",
                 corDec))
  }
  if(max(evd) * tol > 1) {
    res$code <- c(res$code, 2L)
    res$messages <-
      c(res$messages,
        paste("Model is nearly unidentifiable: ",
              "very large eigenvalue",
              "\n - Rescale variables?", sep=""))
  }
  if((min(evd) / max(evd)) < tol) {
    res$code <- c(res$code, 3L)
    if(!5L %in% res$code) {
      res$messages <-
        c(res$messages,
          paste("Model is nearly unidentifiable: ",
                "large eigenvalue ratio",
                "\n - Rescale variables?", sep=""))
    }
  }
  if(!length(res$code)) {
    res$code <- 0L
    res$messages <- "successful convergence"
  }
  res
}
cov.conv <- conv.check
### OPTION: let convergence() print convergence info from clm using
### print.conv.check

print.conv.check <-
  function(x, action=c("warn", "silent", "stop", "message"), ...)
{
  action <- match.arg(action)
  if(x$code == 0L || action == "silent") return(invisible())
  Text <- paste("(", x$code[1L], ") ", x$messages[1L], sep="")
  if(!is.null(alg.text <- x$alg.message))
    Text <- paste(Text, "\nIn addition:", alg.text)
  switch(action,
         "stop" = stop(Text, call.=FALSE),
         "warn" = warning(Text, call.=FALSE),
         "message" = message(Text))
}
ordinal/R/clmm.ranef.R0000644000176200001440000000673013332052252014253 0ustar liggesusers#############################################################################
#    Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen
#
#    This file is part of the ordinal package for R (*ordinal*)
#
#    *ordinal* is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 2 of the License, or
#    (at your option) any later version.
#
#    *ordinal* is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    A copy of the GNU General Public License is available at
#    <https://www.r-project.org/Licenses/> and/or
#    <http://www.gnu.org/licenses/>.
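## Commented example for the ranef and condVar methods defined below in this
## file (not run; uses the wine data shipped with the package):
## fmm <- clmm(rating ~ temp + contact + (1|judge), data = wine)
## ranef(fmm)$judge    # conditional modes of the judge effects
## condVar(fmm)$judge  # their conditional variances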
############################################################################# ## This file contains: ## Implementation of ranef and condVar methods for clmm objects to ## extract the conditional model of the random-effects and their ## conditional variances. ## fixef.clmm <- function(object, ...) coef(object, ...) ## object$coefficients ### NOTE: Should return a *named* vector ranef <- function(object, ...) UseMethod("ranef") ## fixef <- function(object, ...) UseMethod("fixef") ranef.clmm <- function(object, condVar=FALSE, ...) ### This function... ### args... ### Returns.... { formatRanef <- function(relist, ST, gf.levels, assign, qi) { asgn <- split(seq_along(assign), assign) ## colnames of random effects: cn <- lapply(ST, colnames) cn <- lapply(asgn, function(ii) unlist(cn[ii])) ranefList <- lapply(seq_along(relist), function(i) { matrix(relist[[i]], ncol=qi[i]) }) ## Combine r.e. terms associated with the same grouping factors, ## set dimnames and coerce to data.frame: ranefList <- lapply(seq_along(asgn), function(i) { mat <- do.call(cbind, ranefList[ asgn[[i]] ]) dimnames(mat) <- list(gf.levels[[i]], cn[[i]]) as.data.frame(mat) }) ## list of r.e. by grouping factors: names(ranefList) <- names(gflevs) ranefList } ## which r.e. terms are associated with which grouping factors: asgn <- attributes(object$gfList)$assign ## names of levels of grouping factors: gflevs <- lapply(object$gfList, levels) ## random effects indicator factor: reind <- with(object$dims, factor(rep.int(seq_len(nretrms), nlev.re * qi))) ## list of random effects by r.e. term: relist <- split(object$ranef, reind) ranefList <- formatRanef(relist, object$ST, gflevs, asgn, object$dims$qi) if(condVar) { ### OPTION: Should we return matrices for vector-valued random effects ### as lmer does? ## Add conditional variances of the random effects: cond.var <- object$condVar if(NCOL(cond.var) > 1) cond.var <- diag(cond.var) cvlist <- split(cond.var, reind) cond.var <- formatRanef(cvlist, object$ST, gflevs, asgn, object$dims$qi) for(i in seq_along(ranefList)) attr(ranefList[[i]], "condVar") <- cond.var[[i]] } ranefList } condVar <- function(object, ...) UseMethod("condVar") condVar.clmm <- function(object, ...) lapply(ranef.clmm(object, condVar=TRUE), function(y) attr(y, "condVar")) ordinal/R/drop.coef.R0000644000176200001440000001504413332052252014106 0ustar liggesusers############################################################################# # Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen # # This file is part of the ordinal package for R (*ordinal*) # # *ordinal* is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # *ordinal* is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # and/or # . ############################################################################# ## This file contains: ## Functions that can drop columns from rank-deficient design ## matrices. One is exported and others used internally. 
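## Commented example for drop.coef() below (not run; hypothetical data):
## the third column is a linear combination of the second and is dropped:
## x <- rnorm(10)
## X <- cbind("(Intercept)" = 1, x = x, x2 = 2 * x)
## ncol(drop.coef(X, silent = TRUE))  # 2 columns remain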
drop.coef <- function(X, silent = FALSE)
### works if ncol(X) >= 0 and nrow(X) >= 0
{
  ## test and match arguments:
  stopifnot(is.matrix(X))
  silent <- as.logical(silent)[1]
  ## perform the qr-decomposition of X using LINPACK methods:
  qr.X <- qr(X, tol = 1e-7, LAPACK = FALSE)
  if(qr.X$rank == ncol(X))
    return(X) ## return X if X has full column rank
  if(!silent) ## message the no. dropped columns:
    message(gettextf("design is column rank deficient so dropping %d coef",
                     ncol(X) - qr.X$rank))
  ## return the columns corresponding to the first qr.X$rank pivot
  ## elements of X:
  newX <- X[, qr.X$pivot[1:qr.X$rank], drop = FALSE]
  ## did we succeed? stop-if-not:
  if(qr.X$rank != qr(newX)$rank)
    stop(gettextf("determination of full column rank design matrix failed"),
         call. = FALSE)
  return(newX)
}

drop.coef2 <- function(X, tol = 1e-7, silent = FALSE, test.ans = FALSE)
### works if ncol(X) >= 0 and nrow(X) >= 0
{
  ## test and match arguments:
  stopifnot(is.matrix(X))
  silent <- as.logical(silent)[1]
  aliased <- rep.int(0, ncol(X))
  ## perform the qr-decomposition of X using LINPACK methods:
  qr.X <- qr(X, tol = tol, LAPACK = FALSE)
  if(qr.X$rank == ncol(X)) {
    ## return X if X has full column rank
    attr(X, "aliased") <- aliased
    attr(X, "orig.colnames") <- colnames(X)
    return(X)
  }
  if(!silent) ## message the no. dropped columns:
    message(gettextf("design is column rank deficient so dropping %d coef",
                     ncol(X) - qr.X$rank))
  ## return the columns corresponding to the first qr.X$rank pivot
  ## elements of X:
  newX <- X[, qr.X$pivot[1:qr.X$rank], drop = FALSE]
  sel <- qr.X$pivot[-(1:qr.X$rank)]
  aliased[sel] <- 1
  attr(newX, "aliased") <- aliased
  attr(newX, "orig.colnames") <- colnames(X)
  ## Copy old attributes:
  attributes(newX)$contrasts <- attributes(X)$contrasts
  attr(newX, "assign") <- attr(X, "assign")[-sel]
  ## did we succeed? stop-if-not:
  if(test.ans && qr.X$rank != qr(newX)$rank)
    stop(gettextf("determination of full column rank design matrix failed"),
         call. = FALSE)
  return(newX)
}

drop.cols <- function(mf, silent = FALSE, drop.scale=TRUE)
### drop columns from X and possibly NOM and S to ensure full column
### rank.
### mf - list with X and possibly NOM and S design matrices. Includes
### alpha.names
###
### returns: updated version of mf.
{
  nalpha <- length(mf$alpha.names)
  ## X is assumed to contain an intercept at this point:
  Xint <- match("(Intercept)", colnames(mf$X), nomatch = 0)
  if(Xint <= 0) {
    mf$X <- cbind("(Intercept)" = rep(1, nrow(mf$X)), mf$X)
    warning("an intercept is needed and assumed")
  } ## intercept in X is guaranteed.
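  ## With nominal effects the threshold columns depend on NOM, so rank
  ## deficiencies are resolved jointly: first NOM itself is reduced, then
  ## X (and optionally S) is reduced against the retained NOM columns: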
if(!is.null(mf[["NOM"]])){ ## store coef names: mf$coef.names <- list() mf$coef.names$alpha <- paste(rep(mf$alpha.names, ncol(mf$NOM)), ".", rep(colnames(mf$NOM), each=nalpha), sep="") mf$coef.names$beta <- colnames(mf$X)[-1] ## drop columns from NOM: mf$NOM <- drop.coef2(mf$NOM, silent=silent) ## drop columns from X: NOMX <- drop.coef2(cbind(mf$NOM, mf$X[,-1, drop=FALSE]), silent=silent) ## extract and store X: mf$X <- cbind("(Intercept)" = rep(1, nrow(mf$X)), NOMX[,-seq_len(ncol(mf$NOM)), drop=FALSE]) ## store alias information: mf$aliased <- list(alpha = rep(attr(mf$NOM, "aliased"), each=nalpha)) mf$aliased$beta <- attr(NOMX, "aliased")[-seq_len(ncol(mf$NOM))] if(drop.scale && !is.null(mf[["S"]])) { mf$coef.names$zeta <- colnames(mf$S)[-1] ## drop columns from S: NOMS <- drop.coef2(cbind(mf$NOM, mf$S[,-1, drop=FALSE]), silent=silent) ## extract and store S: mf$S <- cbind("(Intercept)" = rep(1, nrow(mf$S)), NOMS[,-seq_len(ncol(mf$NOM)), drop=FALSE]) mf$aliased$zeta <- attr(NOMS, "aliased")[-seq_len(ncol(mf$NOM))] } else if(!is.null(mf[["S"]])) { Sint <- match("(Intercept)", colnames(mf$S), nomatch = 0) if(Sint <= 0) { mf$S <- cbind("(Intercept)" = rep(1, nrow(mf$S)), mf$S) warning("an intercept is needed and assumed in 'scale'", call.=FALSE) } ## intercept in S is guaranteed. mf$coef.names$zeta <- colnames(mf$S)[-1] mf$S <- drop.coef2(mf$S, silent=silent) mf$aliased$zeta <- attr(mf$S, "aliased")[-1] } return(mf) } ## end !is.null(mf[["NOM"]]) ## drop columns from X assuming an intercept: mf$coef.names <- list(alpha = mf$alpha.names, beta = colnames(mf$X)[-1]) mf$X <- drop.coef2(mf$X, silent=silent) mf$aliased <- list(alpha = rep(0, nalpha), beta = attr(mf$X, "aliased")[-1]) ## drop columns from S if relevant: if(!is.null(mf[["S"]])) { Sint <- match("(Intercept)", colnames(mf$S), nomatch = 0) if(Sint <= 0) { mf$S <- cbind("(Intercept)" = rep(1, nrow(mf$S)), mf$S) warning("an intercept is needed and assumed in 'scale'", call.=FALSE) } ## intercept in S is guaranteed. mf$coef.names$zeta <- colnames(mf$S)[-1] mf$S <- drop.coef2(mf$S, silent=silent) mf$aliased$zeta <- attr(mf$S, "aliased")[-1] } return(mf) } ordinal/R/clm.fit.R0000644000176200001440000001605513332052252013566 0ustar liggesusers############################################################################# # Copyright (c) 2010-2018 Rune Haubo Bojesen Christensen # # This file is part of the ordinal package for R (*ordinal*) # # *ordinal* is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # *ordinal* is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # and/or # . ############################################################################# ## This file contains: ## The function clm.fit() - an lm.fit or glm.fit equivalent for CLMs. clm.fit <- function(y, ...) { UseMethod("clm.fit") } clm.fit.factor <- function(y, X, S, N, weights = rep(1, nrow(X)), offset = rep(0, nrow(X)), S.offset = rep(0, nrow(X)), control = list(), start, doFit=TRUE, link = c("logit", "probit", "cloglog", "loglog", "cauchit", "Aranda-Ordaz", "log-gamma"), threshold = c("flexible", "symmetric", "symmetric2", "equidistant"), ...) 
### This function basically does the same as clm, but without setting ### up the model matrices from formulae, and with minimal post ### processing after parameter estimation. { ## Initial argument matching and testing: threshold <- match.arg(threshold) link <- match.arg(link) control <- do.call(clm.control, control) if(missing(y)) stop("please specify y") if(missing(X)) X <- cbind("(Intercept)" = rep(1, length(y))) stopifnot(is.factor(y), is.matrix(X)) if(missing(weights) || is.null(weights)) weights <- rep(1, length(y)) if(missing(offset) || is.null(offset)) offset <- rep(0, length(y)) if(missing(S.offset) || is.null(S.offset)) S.offset <- rep(0, length(y)) stopifnot(length(y) == nrow(X) && length(y) == length(weights) && length(y) == length(offset) && length(y) == length(S.offset)) frames <- list(y=y, X=X) y[weights <= 0] <- NA y.levels <- levels(droplevels(y)) struct <- namedList(y, X, weights, offset, S.offset, y.levels, threshold, link, control, doFit) ## S and N are optional: if(!missing(S) && !is.null(S)) { struct$S <- S stopifnot(is.matrix(S), length(y) == nrow(S)) } if(!missing(N) && !is.null(N)) { struct$NOM <- N stopifnot(is.matrix(N), length(y) == nrow(N)) } clm.fit.default(struct) } clm.fit.default <- function(y, ...) ### y: design object with the following components: ... ### (tJac=NULL), (y.levels=NULL), threshold, (aliased=NULL), ### (start=NULL), link, control, weights, (coef.names=NULL), y, X, ### (S=NULL), (NOM=NULL), doFit=TRUE, S.offset=NULL { ## check args: stopifnot(is.list(y)) y <- c(y, list(...)) stopifnot(all( c("y", "X", "offset", "weights", "link", "threshold", "control", "doFit") %in% names(y) )) ## preprocess design objects if needed: if(is.null(y$y.levels)) y$y.levels <- levels(y$y) if(is.null(y$tJac)) { y <- c(y, makeThresholds(y$y.levels, y$threshold)) } if(is.null(y$aliased)) y <- drop.cols(y, silent=TRUE, drop.scale=FALSE) ## Make model environment: rho <- do.call(clm.newRho, y) setLinks(rho, y$link) start <- set.start(rho, start=y$start, get.start=is.null(y$start), threshold=y$threshold, link=y$link, frames=y) rho$par <- as.vector(start) ## remove attributes if(y$doFit == FALSE) return(rho) if(length(rho$lambda) > 0 && y$control$method != "nlminb") { message("Changing to 'nlminb' optimizer for flexible link function") y$control$method <- "nlminb" } ## Fit the model: fit <- if(length(rho$lambda) > 0) { clm.fit.flex(rho, control=y$control$ctrl) } else if(y$control$method == "Newton") { clm.fit.NR(rho, y$control) } else { clm.fit.optim(rho, y$control$method, y$control$ctrl) } ## Adjust iteration count: if(y$control$method == "Newton" && !is.null(start.iter <- attr(start, "start.iter"))) fit$niter <- fit$niter + start.iter ## Update coefficients, gradient, Hessian, edf, nobs, n, ## fitted.values, df.residual: fit <- clm.finalize(fit, y$weights, y$coef.names, y$aliased) fit$tJac <- format_tJac(y$tJac, y$y.levels, y$alpha.names) th.res <- formatTheta(fit$alpha, fit$tJac, y, y$control$sign.nominal) ## Check convergence: conv <- conv.check(fit, control=y$control, Theta.ok=th.res$Theta.ok, tol=y$control$tol) print.conv.check(conv, action=y$control$convergence) ## print convergence message th.res$Theta.ok <- NULL fit <- c(fit, conv[c("vcov", "cond.H")], th.res) fit$convergence <- conv[!names(conv) %in% c("vcov", "cond.H")] fit <- fit[sort(names(fit))] class(fit) <- "clm.fit" fit } clm.finalize <- function(fit, weights, coef.names, aliased) ### extracFromFit ### ### distinguishing between par and coef where the former does not ### contain aliased coefficients. 
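### Note: 'par' holds only the freely estimated parameters; the full
### 'coefficients' vector below is rebuilt with NA entries in the
### positions of aliased (dropped) coefficients.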
{ nalpha <- length(aliased$alpha) nbeta <- length(aliased$beta) nzeta <- length(aliased$zeta) nlambda <- length(fit$lambda) ncoef <- nalpha + nbeta + nzeta + nlambda ## including aliased coef npar <- sum(!unlist(aliased)) + nlambda ## excluding aliased coef stopifnot(length(fit$par) == npar) if(nlambda) aliased <- c(aliased, list(lambda = FALSE)) if(nlambda) coef.names <- c(coef.names, list(lambda="lambda")) fit <- within(fit, { coefficients <- rep(NA, ncoef) ## ensure correct order of alpha, beta and zeta: keep <- match(c("alpha", "beta", "zeta", "lambda"), names(aliased), nomatch=0) aliased <- lapply(aliased[keep], as.logical) for(i in names(aliased)) names(aliased[[i]]) <- coef.names[keep][[i]] names(coefficients) <- unlist(coef.names[keep]) par.names <- names(coefficients)[!unlist(aliased)] coefficients[!unlist(aliased)] <- par alpha <- coefficients[1:nalpha] if(nbeta) beta <- coefficients[nalpha + 1:nbeta] if(nzeta) zeta <- coefficients[nalpha + nbeta + 1:nzeta] names(gradient) <- par.names dimnames(Hessian) <- list(par.names, par.names) edf <- npar ## estimated degrees of freedom nobs <- sum(weights) n <- length(weights) fitted.values <- fitted df.residual = nobs - edf ## keep <- i <- fitted <- par.names <- par <- coef.names <- NULL }) notkeep <- c("keep", "i", "fitted", "par.names", "par", "coef.names") fit[!names(fit) %in% notkeep] } ordinal/MD50000644000176200001440000001474113575527472012242 0ustar liggesusers3a0325ff4dcb211168aabebbbb45e5a5 *DESCRIPTION 9023f060a8aa0d9f16566887e1f561e7 *LICENCE.note 559ac0624f424a67aee8844691d58994 *NAMESPACE 75611024dc69bf1931574d6bb38a2a17 *NEWS 55659272104a9aad1734d272bd1f3a01 *R/AO.R 2e4653d22a116710b5592407067a163c *R/clm.R 68c2622c72a7bb94d9e3cd8f55bcf8d0 *R/clm.Thetamat.R 014f1ea93b846e9d7664f2b2f91ee949 *R/clm.anova.R ff9ae04aee9fc8759e2cb94493069895 *R/clm.fit.R 9368408f671dfd9bb25cac61008e2894 *R/clm.fitter.R b9d05ec3c1365b281f700a2ac74ab51b *R/clm.frames.R a8b9a1a9e11e5aca43abdd657778f0bd *R/clm.methods.R 4d66bcf34d3a1468bde432214c908b6f *R/clm.nominal_test.R 70508e68b520e5c2c6f28e9bb56edbec *R/clm.predict.R 3615c743fcd8caa71573285b4961c4c7 *R/clm.profile.R c722e34c70d53fa614514f5a2b184a71 *R/clm.simple.R d89cfd6e7df55d94c71dd33607b4307a *R/clm.slice.R 753b116331b820b564f354b4cb51aec3 *R/clm.slice2D.R 58fd99d651959406f6535d99add0794f *R/clm.start.R 9fa0894854f419fed388fa7649792106 *R/clm2.R 4e9e9a8a4c0d9e03ea8e8172b3997cc7 *R/clmm.R 50b4e71fd34be7d28f843b7027b62d04 *R/clmm.formula.R b234f7664542d9a1798490fd6781c4ac *R/clmm.methods.R 0076c7be4c3749aa6401c83308d68146 *R/clmm.ranef.R 9a4632ee4c1dd18acb4ec4cc3db0598b *R/clmm.ssr.R fc38006e5c921f44faa79985b2d3aac7 *R/clmm.start.R 00c455e5dec48deb8892bb92f1e6c134 *R/clmm2.R 6ab2547fc79f7db78530899fa0454aeb *R/clmm2.utils.R 880e9d0af35ba9ea3cbd9bf318941ef9 *R/contrast_utils.R 656050aa9c4f55a0c96245fa8ee8a87f *R/control.R 5202232599bfb42bec28cf47e35e88f5 *R/convergence.R c547e4fa4f51795298ad75d73007ee2b *R/derivatives.R 6aaacfe5fa77b811556e11aef44e27da *R/drop.coef.R e29ff0f24ae37c9a73ea5b6e008aa7fb *R/gdist.R e33c96829340e40bd7838024d137ab81 *R/gumbel.R 2f4ec0b833867c6c3299b8292bf78770 *R/lgamma.R 330321bee23e85384761aa86179ea288 *R/terms_utils.R a6915923a4a0dfb2cac5faf57800db64 *R/utils.R 7f89868e44455f3fded385b8b39af491 *R/warning_functions.R bc4f8e46e577ae4e8102aa582ab20a39 *build/vignette.rds 63eb28b797efedb31ad1f5db75ddfa0b *data/income.rda de5d075c95248681323ce9e571e7d0a2 *data/soup.rda 50b83b93508e5c5a9085d5204cb9ff09 *data/wine.rda 0b7473ccedfabb17989c25a0076fc293 *inst/CITATION 
72fd1b913a32136cbf5f54e5dc8e27f9 *inst/doc/clm_article.R d89a278169dbd268c00ad2ec9c339181 *inst/doc/clm_article.Rnw 7dfec4939959570051a6cb95f29bd049 *inst/doc/clm_article.pdf 64aaa511c3b4dc72b6c004106553bf97 *inst/doc/clmm2_tutorial.R ddde6e9af6699c2d9c11002d12e3049e *inst/doc/clmm2_tutorial.Rnw 0ef02ad58faf90790b4a0c0b27000305 *inst/doc/clmm2_tutorial.pdf 0dc6dd5fc752782590a85c3eab31d5fb *man/VarCorr.Rd 4d295ce70b37b9409b0b74eb6ae7d0b2 *man/addtermOld.Rd fbbfa8a549ecc09ad66aa0e00d1f6d1b *man/anovaOld.Rd 14a35aa475723573c41e11dc049ae5f7 *man/clm.Rd eef6dbcf3a110b0221075d694991a70d *man/clm.anova.Rd 2c2b6be598033c141f2639fecbc25959 *man/clm.control.Rd aeedb960b5d0cd91648b2f43261c0010 *man/clm.controlOld.Rd 6b5c14c006fbfe9634616757bf287b9f *man/clm.fit.Rd 02cc7944a1cc6a14870e5c1ba33bd0ae *man/clmOld.Rd 5529ca85369ae94713f65470336e6e20 *man/clmm.Rd 861db001a071e534dc5021120bb62124 *man/clmm.control.Rd 3095fd227ece61f6b6a099d5de003313 *man/clmm.controlOld.Rd d8b5a448143a0da1495756ea2c48ab44 *man/clmmOld.Rd a03c75733972348ddd5f505dc472c26b *man/confint.clm.Rd 50c7e6ec194e8af3bfccca5e3e4e61fb *man/confint.clmmOld.Rd 3d881bc96a9fd9a56c4cd1451c708a7f *man/confintOld.Rd 8e1dcaa797916a35a9de4f1413dee029 *man/convergence.clm.Rd cb5e6dd9111063de64f3643568674fc4 *man/dropCoef.Rd 650996c7b48d859ae5e5ac751dfeaca2 *man/gfun.Rd 4d87ff9fa6c1729a7ad52be5c3f7a16f *man/gumbel.Rd 7f719c8b1d0ede27f15c1fa9cd3aedea *man/income.Rd c8a9522a62440ebb217e60f954731a9f *man/lgamma.Rd 51b4cdc005b1c26b5d23d04197965c8f *man/nominal.test.Rd d8c875d10d4669c6b8fe2d4d65860b6b *man/ordinal-package.Rd 1f050e8e469290a5c6c9c07e3ae08a29 *man/predict.Rd 731499033e04d0f739cad2d0ad13b9c1 *man/predictOld.Rd 835671b27d83487aebc16065b332aff4 *man/ranef.Rd ffeacc4ef5eb2b97d794c89afdb5c59c *man/slice.clm.Rd 2c66bfbfde8422891b1ca4359c858dc6 *man/soup.Rd 41562a0c8389e5fe01af70802a3e995f *man/updateOld.Rd 079335e2cb6d006b7040c330f4aabd59 *man/wine.Rd 2760b86f8f7e96052a30e34abc0248f5 *src/get_fitted.c f28e89f2d5d342bbb2c6539ba3703488 *src/init.c fc163caff757ffaf48eaa19cae6df78a *src/links.c 32db0a78531f815ae0b468686503509e *src/links.h a12c6c7b12e1447b52deacd9c1d0feea *src/utilityFuns.c 735d1e1f085ffaaacf1a7628930adc64 *tests/anova.R 89bb425a86eefa6d518534ba2bebffe9 *tests/clm.fit.R 7cf9e5abc7360d67304ac97cb1f4bbad *tests/clm.formula.R ce328403fdc7e00f0f90442cee2e3fc7 *tests/clmm.R 9ed01ea5d1feb4f302de5a957e195a3b *tests/clmm.control.R cadfa40f297ae2ad3013b99470d73116 *tests/clmm.formula.R bb53b627bd127be25140ca9b18cd7570 *tests/confint.R d8267669e5b9c3ab305c6036f1c8d623 *tests/nominal.test.R d1a9b3c673dfe17f1579cb8527af60d3 *tests/test-all.R 16a2a63ab5214f0a105692aedc0c8fc6 *tests/test.clm.Theta.R d4e39c9cbf18fb828ee6abde86063820 *tests/test.clm.convergence.R 34e94d40e8f807c7a1b69d9d1ca9f4a0 *tests/test.clm.flex.link.R 972645dadf3c58dda8dfba40191406f0 *tests/test.clm.model.matrix.R 65465ddc9177b9ef5e0c1eb7ed83bb39 *tests/test.clm.predict.R b237b3a6173025bf72810002e9e0b195 *tests/test.clm.profile.R e91f920a51deaa7c84a7d75f9220486a *tests/test.clm.single.anova.R b666a698afa2ebefbc12b52358969a05 *tests/test.general.R 6faea911a5b2575b5acb57087c697201 *tests/test.makeThresholds.R 434ae1cd4de96ac0d2ac54db684ac7d5 *tests/test.sign.R 0452a68e5a919248553360c9622eb926 *tests/test0weights.R dd771457105e82780b335f6ced52e736 *tests/testAnova.clm2.R ca67691eee70bdd41b3ae5c71f5b61e6 *tests/testCLM.R ed362bbfee46a83b45c0958d8a03806c *tests/testthat/test-clm-formula.R 5a26fb2b90f90bd574da4ed0758abfe4 *tests/testthat/test-clm-predict.R 
ff1d040fe6da8b6ffe3e26b2546aa27d *tests/testthat/test-clm-profile.R
bfa792df07f282e896fb427ed763abb4 *tests/testthat/test-clm.R
0263b906dbd4420343b72f0b9316ea73 *tests/testthat/test-clmm-checkRanef.R
a9c6572a4ca505408b5ee7204021b905 *tests/testthat/test-contrasts.R
49fd8f2e430e2be16217acbc1008a209 *tests/testthat/test-misc.R
234b3c903ae3e070034dff21ff97de82 *tests/testthat/test-utils.R
d89a278169dbd268c00ad2ec9c339181 *vignettes/clm_article.Rnw
3fed23e229d83c7f3fb2734e070ff1a3 *vignettes/clm_article_refs.bib
ddde6e9af6699c2d9c11002d12e3049e *vignettes/clmm2_tutorial.Rnw
b79878774fe08d5c9e41784df0e084fb *vignettes/ordinal.bib
611a9529149925e573c10c3e44e35f65 *vignettes/static_figs/fig-fig2.pdf
bfd5f3a90bf5e07ea91dbe7989988e73 *vignettes/static_figs/fig-figEqui.pdf
4ef0c7b6ada9dbe4f654e78f23c09f00 *vignettes/static_figs/fig-figFlex.pdf
3c8fe3fcbc9df098a95ded136c94b2c5 *vignettes/static_figs/fig-figNom2.pdf
606db1c50178c05344e9f26b31c45375 *vignettes/static_figs/fig-figSca.pdf
ordinal/inst/0000755000176200001440000000000013575515131012665 5ustar liggesusers
ordinal/inst/doc/0000755000176200001440000000000013575515125013435 5ustar liggesusers
ordinal/inst/doc/clm_article.pdf0000644000176200001440000125645313575515130016412 0ustar liggesusers
[binary PDF stream omitted: the prebuilt vignette "Cumulative Link Models for Ordinal Regression with the R Package ordinal" by Rune Haubo B Christensen (keywords: ordinal, cumulative link models, proportional odds, scale effects, R).]
f d:Z=p!TՊtmGSGgB~ gG`&pxmGc؏4~Q%Ҧ$4Mi~4l2xؐhg1xmô E/at/m( HaU "R!s F/9ڗKPy ?艿  I!LJom{Pu)܇FHzbr39+=JF؍7<+m̚T^ݓ}MĪhyF$kvJE!{2MHJgPNAhɀ@^l #1lաHu}|^}D\?JK K䓢{-xݯ5_sK2<hy09a-x<`?TiʕFWw ț1['d4"^P] TE|dO?(Nˍ!bk<%a[Hb5j3ݰi'KrsQ */r/1}z 3ٸ$K ǹkC2 ׊6tTgd !H㗴wBdxPt4B]a(ԫUh6U2f![ 7i>ZTx,8%l|/'ZyvR qlv%+nwoumn&c/[i4$ƴGe :%Z]G`AAæFdZuD"6@n||T~'];Y tMzx1k:-a;&g7kaׯELa{n4y5N6c[lO5` joU![6vGhĜBWPStz[Suė%mf`c ClR}`-\n"_o!sL)Ce(lu~jS vTו7MJHp]J`|<ob5`I+.u1%hD%=5:,F>O°>ZCk#gZ/I$^v?{4† |HxDmg炿Ҩ6Xh̾ ^:A;/6':KLsQ)M}<[zdsݲ擦]*t<|o23ȳ,B7n0rϾ1y5϶=}ܿ\ )G9+KxnuJy/nڬ^>"A"zG?6˹;dDe6SQ%ndit{ς fciK~6.^8~cV̘EZ*@YXOZRCMHG~c~ݒe;?zս'y9t@LL5^OӯMxHC[[-_š.+<]q 1(!TS_>^wj"y71֋޸ Y\Lr_Gr]k.f:9",#eߒk+l֊#bUuKtѰ+Ν;S>crڻ1'.H=_ȟ> W'8mXqt# UFU%cydh*/ϫ,م9(b/Cwoz|<9g4&F|xekII+}jөSUMUjo _s*:?6k /Ey%ʴN^S| >):My0n&Y)*ӻ"!B0:I'@M<=Q~Q6F1<&:l.$4u$/DQHyL\M1l_lƘઘ򂬌R\jS, gZPYbvx1>0E`^ }]^M@<̻Km'acNufGgI0bs;Xt!FU*%XT2KyQ|(I kh$ ǸJ^Du{ kT{z!@ &?q$jNf޷Y v}w,o#k<]FaS0v9rg8}j29TS!XX u⼒[ ]k=z{LSSc_Q-WςMq> INg[%F#`^Q]WVgZg o:،WQu3y*c=l0]=柰1k0ު3l+l`8̀iW}OW0*LdfPAXCmC<-2=4 IYerY#eCmBPI*`_%vT"{ IUN|s r)iv =:ya_>s39/_ؽ98$>/;s b>޷`6k{ ěT}ɳ9fȪe-p$wKoI/ W &k׭[?h.3Wx=V jIY|[=9N~Q!@YG]JBH-|uaö7;l$''/Uj~)_xw+*MJNJ  KjPFGEp&,(7'7omn,/E+)=Psy]EuYK-?U[$LH=|/^J0,ǿG4Fm<`V0@=que4'Co۔H)zt'_~\B$|S%\%G*s C+h^m-I7_F\:00聥D![/HYvp@/zN|)L3t$$̠ʰƆ.3I%`+}>qRqqO/mT_*\#UCuUs?QbP\SU5PAcK  (wGEEQ 0V+WWffԉS( 3g>4f ՆW#&NH f&kcFPQ4gvH Ŭy楬+923:QEš"XQ]D+F ##frsG˨(^8 ?0endstream endobj 417 0 obj << /Filter /FlateDecode /Length 6214 >> stream x\[s7~_y۶ƽ1f32v}-[1):$mahh4)JLuq;\]+}].o.?o׋ooEI;/7/Sze7F^ i]Zმۋ_>_vmgo֗>iҧ~r)-1ywyj;-W}#y} ۤ |}څy/oEhA.[5mg/ifѭ;)q;vZq4E+QiX.XWJBWʴt/=| vd h-+ yuq_ɷ^w\iGaZ. euJ; :p^4ͥ@Nwi]u#Э##;:,>_LIX?^|4n4_n.~eۧJUIӭEn`ҡ<[d:Fk,|ܝlYd@.MBTa{5l|9C,+}Dal6{##,@ 5K7<&8Vc#Xʄ}.uBW(dPfOKDq02 )| :N@J(zfVO6[㆙C\1XWC.y>%q!ߠw8Fehwg9L+η.Q3V(`F^H&5q6y {l QO hDA;e䨠j "I칅V+V*4޾y׆d7Rݰ&6ō N/؃]veEp֖!A[!k;O79[#v3ӂ WQW{`Igф+_ωmhθ+dV݇|%yٛts2(IN*W;k ъ#RN=C%os#\\ :KfLօ>Gن؛F)H` 9&Jq1"^uD e@0Ao e~6qS8\ԁbDdmǿљ?<0J_Æ6%6mH5{shUyg⸩ܤ xV4Me YFTM!h8Gc4sq'{@3D2VSfL?nխfWfE6߽zlh^ ["X^jAZ4/ǎtO+CyJpc3QԖZk&XrVudy Qd+ TιB441KiXh%1aeRV!v$Hlʴ` HcaVy~\uUD[kR woJNsTr?!i"k%#܆^U_I# z uWxxj !hW辂"NiWׯ_רgL+0tj%؈ã=ߙiЃZaAg [&nңGf" Vzb44YZL+UJUiCh]*h`7  D47ߗL0ry8ac5j߹wYSGkڬU } &׊ǯ{RSD]-iL9fat]d0"[~*uFE}:t\';9_2ոv3~WvoZld l[#(i3 G??H.bXRUlX1 }׊n, {eÈ,kq(u>ΔV2EhD@Ckecӡ2T @1KbPƠ'(}[cwmuclFBh=u+e|}.?"ȉgv 5VDSؑZ~|yB%8QEA}_BS1QPj[ŇUΎv0ҀlGRJgy"QH8 QNqٓqHW  sp* $i-|5H088 yj1?]kIc2#ćF񝎭KVXq`TL+ "=FMfB[U Sf=J_R$ȑY1B硊Q $d)^Z")@Y.ŵ\94^y8QM5 Lͷ*9qTe/H-[BM²eYSX2,̇%ɤ"_"!ž= }`1F @JȴT5UFe>z s0DDtLDog6Wqj/,6#5YѨBe̘Rdv#Nots%*S#e 2S%ҽL2lq\Z~Q=o8-Vdぺ{ZE[ʃx4YmԻR9.@$PGC8>Zs>hMK?jK4Q+۩\̓ӑݴ*l8SBJaC8\Zmp߶cXtVNH6:zwȓ>4lOuQSL ]j&cMIjz _J|cG5!jlS6IZ z0OLaH0x֬i\ogj^CiϯCRQui y~!WVNnXh;_ pvJW>rWcGkoIiNHdrw3LQi.GuBb۾qw2MEf E'?Bt7BMk]> kS-ZJFӴorS<*ׇ#4J)yVYPJ8#@DWO pe\[5}GFuڒ (iP؅6$;)#7\SC3on GgFڨ @ӦII8fzOp <)!QH q$~N=3!Ǟ!޲%)|_{,7sK9cxc}-ԋT}BT%^3ȵ`]nK[1fθ>(8b!p#N2;9&>tkL3]sls3S25v}q@x\x=6+j|eéDk.ˋc9L!M.@[L0k[N?2} 2I5zǨPL%v9Q`%;SxnŊ"*[@'kχDNߎImІ W^EQ|,`r`\WE[٩frtZ)?c>y6l^'ĚT5'jtsͷ }?szCۏed#ȾT\uD NCR_TBKB͗pORő@[J?7"my.يOP[Cp=筻 u~9 ߐΘSA e#P%Jn*Q>YDsYxAx>Bځ Em:] c 1ꥺJ=О i$hKryvhsx㐖Jn$1cx8qw'<[CAOStucC-7Xng!9E_ Kog{aL%G Vcp0CpP7?֔xő N%H=0j80GÝOd!.@ekb v |'FXс]J_ƣjN+afXMШoWxC(#ݓ{+h$0;v-\2^wl\@צcUwhE9$oJT99 WI:p6. QFZ [endstream endobj 418 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3412 >> stream xW TSW>!$*rrNRG}cW"Z߈Z Hrȟ$yHx:֩NJ{˶̽w{ug:+koK(o/J"X.%)29T1cu*edbD%T~R G >gƎnCǠQ(b4%H6&^8sYSEGFHڹ#1029&pWd)ɁQ ʸB\t5|,NMߙ̊Z2&6>a]eESk Z*ZOQ FjZLMPӨ7`*ZF͢S+**Ԇ򦌒%^ҟeSdrZN~|o5rFߵQP{M(p2F]")6?gqot07q(՝Ωk"YktOp ;5Nwh52! 
eQ|KAҸOˏ 7$>B2Ozd5K7m4CEm>w&_͟;M,l wKB:O_swyŝQdn69g4ŕzmrԬ3g$$G^|*LsKM}9Ϩe=hƖ3>]5 3 *_k$~K ZCIq>p5LF giOq|x“|ao-3kI]M/7d?5I<vt)T4`U=;Ll֊6:rܨ.?&U+Mms҂][ކ`f.:Yc7C;Ɩ{3ܙ6n#&4xh}a@>pfF5ae Q0)@BTٻgoyhЅw. Vݔ>yꦭOOXa5qY-L0 mzu|x2b{ÎD6a_+b0VT<{ֶ9:&x]qP4+KILA?jȐ].Oy)KpMUw[\ g_Sx4|QMt3kFdymul7ԣٍhNi( D񚤒|]ެFjKtl\^ģ3OD4bK<#b@B8Qtܨ!Ia H ٸoKQR?eLiceF{l}Iv82"37&"9\֦f2YHBgJshvUDL}åJ[;Ϸw768[nˁ>~$'ZDkJ|]]{Y*xT펬ujJ4Ќ T}koi6;76/!}}bp0͓eXSvcGmr+!o6s_5O?Vi=4fV=15Rv^&߾|?S90ZMv-),)^[c>bDFw\V|_!w2^KxQp.C YY4L^]ДHUjB4˵ѭmgMp ܟ[i<]PI,*7ך]g7 +@RN\ I~;Wӧ?SyN(1H-6m.R*AkIc) I)t]# Z\G?CPPG>h#@ɧWRIo5 o/]9brI|HxXe"#_: 6MI>kƾu1\~^wew 5Z"FÁQYzMr@_2riPH:ZN:p3b{?{?m?zK\\V= ýׯ p&d,3 ||K#jIQ;'Yendstream endobj 419 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1217 >> stream x]RPet[MB;r*:J0;*7!0! BcP1qK/܁TjN2f(N=*;_޵}?|>|8$ 㦼|'rHp(9O@Z9neA٢uUE4 IqqlY[kRMye닋4՚񚴲JvY-+Jhh ojdfifgf_gą :r}fa6BQ(eYH\,9$EY%sd"WHvS*>A@S`P`UNb[kmGevl:ʛtP8! }iW]|!}st}ۉWi-2pc!vY!(k..@K+:gמϠqx"Ҫ3v(~ @Qpe7usFi4 bρWD W!6*-"{%@ [)^p}Zr+ d(1YdISrav5;}ktY,h9h:byx37m,[ݶhեH.IjN +w|$AAgᄿOStcb+sSDvSXg@RɉwJgK}S--[[lJȎ]u-@;c.{#a+:DVvCM Sv6NrNΦS'?Wendstream endobj 420 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 972 >> stream xm}L[Uky o\0(Ə%~msӖv-P(}2tYۥz+-0okqc#Lq1#}F4js=;JQ!6l*jmZW~^NzΩdKYE%ݫ"tjХ|GWmtlYuPuUVeɑϧY{+-}5.qE\^m;l `˜{JJ’҇sT? G'_92=wD|݆_C5G MGG{#m#pz-+ $;D4lmzjf@L }zKADDozbo8M*\vMrPY\>z9[qF[d&@t;tBّ>0T N/%.<0SCy=3Qq*;LWd'3d}Ne6NV&n༌U^0UΤZzfH44?+c < W΀dlҢFJmf|o]lv܆5:5u)enХps$;otb2Ϸ[=endstream endobj 421 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3054 >> stream xV T V2Q3j] ZW-mD",! ` ;?#KX"RPj(nbںޱwg;}29s̽}H$s[B"pCocw"~ٽg)V`+[h@bh\ezcȆ`yp.[N-SFN-S /Cd `?yL$\-[>olOV~>~n ȍQh?@`<|ƍyL%O;A$< W‹G'Ke0"` k†%)Ao 1'FdDBm1Bc-ngVKI'M_QcTzu(/8 h̃X3O7.{?6L.I& 5gnT"$YgyqI%l(_ 5 ~-"n{|ĉun,V\a_ӌ5#D=ɼ w)ɠC;? a;l13XJxˀE- j[/"µ+ZgxpڽD,En$ue >=z,񨿀W*l3X53 #b"~JԖT2x+,Y@$U.%i0ЪHꞛHV컋, ӕ[Y]j6b)ŐJIdky>@a|gkz-n:`ʥxX@DzrgQeHJ8<@r6t'k0 ̬ _1s2\4#X):{!z#|yPBY"oHw#`$yD-$[Zo[XK7^NBt Rs8Ďv%r3%c; 'sa"zwX #Ыڃk̷MϿEnaþ|-܃\ <۝}(\]m*-+6 U] }HtNN..SLܼB):io45VG7C ]3az*֯NEיx,*Joz~`m&AxBO2 5-Yi2>S(HV:]k&} b3 3K }yڨ.g1vQXUwPWolS6uÄ^)dUZ'E.wi_lЦg%kٴ嬱@-uiE ];:A])S2THEƚX|gNsb<ӏ!)YllR&Wdb~z? fl1L :s̗ȘZ|4շG]nO[,NӔl.MGjζ_v;k)NA!U $&;dEWݐqv&>^*š%EIԮzS~<\|8,=eo/9vn7O+NOoANJ|5T숧#NhB&gh` ":lYcıEJUE3;IQ9li c}c֒JaQcuqg Na ;KPBLZ=@Mh~@eI֬Yb,S⅀mhĻ|H+> Rdud02 3?C2Z]$m4BE!ZA /4azfc5"cGxt&3*<K)^5sEwZg|taS77%OaR{̨c';.VpnssZŅbm>-xE̻1 k2uz(GF/kd6ё 4o ߢ_3K;/ kOh#hcS$nڒ:[8Px G *ONUJh=2y ur9[Y~| dUjB̍ݱҧ۵Ko7$eClf&$E+ɂ}ɮ燢4}S8ĕɆIjjH,ޮn`G{Ǧ [!V*}7=a1,iR$}Vbg] VnegDrLW@-T?Ln7 xP} eX> stream x]_LSw^uؖMeéQ ` Rz:K+-+4Gт,fɲ0f$sq.1nQr&rP$CA(Ҕ5 -VV~imG̓S+ i³"Θ}vW`DIQۅ~c޴ysјv/n` W?__XȪ:YeS.L;Y=OoKv1%VM;} 4$Ogڇ˞e42WMkn[~?R)1j9T.y66>2u;] ;t7^fuvwo@l)_1oJy'j b15aʨ%g%%zQΔ1cɏרlz;ͷsh4ET.N' wbQ/O8/T+%69 \/usu4=қ^w3Lk*8 ߺ55> stream xW XT- (H1!45IRI7p@ffͅ6(006XBDfO֡gZ~>;KS0]Z[DMpD"є6(RRGe8 .bƟ ]C N^s)H%^HۛpEKLNJ:|9D EtlB<_i C6qOkof*329Qkc퉍OH O(nB\}z33nLħ&n{;H2)3󔗯rO>e&\Bq|X4f# aÄdigJGuY k X/CVuxzI8=B&ָySTGH@-vQW?* 4U /*WM/_YLgSuDU,0rB(%BEp:Z]D`oʈ&K~__tPkWYȅXfQ rV-C^Ju`?ę-U5='BW C"sX%haB_A&z<>:PZ4a-A}%xGYrțCN zhGp1Z  oc#Vu8v !yH6 rHW-y|Q+_Zxc t6@N.MvwB*ߪ&w)VE56DfF"m O4i"t]u"} ME6\y*Ti$qsa9GP-tx_=v]BnP"Vx8Uǡ`WWm[ytUy㾖r?HDo-<1 ZuŏiؕQlWc];vu_< 7!i|aA)l>ho@>XP6@Kz|mViJ<ķ%V UMX]>{Qw"ߠjڡ:N0iB%vopt\ WJAUT' -㔹}vѝ! 
cdd}=Bv$er"=#E}4b)(ݜLoHmoPwD}uǠ^ljm d'㰈!kb+T%UHTg]EL Ws$Ab`'HK}٨ݵq:Փ\\h'%:qtz{x/S]Z tJ#YU|x\i]F,,'R􆜖ƃ-SSldhiX/TJ(1.;6 !Gttĺֽ {C㾧݀".})xyK|#Vy]Zw߾ҧ*x$TF["áfG8Z[]eV[0`N 2 OFP'UuUwI'/hLV' lKod膄K` %XtjkyDyGB -+A&ivrHqO˰))))͙]96kp!B?ӒtmԢ hv"Ce"" bH&G.9jB.z2z&غF-k АZ2ũ]ԤIMIi?Ű;LONɏ(b%E_]Btn^ 4&HlBbo)B_P LӮǚXxWR76L.'$9 Gƞ2cQlSD[ѧЖͰ^1;ö(c> -;7z2#WΝ9g_: ^6! %#JV:(BR3D> stream xu{lSeY<ڀ='nA.SDe`29vi+뺮t=u]붞0 *IH$`"JBL=D}%y1imeecF\`gCI6]gWd_8ܻ @& %>Ǥ8^9 m&#v͚u⹑Q RM`6Sϩ²BcE,6S+ T]CF2*>*>YA^U^?ب5K-b&l54I{xCsQErM-;9|#}! >[O#ċAORyT_ʱ@ź-F?XTety+` t 6G .{Q-_^X9h;4F+H/A9KpLib D+⧁e@CA4q8{Q"ZQ f/=o2`Юi-X@~:#(l~H&Ǔi?;}G{ڠ0) p@|=8nIttm+ٝGO>Ima"@9 j'E1'?tlC>I{xֶh|9ӇΞeR}n5LTno6wA^jwq>.ʆ 91 [k :}Ylz/`1kk{GsL -\@ {4=> stream xcd`ab`dd N+ JM/I,f!Cxnn}S1<-9(3=DXWHZ*$U*8)x%&ggg*$(x)34R3sBR#B]܃C52000&300012|_5C/0whJnIЫOol~}3.,a-[7o|3}x|m|e ~`\XBy8y<<{{{$/.w{endstream endobj 426 0 obj << /Filter /FlateDecode /Length 342 >> stream x]An0DmlHțtEE;ib"^Wezۖra˷徥\k caJ/ovkQ_{%6woeSZ|[nE+qEKpck[7h *zŎJSTI(kbHG256S"6hDau$"j:KJ\Q]x""icG % 1^<<^ހ5DH ]E5kD}ߠQF`#6x->xLBۖ]Gc1oeendstream endobj 427 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 6807 >> stream xxgx[e$ p $@BBB({q7ٲ,ɲե#=]{$N!0i2L2X=+'2\w}{~y#x|>+W&K3JsR̞&-8'0c܃|n!%.`SSܺщ I|Rl"]8}~q~0+#Ssg$v+^,9eO~IўԄeVJX_B.f%LK؝.mSYIkgWs 2wIS22ss rȕݩ{ Y9YYL4(+#7Y(0@m^\ERFQt鲒d"E*=)#km81G/~8 xo[[[[{7I&ޫͼ-x[yOysxKxx-=cxcyx!|w||?{7wo1~u&O#q>=@~z{Wsrl8޸/qM܇&dCp]8`TS$ xmgLD_ `4YZ'}o]ԜOu(ܕꇷa*{qGZ "]x{Xb\ah_;|i՗ QQohCO Á{+G]¯\zi֌lZ{jW]pu EVNWiRVcV2xj}Űj .Lt@B^Z(L܏40@! ݇C3x<^(z}kv:PK?s8AijjwxLJ(R˶ztrZD>fމn]_܎9.p}&(jKDU@h>a{蓗lCg#.*VjK]sID6X!Od}WU!ɵt+cw}48'6hRRtJY<<Okww3;p>_o^kkjr@ lǝ"2ECu85._ ԛ ~~z;<@?3痡Oo++Ӻi-rZ(}C,۝hM9CG#^OK+䚊@B ǑFGNN*E8L@p ]VWu.:p3dRE\4eo'qi{PO_}3 1t%Hz疀3܍Tg1,tvBbQNH!wceM%P4L꾓IՙjzPX9t|.tL FBP+RGث-~nLN$l 7Kx89~Sr aR@뽶Ԩ>Ǭ(Y ځU^_ M=w`*9{u_+ [{ IyXQ\}kogUs3]Q%e$+-o;JBA VJKl>~Dqͱa9E:oE W]6zjVWmym R򒲲RE ZSTwl[kA,Ə]z󟃱\Ӽ㍠`K$t]zh`il ̓R9edFcOmin8"G{k EgsJ*yTY gLgwL6/A!S2v=X^:yQ5$a7jmj(u yU;&5 ߥ;Q@z& tF2ր*kQY٘a WڤėОƛ2ԩMQϊ@-~Mì#P @"A4ťE/D# W5JN}m;(W8|WІݬow0=Axx{tw \(cEx"Fpj uօ:p(vƖH/=dozB&EhjCVkY“f+˪c19ID,u Pln7ۅG1+Hh~IBþdk$G66,#4 P1ߣ;y9fX4Uos6l`#"XNl,<.- XnYbWH&YNpኖByä5eUE\inI4D'㷅>UH kC9=t5& bP{MG.BȢԻ4(,*(NѹaEmLFЉ5>]A9Ds날@8/hiDvڄ fJ:!O;v[l [^|3;hQ8 />t;OK``,ul_0Ar_w[lC r((MJ]"-'OotU %ht548} tH/R9S ߔkȣVLʤbˈ! 8AK5L9;.f-K K COnl2i4I kZ,F>eg*;dwn}jgvL)`z~A1d=N'h0~8|M'C 2ӫ|k FbO<~ERAS9Zk/mOⅫLJC Sj. GBi\o 6{rڿf} #wF#zύD69&Z2iQ:깊+ZRd4g:de:# *k:/>:[<@¡pͪD"f%/.tLRZ6g$:s~ ='T,4F5]Qo i6W6'6vQPU7fѦ3vlw{;lnLgSPY&PhS?w 3H"Bqxn,!>IzTm]u0wކf P||:c8{%r Bn͝=mǓ˼֎hW_qUXjT<ў}EXJ_| ZyS(~B7ѣ4g /x?P¾AoXyߎGĢY{WОn 5o ˨BmT"rZ+4x ?ŀ 8tHM nw;½h=BЃPX 0]И/H3Bi!zB#}v kKʱK L I +f`aCRz~H.~wX|lo*5eϥ-LL۲ lYWg%f^?8Y> 2%J Sj b4jpo: +ůgQ<?xw[9ǢYUp4 Fy^¶yYRKRڒ}5i[? 
Vwip,VN4y!^eܝTUTKhZ4\{ @{M.J«ـ+oWZ5vh6@.)Vhj3J2 i\`.~^g #sTxvFum> stream x}Uip^Y,` M+jM]q$B`j*_T_į,+զ⒆Jp󱟱bŞeӦYl"HdCbg?WRO!XJ\V",+)^ܼM+<3/i(x)ɩɼFf'(.W՜*>;ZV6ooV l+kr~ Ȋ@RXV~bێeD6";]H.IA D"ˑXz +xdy\+{3>*,>CEW|.B6bxKSaf 1}Z*EZtvk<ꕺzRs RU\Zl;m8I5 蕁"[VZ{DKR+PkC8ʨ;w<н-1q*/=3:5`1BD(eݣ5[84r6b"(Aa3(HIn#:PN'4:W CU<4MYB >Q=)#u2)?zp 9No9AE Fk9JaٺR W8h4BJ*)4s׽qa-\II}8y d.E[ϣ71_l9#ОFB+1RV$=Q띀6Nk w@'X/W+iz4|C)Dj .X| wꮌdʢrAa1hbSc< V[76Z)<1$laHzDB{TCv ǡi4bۓxz Hǣw.#ZWB#ZݫX&{54, 3;(6g_z滔M<ћljb6.jho=Ƃs ʤVj% *n[#"] O&|_PS[˔Ux@־J hFsϔ\9p^H҇6;j|MP/Ƨg8&>FS4uL>11qГnY`in4-7% - 5kŃT*qrطofjNȫ 6w?A$iIPU4vb M*Bרt\g[1ѺJhqjiEjz 0&'ōJr L}PSd;l n շ`X ji$:L_qHcp[p.t]m홼4:84vva517}sG3sgeaYI@j%fw:)7{e~oflʚ\k,.hAK* 9)-1R05 zw[0uPlbS&㥍55nRdY I^eܟ}L-;Z"NȺ̎UVpK^G7\> z9:1/ 39tc7߹}&!RIUmf2U 덬'FXHΤj 6T)©"Hveަ%}>KZJȸ~u9e"]_U);⓱]N=Zz{4PMH\k4nC1&&F,.`1y / gMendstream endobj 429 0 obj << /Filter /FlateDecode /Length 250 >> stream x]n <ophhKwaӴqJM}:M;|/!Ӝ/lykMK*{MG,YYӒ5]cQ5GAM8|#7u[ kT1a0O^YwRB anjҖ,#=:cIOniQY!] 1`Y}H'V*>#D9A>s^+MҖ49%)k.Mo}eendstream endobj 430 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1815 >> stream xTkp޵SgW$ 敁@ij\l5(ȶ,˶llYZY%2CvxP'ے@u %C-2S~]+J'v3wg{}$)IYI75הE ȪҺDl-I^ad Cx1z~&z4h:,InUjЉ\mWUK/\d|~].)o0WVZWQK=r%U~S.RJeREDU),&)TPIdCM/u~e[Z[[jk:&YL!)dzY]2/FɕrY!S:yT$JY^RRu5AdϪg-X\mR.q#v2N tKb4!$ ğf2;BIa3:hȌ|2t 'n^{ +DxeJvl0P&c`8ãB^Eqq nP/X?gP Gy~n^VODl;vo ]v;,b~;u 8oA'H%ckKZ:~FmB wF>&)DCARV*Շ 39+nnZ9D.!`ֱS9l8n ަwR0>.DiwPa*\==n/lm4.JвO[(β^ؙ/a.A~lGG&Zң׈'VW&-="U M}}zurɑ7d6qodY;mYS - PvxȖZ z<ÿK0\sB ۄƀQoj%v]9hD(2ițD~7mqHN߄vmYd2ۙ /})OHkdYdžMz- vKBTDy&D{紀L3%[D%S ZRMԉrP=^&¯8+Kx:ZƠO4ep9j `.Å#>mz!v7HsQ@gvB_Jǣ"kJ\uxbnu'dz'ۤ-¯4ƷH>JcwÉ(HPRq:kh o {n<(Laa(NEh=4/\]ᰇFzH4|sne+UG.vhP_^w6ek^Ƭ/pwNof"∜O?Cѥȑ gᆮp7kN2 o n~['׫Jɼ8$/~jԙ' #.a![>,n~-^:Sh{|W!8ݾnn,OH49VGPS?Akmjc=CUꧽ;zyݬ禎Vs1D}v\U7޸Rw2xkītb8 63V,ɔ($am`6zBKs[&o'; D=SaQ4-N > d]^p\nwg0tVe> stream xWipSW,[{J7RV,,V-IO_h}5m:meJyqaZn7,Nː)´dei%i̼촬lM[ec֦[R.ݟW[^X./+ x9;V\NRk ۧ%Kf^-ƛ{[[[)v'" yw C$&}S6NJM'|'/{rړ֔7@C+<}3ʟWV\-]kc9/AHz!8I2{ky(`S$d PV Ou ^hYUVT>X?r;0(KXZeF.y zʒ!5J[ -EIXr ,\mFiIy&aBGNp"]2yʊ,PJC(ԭn5܉s!= imtN_"PW~yVB|O7kXVJ4V `67u`SmE¡Eq S8NC<YC"F0Ŀ8 .ʯxk«AU؊sf`Zys18t'TUcX89bomDPP(KKu-Z#vIc_MT &Sd Fԙ<{b( \*Aoe/qK\>j8`܃VD**iɥPG3 =.@8-HB$O&MB /P)S>4Bh"C?];Z9|*, 9. 
-؟NRR ]\A1aoXIU u@)JRug;s` 5VQC5V.,ɔ(j%{b?)DJj {lcu(sH36/{tS{K'R68U @ >=gZ@=:Ojk54} nyxo-z}OSR!}ˍtsxDnJtFM쩎>1mwĞm!rbu5y-Ma\:>J7֯&iٔþ`PѲex_xM4o}<3 B_F9 A\krPO0xڋG>ǤZAH˥*9`UP t˒m=3~QB?cIJxqzRZ?49MGOukvh.52u~hmABuIk+j 54Z-#?'r>g^\'_aBЫxg둸C}c8x((z/'=O?P5{NuY46#?IE + 7}z/O'zXf,$"(9i`ްm,(OgmGCt%k Y zݶ]e j";f./yT2hY~,`"* 25!L;#h*޹:6o;xS6Zi,jԒ 2:MsZ}4SlDަus~;h#} Tw~}xj'+ACmP|ϧPj;]dC11w&E͆f}ydtABàf{69HA)4΄r_ʑ>kω>CZX"j+Hw9zO÷ \c'$eD'(GglpىGDNGc>c>{;J#}hp?LSqJĬT N>r{w)WC3ѡ"ݞu-g{zu%$ЫUt6{@aj6 *e%Ȇͬ.5B138m1S_vkj3Pl6RyhH6)S8JVK1NIϫ@'cEǤYr`.ilSZ/xiF@P?;j5h;t 9лb(^ˀMԚa܄:5L6B v7>%u_Ȥ䣙(ݏw #tu~lƯh41d@蕬UaGw$i!8js[T E(];bnf9"dW[ڍ]vCރu i"'_nc@f˺\~hR.-(&Gdŭ{D_)cwQoU"|m~qoEP*9) si(A/c}vGؤiYJ1!ƢVZЀy TPe OܟF?O Ӣ6;QKN{y> stream x\KIrco؃ ́-b򝩵 xb 5#(R[soʬb:]Gd_DFEײEG/n.ׇnq}./n_\xҺαUlaiP˛F./_-hu6/\x|nٵuYrnjq~5QZm'Y6ksrm1횗YKXfmSgUis?^/ Ҫ4 jYtmҢf_EKZ:;e?{(ۇ%XorVVP:cdƖ͋d'o iO3^^ews.u7A9b?L@ZgB9STq5RnܜFBr'qeؤڸl3 {Z-uh:;`hHhD[yG]hC!>nR7ڤU,iY/oA·,-*W4NEOR\ DlYu7ǚ5TWЮ~ -]@/Sm2qZ8ŷYd&XKyi57I^f}HԪm)Е_*hx' uS*7d)M?wT /8nbzǚ 17aZ@erf!#\H.X= H?6ƹk ~BwwYL$araSE# )3I UVlڌᅲԶƊZL_gpD&ozu3/ : owFCҺֆ{ܮ.0yጎQ4-,I*Rr?ڗ*qoH!kzۿo/xoQʆl\R0I!WEo3u>]&념us87%Vmicۇ؎<#w&fO{0`&b seX$ 15Q:q0[v˰C _-Ud5,YXm9.<WdO+ |UkMs>~]#2{ I|h}A|G>itŸxI91:r8OC-@!4&Pl+טck2B9._5RÀ7 X<~_ɺ ȶ|bed]7wdn^2燏4.8P#:)ټ}ᇪE2Ԕ=OH`CH$9Ev99Qu |1:]ǒ w%%@sJA>ARyIr Awy$@Lb}8(v_쥇9$@ I5D:dT" *.8,9I= CWJ q9ha4X@^1S*L {~ʶ#\#9fqpXwt^R@X37k9/Ix;h PCXfAg{+XPp6EHk2- ժ>FlQP#5Y(#q6@Mmm=fc Y[erdkt}|B;}`x7)u ^27r99!} ^ @^aq>s;[ s7 1FIGSw4QhZ)h9_12 >#^ "N3cb!x%ٺj6;{{#O8/yo>-:؛!y ^WɍIB/rKR(vA.Y !NiO$tv9IsCm0^ vV3f{f/0V6Y8h?[K"pxbBT$x-qܼMx]'?eAifa\m:i"6'yi3FDZ C[%y.kyq C_Sد$p%+-[Q)y H 2r?RNՠf+I330ڰ9N;>HP9 ` (;I!&"1{aAdgqY>3J{D88)AY:+Obx*T2nX(PJ'3HEW3`C&qyHp})ICQ_aZ>'(,oW%˄Or  !lI;tJL9vB%&U)ˎ'Ќ|yhNY?qAجt:#N_ c>8'*`t@҈`#h~6d('+O "91C<7o:Y dofÌ00NK>ϾN{KLlwQ{ӢL_Xnm%)l4;T$Ng.6UPٸ@|&k YKW,nPrJyr=vd;dH`EH piw84 fĘX^y:gLpy~a1:RR~N;=~jJgI.͟!l%>&x 1SE H=b'g]!)3 S`_e 6 N8 67[-~74}ꏹ\YS70|T"7mH&0+"!Mx.i݅AOOˑ7}:D ;Em:܅P%\ TzRkȸ ʳ3%4$DM8BTA-)x85CPSgd`sվEEPvHNj_,E޴kx#tڳѰvZC埳frV*w(8 8?3Ui|tW҉+χJ5*l[oΊV wiznZR$‘ᦨwה(vj%WxC㢌 %1]6#yqockU^jzJc g g\:EP:Oi]v )~3-J3F= [Ϝbcp2ks}rHOcR=ND,N0_mUHJ s'Ϻ*!kH S TW'jSz:,k⶙74Z vjU;2j5X_1Ceяp-Hld)`5- S8t,-^YZKh˔m(t6}1HkJsʈ f@Ria8yui0Li1+rbn)Œ]}8h UZz-ȓwv?J4=EAG!sz̃+ uϖN?+R3\\޿ @t*!t ĝ-=>e`y ~x榣Oii*@A Wp,,.-}ZizB!/ݲ<&{c4@_Sj3tpG]/cxsNDžٟ=aIAjz:ë71Ubޅ0{/틞gS_'F^n\:K|*kNe.L@9Niٳ)uPXVVbFyS]֙ҦZݙGz 62/=WخC 3=R!0%"ͦ/ĸMـ&# J"zXzbhHO~թ EՙC‡587ỳxtb_<2p'2.j݉ɡr3;FC$yp`jsd}>)^Otv(Eow],~.(ksq_r :a1$0h8Aix|)_?f˼I!'Y?(氋V“V9o ۤ HLR<}Zs{,tXPX졌ċ9iRw~VBןyڤM\ܴEGg",S&&uW?hA.8[KKt7S?j`tFJ`2gu-4I8\T56rؤ qWUVbnEGtg }PMa1-;Ruw*[hjZ-} ?!櫠 yҚ~q 76 a- Na}rҺ*q9b aD~ȃ^P K ìhmgSݒD:WXk-2}xp"܉AbwrC)um|wױ+ Ix5qAaXd՜3x|&yąm'-p>'!L> kMirkptyV4ShiP𐑫CyET!iC^VeA(]);7Ίnh9܎#חzˡ„RT7PgyfdxV NP#D/ˤP6R]JS7+_$e25}ѧӹOīr`4cendstream endobj 433 0 obj << /Filter /FlateDecode /Length 5358 >> stream x\ݏ#qAVpÁN8y 휼hOOU&lI>lOMVW ʚ-n"w[FpV77Y\նR/nw7wWKzƙ螖gZZ|]\Izfyu>O˕00;"\2._~>l' o8kh/_^}ӽMY)+ wam儧W٭2;Ŀ;a~l0{-bᕌ:8/}Hke@0|FD@Ҝ*>ߵօ̖ˮz9ΨN9Z CWK6n~ $s[gU^yAT+q^W,2na( ѽqJ>#-+FZmDl 29 %fVZ8ٰ5a~2fx3{dɆkc>ηHkjG$m ?T).gVVedyזFҼnf'=y(`{iP{=DhqExvM ' Rtp.4w+aٴIhdźl|/<2 w:7Qsr8ZWQuF q6`9/oB̔RcR}O -3wv7qExxB:+hx*^d< aS)Sp֗ ^y`M),h߹ȏ>3w SWNU1OSl]b^w9x >A.-o}Ni%GZ `KLlXlFr"v8oBi[~JbԂ0}mee. 
Nhpp Xfm>A,,d"+ T3m!15_7o)rD={#;nh!lqܔu.O&D X/Д( vsMk7@]B68 Щw Um #"e.׾p^->#fn!"*XDkU> f'{¢ Qy'Fʳ E/\NV VNVuyD'EXG#t4Ѫg5#ZEȔ%H Bp0OYH+ÕBH`LoDOKei2#y+Ba;Hc ӽm;;org)FU3w\MJ@UX $vFJ|۱"F3\YR!Qp+RGUPbdȜAg0Agopah`i<؉LF@;!`c#OqnL==nNM K%+9M2U&mטtC[\tR*EiZ g.)h;(h{lz+PlD3έfcnXB^ʧ1RuĐׁW>,1Gܬ#P0ՐRmz@<0WC8t [+/!+,Pp3kYx=[wo* E/J3I&/7o1ўE  dۛ`y [Ǎh7ݍ4rl6cfΈlt|ݹE\T#ȋ{Q{u{F#.ѐꎛĕVu`IoϢ&` J}LsI>i5jV0%>sس S0沼dFI#(r=S_Ǽo Ιv J@+-YdtHV /y[GCI!c0G'tI_ےbSŷ9fK-)Hs+cՂR|m\͒h S7 qUy~iȧkU Guۉr!]wpѐ۰<´s9#*}n/kNvB\7iz|μIoRFL2 QyCbIxEOiUU< g_ 饆OcRϚ+v-1. ٿ0Ā/txKA=8ve auH`"v`356 aVЃcDMo&[ޏ=na:(I7{زxbyi4Kʤ5k`& f0jz/HUTɊ#3w Ȭ6Eb&Ȅ\,z# w!ɫeu?R9& L:nR^kX K~2cF)T&B4:adWgwCi+;6ΎPRVj37>1-/᧲F.Δ5@O95eͽjIx*ο 5ГUETH~oISӬ(ٸb&t jL= Os4sVU=JV(ٺn('wJahČWu [/6vm,)x$7hdiw,3J%Y}8{r]c+=m t?%)4vSB\0Z4!CSYyJoDJyu3ʢ_/C!t2@DkTnYkL8*"yI.fzZe\T7) &V[f ]t4{}{ͤRsT+Ibvx ՅևOAu螦VT-85}Jٷ1,AA$W%vL㖭|m*X i2ux‚2rh'q{Y e05F#L[pLLaE3J%Z/,^_d ku"`8D{Ka,nFgj?] V{˕v[Vl6oߡl=.H=e S2k+9A?~ٮTUA\n`A^t%V *T3c֙461ht}N FHA]hd?mJyLsfCp/#sLD!IN+rHwɤhKUG<&L Z'=\ ٘%G}焈Mlca3RڙF>2XmZBrCY4M`hp.eahz* ?A\+uBI6GoaG]f|Oga'ojtn!aZWVqdI~h&Em2,l#?a.-  ĦU1φn 1=[WhX<~C `LAԓ[}^yϹ*JfL,A`A&Almл!,k29 mp9 ? h:6$*gYb bHp掇A 5HsGȸ!m'i*1F'v׉,$B*AT&4M] ZkpDT\~M:yy3Yy}#2zB2"+$,2t=q!3*J}7 : Į 'MQR\9Sͳ}qWׯ1g6)dԩ;OV*kU%!΍z؉?9EYM:yv`waz/~Zq% 9BѾjAi҉uvB؈Y!>|0(tZ?_WteB kP,@"è[ܜF>ƌFxgBFF>"f^%:p2Sw 6Tcm"Fhz".hmSb<$|8=#*y)d%Ygq p?BSHbx)`p[',':iҪsgrp*!-adp!UDHIhH}NF*CDJvd!Gs^@%GY4ICv~ğ!Ke20}"> t=egF~ ,0FaK>/Þp~TҤ7F|_x3sendstream endobj 434 0 obj << /Filter /FlateDecode /Length 5679 >> stream x<ێ$ubL`A@yEՉemA6| W~hΎZ^w^9dbuLIS"Fyz`տޚ-n|"nkGg/_^oBHs!VKz~)\8ݛ%뙖3߽~,WRYjjf{纟pθ >:̽!'{gUg\⋕g]*#iF%,Sݗ2u9-g!/YP9XHFnr2KGdF0,7cz!$><_jxۊ(dw qVvϏe ͻaMN9p^qo/}Ɵ v]M[ gKݬ`ަзbwS.9:@1|5ETbn8$~+a@ 8yq[_ }VVK`p.{ʫr^~&[Ttw a'o.|!]ba&km鎛%,sLϥw=#]XX j_@J!j0Rf(:f"a7 l,|@o2Q)fVӨWmç0;)h3'seg2Çay|b_?漥_T՝[\qu/ր4J^m L!/8q6Rn-&b~'"G9Gh :T?캢>p7L2~ ꎻj9|@Lr:d}#{7pʻm *&@`2qN|:}Io7}M|`T7[4KQ} zep$Ⱥo{"fɷҡF&(*x"ME+7"9NׁJXh0㌼6ح GF0/PR3%$f*Tp$k< t٤{HJ#]\/nxU%/^ FZ.a ]˱TOU‚r]<_8}oᾰ .z ѝa8M1/?6SJ׃sUka iuy$Cs؊;CSst+P!"?^ ؝z=ݡBb( .DpBb|4* pzM X!@nqM=+rD1Q+`(B M n EC.]zm0OO#\RS`(0>|మ)Mv$b ~K,9!4ETm\˻ 3'38紐VR0`G>x+* R( \u2R[h yŬ6Per+CsofIC09Y|HNS;BV_<v9L%Y/'L!8_^-% 8yLYMQۥ3!F+.ߩ)&'Vz!Ę L20h68Fч0?"(914,s^/<|'m.`S9c"p< )% z>P|>YU.|pUR@/ 6)2 dEE:U|߸(@ Xa("1Ј¸1aw[( ZUȌp!c&963AxgftJ;(rxcLZ騰a)v!JϖMvyjoZ!/t3R>7y,fGx pX[.xYQ1Ҕ,7':[sM50"倻jd,Or-Zl$`b'8K3M{Imib YlXbp6Ys, '+Ī,OH&Jo&[;RS/.? 4q3-MJpX",3wZLJ~!TPzZΧ0HgBū^/lfQ(e\4"T$+A@m94kۢ',1ks~ o XZWØܚ5OmE #r ؉b]qNy%(k(3U&*RC[̘cq =CQ( kObMD+ =Dڀ@ zU=[д4=0,;@jfl`#dv`S`(xwxfk +B=<^a]@Hq2|>n \ P#|p0 R9?s!1DL(V%&< tdj}8H%I4I-Ց{`փ7lv%q}+5ܹ#alSk$5ڥ61Oyuնq=QeuGUb)LRUӧ=8 ,퉒\z6q>m}jgjɛ):.bEb?ex0%։s]sEݰA UQ 1|Hla36LTb>qZj݁6G\nA0 ZV" XWis.l\$u,:r͌ U=])gJ[OtY~6&Ig#Q t0rmdǽP )ݠpd@.2` `_ L¼a#ˉ6L?Rg'eJ=κ0if۵E[<uXnr6װ61zCHu s<([-38R7 bC{T_T_3=Q;|URQSe oW6%8`l{O>Ó '0/xY VUհ\{ɦ%\H9RQ>9֤籆?VN}UE8(eP5fC)^*X0t7qc3 لbn bYOm)QN8^:oQg[:)U~mA-LcˁV!.G尣ˡ.yVsSd譙SVŋ;FzY߬NJflLMT&kP=XqR 16wwd)U[dR=?Sxj-1@ImQF .?.XOw`S&]1(ꪌCD*f38Wy2{Fi\̫)+|a0Ȕ%gOy8q.VˢJc xGV97j_6`39&pf2Aݻ3z4re!pRrjNb!.*rե%%7ο[: 3FW! 
o tOz~6$XBy]z4)V^rzs MxUWLV AG>(NJm._֐f( OH^Lf#п<›f]=ƪUhhyP!Nq;4{xK ڵ׶%Sh޹%,i:K$6u#;Y}\W)1br$FJ7z68!0\U=_Sڞ~¦'6B$Han;a |]Vp:v9OףC|:9!6BaJEA0V=ڃhKܳ4`_޷K( o=PrW7R^0C@zi5xL+0,ez6ӓSa̝IL@$`{Vr)`4(/o N5`pnva$@8.r"H̝/s%_kg/-F#wZRijv8p ^XzdU"\Op(*w< uzDO.%݆0 ֍u M û^`hOvp<?M%&vRB61:v`ӥb~n Џ#Šdύr ɝ>_chҾ D`ކn-(GbML`Gͫp@ZeM> stream x\K#Gr^_>um*.w.d6l@ |ӢD65,J=vGD>ŇfC󑕯H~X =[ ?[ݰxpE[=>-,[ez'~wЖqU4^ ?޽J6n𸕪?ݮ\7wO˕$ټd~/xc\qL1kˬv = {fn??_ӂL A3ۙE..Ǭ-Z ~4=H.h.IG/؏v{00];8vzX [}?Wr@6??=:<=¦XS)g0o~R/[Jqovr: { XHwe(DӑD,l>W0& S=A)AR m}rWN: ,HV?*q 3qfb0,5>l^GڭQ(0f1 Ree ¸nWy8?8H>t8nn 5*5a<6>W4EЀ0ɦ΍*?#i57QWMi0HTbWL)@nzclԅ/z ];8W(̵zleҢq=8)ҋu8Ը :2!HxYis5BYMXj )ЗsU5ha@VCn6y t@ { σ k(ֱ ~-s%AT{mv\䠓#n0|υ5-槥ce ]niq޻3鋛'Tss3U &\9LG`e3ګ؎A&#u Nz$cXLU^8Bqɔ'iwOn},q>  p6Mَu] w%`K8Vú9.}]m ]gv3k,Ր#L'HE ڭswaЇ ?d.|aP)q?ø1!>sJ8 2LLbӜk+.CX^?}DúpU iI@c3ڤ8µYr;nubK% 2'xu `;p65?kTfTYpԅwJ C~ɰ¨⻧}qlkz bU^{ᢗ\VWeOA^RF)H[ĉl³΃F>P4e\=1tSE @Vtk+6^[nLÉ8k/^ r^f^15-DCR0"L0Jf/ZC<'۞tP $xhfBP8':}(mۛ<#Corkc\;5 dVJ#műJ4Vǡͣ kOճ,Ր[V =lw>gIׇ";YJGKI$4WQjV#`օJ4׊CPBe \5wQ6pLZ[9a`Y#9ap|^0 vߎ$,cBLiflr3RQ_&ρaf87 Цg)Ӥ(fUWDN" ~<MRYy`#v%m#/ 7܅r &Ɏi^ϊ@j_4v |W8R34ghuaMHj`mZpj=aT rQ$>zR vf * CV @`0 g]@i F(=+3w%%;h"yw$P2vGiύ8.ITݸV)ߏ1Q叶8y .$`Ox6|| ȜW!/sC 8.Aj Q2]SEJѠd9N:9%* R6@iVpGݼ΍6mAk1t(5,7S{3n]=-PsN,A|NmыJXfZ !rwmk aC%L3qT~n:U Ωɾy-<)JΨxAPst0 f)1 Z<77*@PU(@m;Jk3hZkO$Dc11Jn%н1 sLI'uX/JS7Is`8l0*31},IDz\0tPeY/﮷s 4͒ YdX&wz8#4R6fi 5=2[Xt$ZĀF;eeZ*mNb,$"5d@+>!$pQkA4DG*N6Ǿ]Ni-Q8aC/vRgT gIs7+`@1!WTu_2=>9RTٽwPHx[2 xuɪ74`i)z80h<¿v  2{70#&kv;2K.řS3=`=yJ@X6$F5>vf9Ar?eGiLg  i⁒Yc!?| ssWUteΚ'fX 5Ss]]fO$Em[L'v/ySN~vE%sв3+ +͇#R|cL 1؃`-W3_ Jcm¯[χ`1߂.d NϠAch ҢX Nu bv"?`Z@YLpG9'9:bHmy$f|8s~ɉcIK~ K}xBӇ ɣٌK0XxNS3-ĬR͌p-1rZ^Uy.BlA >؛lBљU~>H M/]Pw0R_nn̩?+PDBUEEpZe]oO9YS|%g}zFi8Ͷ k5$zDXkE!Xa;I+6I?͘:~KYm48uh\eYʣk䲃{shuURUb[bc4^#I V>:v?o/^}[VC* ep|Bc6VϼQkdgQ3);śeuå/o| 4ne˔dZ֡6e_50:WyT 7.|\\fy4q=Xi{rnor=̟jAnzc!% Xq]p?3iKv8qzx{IJT]F Qh+a;uuH&8ʉj;y1^)LS\K0ﰂ ǸxuLq >x>>FWrI) Y̅í\>W[ nebm/_Go` <~`"hೲAJ*K)IaDÖZ* {05+nE:lKfUaUaQ9Ү>%)ocNʯއ-"Kx Ӡ=%B8m)?œorendstream endobj 436 0 obj << /Filter /FlateDecode /Length 4848 >> stream x\Y$G6<ġa #l@ÌҘޝvwۻNDdfUfUVOapOUVVfė_/X b{Y. Nw?/.a.y"<B ڶ^p)\8K2--g~\T-:|fzOEOpθ/)tUk,`9FΎv܆ea peJ›BM-4̴>WKTJ0Z@|ʋ ஜװR]~nX8hL55ˀ|z&} GoaOO 4"=2\+} yD\p~ ?$kJBX7qUS^%!]vր>^wvni5Z;G(h0ui@ aV/fOۊ$=>J]Y1< Eq8mhG#mA `}aIJε-L\"0Z!57k[wl$PvFYzA##}enx&{#6gl,>4!LDPq(4ފ)XXu.Bat&TYʭ&߈R hUoޔ%9"v5E FUޅ8ll:WҀ#~dj 1kԷ)=9ho%b}Tq>O VL('_Ƿ©d#itq˟]7?If$B:Af)0bFN!hCx 6t]sXBۙAdߣ7ktxrH_i9o]x^M&^!(׌鄀5D')/T!oUZ Ka|Lb*l& .x2H JH%RklQ0B1܆x>ق]nZ䪕Id@*gɀ uӟI'Ń_u Hbe0IR?T D!@PJ,/C}HC^ |R۸V N/}{=h6 d$MTg+#ih 8p%*||n`\Kfc\ĨC, BJC{r9o0zuk|Ǻ]\zś+v,ӳ|A֟ Lr)i9(wJ4 . qлz 6vx%5b\<2!H8>uaA5](m+0caJO}Fsc4}V:_KrɽqV+Ѹ|hH཭!êfO#TI4#)+%Li/D ۓGY a2^Ѳ t`(!`]! Պւ֌yMX(Xq/M qJ+㾸}@8lZTSmL$HW1iJ!) t@WkByɀH2h^ nrH *uIIث.ϦCE:sM7WZū4//֜JpvAolד(~wy񷋐׋|.| WɥYh `l#ؑ}lLQzIЎDvw,.zwxN0 LV. kG$$E qƢ $ke< @y># (/NG׺iմÊSڠ_iߧy7Q  ՘7㴡vX`9Ò1ĒJQb![S4Ԧ,zѾ^11rFkj +ͻMOc$j8~X^(Bȷ&P^ LiUFy,RnY} )&3žU{;,hu[&Ia?n}pBn:LW6oI?l<·m WB(dTZ2i#t]Lr.b@UiS؈ 4w!Ή{(I> ܽLN87e`}'1@1v CfjrޥZG'0j x]R*JTǹ=SVӈRcÓ㊌`<8I1k4#UۗjvJ+B`+#n;JFGy^n ) w7ω/, n+g 9CT ~qng[)Cqے`2Z-+*3 'aLd`;.VjrLaP֚J[_G+Y"@C|&HuV[QHōH;rrO6ٜtXtg.H|ahw"cڲ dQ,?3n)b(mJ 5*i |0@7U)X;fYtX+zR$W-n'JC Y-fCX d}W`ψ%[LA;0yϞXcԝ;aSe3[}݄Pe˘P"1'o8\ŔQxvLӸa5A.!\2VEf5pgNZ0@I8a>W5 sг;_e3[(,XC6Pst? 
{ܦn3Cp5jn6JkfCGtG&~7ƄBzJar C2g|Tױ$5hte l,v6݅"rrrH.PqM ?rY2=jn]Ics8cU\QՓOպ;4oO2Sisg-A^jm}vZ^hZIS<2(jHI2v؂z'X qEOz#;v M녣s?97H=wC7%1Y63 5Mb=QA)1ɲqe8!ã zŖF 4 c)]V Orvz ))W&eQvJ~slE^quQeq.,.M)vcDEhJ]IH4*䛽:HK3&PK;YĈ4ן$*JȠ.p+7*}~f~dJO@ܧ( XJ;ixc LސVZn*ۻ>a01AR=0'?5(>yӹN~ͬ!釔ҹRX` 0˹J+RF)o!Hz6VIKO;ccUkOh ~r 'F .B iVo'wo6Z̏o~ 7M&}Яx: lډT7x{T,g 4ߩN_.na"¯fqpһޮe 6ҪO]T \ z\Z|<rBdB7.&*<wV* C׳#;ǔ xp1vHo @ʷeL`zhy^t+CFW?Yj>!&{8>S|I> ;wv`0Ѹ褷U9 f0~u3Lƞ$ :9xK,q ?{i'ϛ?:y@ׇ|>Ib-~^K};phFm۴ч.$}}1@-ӗӾA{;B`~ j⿦b-l =4 ·8ϐԽë" ' =Li[Yv<@&}zU pX9Fo1^зwC|3 ȳ2ÌexwG ۘ/ D)y]DgU|>AGA#z?'Nygl84{?Lh=O1Z>"ģd. +endstream endobj 437 0 obj << /Filter /FlateDecode /Length 5862 >> stream x\͗qW{!'~sso4>$+O{&$f8sߞ ѳJ4P*TMײMWxm~btw{ OLAp >tH 4E,< %,A{I8/0㲀&h (ۜ2p\Χ>Wp죋 +q1(%8־H 4Ƹr?O"Ì3m0+d++H[ 0ުwYvZ 40&g d 4yk|͟G:o )ؽ N8KoReIC 17~ww8q]'|V0 J6jmL{8 CxR,QCXuJ?sw W[X_֍8.-,R=,ݱoXaa=xLNF/>x@hk 8rM&=Gvni2%BߎF u _$E"c]M*ޔ`q@# tw|OU»_\5KG RX)ɹF9ymx2 Bdc`V*c%B.}/0iU /$J܁8C]ui}XS?Cz)U^mN+o!$? KԘ:,rIwgF&Id-!1/KHw;{Ttކ[#'^,x58W Gd,'e5^G 8ieȂ)kPn] e%- =V0ԖCD|Tx%s3.5ﶩ!bl4?g qF,%4C::a\ 49]1~'8!S+I[g(ƒd8_0bGJ87_ɈBk~iP0>os8LOR:>e!ߕ|AcR?be_Ng鳔>"):hN=:hy*Voçepjy;Q=^S+.WjNq8,tfE9p/|_4o04+[+9I?&#)-s}H)ީ,9 *sͧa]|oQ!FH-xa[DrWPm-ZDSܥVf)I.Zke:%),#J`*d^򩂤R s!UhKn>lsο&{wIo]*1Q(0{㼵śiSIq)IUae#(gz/HTjU៦HeklR)i^sit78B3U'3 avM"8ksfe96nF(g.~Ld9{ĞPJf2Dâ:\vQi[mSY瓦n%+ <2M\j~?Wirrb ۦVZiMs,jq0cb֕Պ8j${{41cRiu<p"I&*gÜV߬4 @@|Et~mq>,- ],t HYnl [`g|&(.UdBuLٝnŢ00t-s_~;Եȍ_[께+(֬v"!;Po[f jn;FhUt;sE-/ E.7c1}aKb\8oayroWH QH`Yq`L^0̪Tza*1-|4 ~f]ٯUa?dmcSK,w+A^,p\|6}/U519MYa%N*p,d2:Y+gSfppm+ṳէ& OKc;P&"8{CK[%Vاp)g4*~qeLlZoIܥp?i?MH{6LHȯ3hUF@TwiI iV~S%L')$Xb-s&Ьd,rnӁo}4yx=iJ;CoЭgҷa[pypR㖂":,P $\FƬ;-G#Ŗ,˩$7}I.wXK˒͛V攋m"pp>y]6B\^WZy KXvRA,"uln mq{v.6B>N0Ջ ! 41r&+a6ئy30 Y"{e5hb\I :tTl@%F#3B+r :Kz^ 55IP aL J=11 ;ﳎË`\!&,/!z#ؼyV '?RHYSlPT4',!hjU?pgVN ZN= A[`P@.ԠE%|dKtLY|Q`lNUbiߎ/pnP׊aW6xz%FpakPmX*YL=F]̾g>Z $6Pʌr5 g&@iXPWlZl#"5˒@`R7K*uѡֈV5ԧGERk6ک.C3 VrQb[uWBSok $iG,+Lk7= Q+얾 T*ܧT\)6'}k8LBNgZ]Cp !ucMLD.Wq)W2)q#"Y%+P`ʻ]8.m&%Ko-cz"Biy@}?sy5%j׫o ȵ# }e{KfD`Ӫ,C)qN17*=`PtI'[K7~` #S;ԅ[T-jߖ> 2 1:xAxOjK'-'-㬀a],k~ |Lӱ?xRTe. |5VZ׎;ǟƣdhvPZFR(_حgşC-`3#yee#Z<~p|)c.aIϴ G:`s/TVPi}> stream xZvFkYx;4`M$qd:g0IɰHBH8550˩ntKG [flD?_L/]'tt} oGͿzt>VQFЕ,e2'h>!g1*iMuƸkrӌ*aud= I⧫|[o#DQ)b57d iQC.9Ӗ[F~cן_4\vsx 28kG*jFap~2u(-w57Pٌ6nsbYq4iy\qpvm)\ 6x+'c5)wllLF靖̯YM3}µ<=7m#f-ʍD<z,T zL RəSn7׵_Dh 3 {vW,}{'n3xx4Ie+c?S$2ɜ|C8{}WQ?7lKW&6ɋj ofv9"aS-`Ԋm1f]D7N< X:q3QiI@5YT` L8 $3mA_-mUTa0*E9kh7D C6Ty>δy(}/#xF؃` :>24 HCXZڢ,N%pL3=B9@@R fo!us}L@i$Rzz{ԑ'6 o'= Z7 BXj6SǺLx\&ߕފ<ʷys,y3F))A(M?$1Sʫu2$:‚Bҝ:/,=a'TƔ;PXQN:.>*:Z7l!L7v+~91=0N$0`@wG rb)2 *4d:iTs,&@DEt{QV>8UE[ш|M @(ylei%\ZH] 0M*ѭƖ$O^Y_/qVM }QU[XE]W""QK Mab~\ m&ɘu>|]N`J*-[ z~(K̿ a!FoqMMeWԙRuQ8@L{uLo]qrcb0a ,U\KNrY gix|ꭶNƀ- r< gc: zW^#:$>Z{R@>~?\1IrNF^%"wF_F+Ne ̖U*%a V|yCܑ_eus/t dƍ$R! Z$x-ִosfn1M VGjaMYG_5ʌ}2}W:7ڎZ`t9]={6\ P1:Rk|HR@‡$ۢF5!E8哋|pY f"B`-Pz5ò#9 K?4*b$il2#Kd:&]L~CÒᚪG ;8WY R+*ӓ?^j$1D&]7**h, Qx}}w$Hܵm(뭏վ&_# ^X P~/UqUAoQ:9Go0Y͸ E}QvZ4٧Gou=?ﻍQ/bcV> stream x]OA  ~=4\Q?@ai8/PkdvvvgI?g&6_> stream xmOaǟJ4<,a^ʶlՆZ rv}1|L"B(I,\uCc^:zoֺ>/{m6^}1j!ѱ1N!^ӱ SY X)b ʷ gihG'BYt#i@86׈'NMhOi{! N~<>".7?}uR.*x#h p9AOz>jBz jxQf~釶#uhTA=<0$c ذ( T-mGaptT!-=]Ժ&൹Ǫ=OZ`C2KH7~}a5Ys뙵ZGUH:9sF|diτh~n>[vrey+I۸E0_Ԧ`2hMLdIjU;,g/ \ʳtDǞOuCm{! j*=}zsѱb[ٛI`ZwsR(d1/f7'b$%9oyendstream endobj 441 0 obj << /Filter /FlateDecode /Length 214 >> stream x]=0 F"7/P$.6@2p{l^ےndS'K!3{=B4Um৏.b{pJhicqn׭~U|$1xC)K8rԠP (Z]ڋ.AY]U'ځZ:PXQ k-wA9An3g&H!/4&y)u"endstream endobj 442 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2057 >> stream xuU}XS@ѭj t$:aS@EE8B. 
S"|7 _Tֵ>U+ ˦V{6v֭+p$E叛{?^xz0""#c We4Ii䤟ϟYMuΖ~$C.~ J<>2eS$ Rs?d-fg23y2)5C7EQZ62Q1*G+Tm~:hss:Τ^719#-QMW9O>9jI8X>u>vutԪQ՛7Do E\*51C0]Fi @0Ls0mB>6U4"8kkXXM7859>}a̭晪}gԸO&%4cQFͥkƫ'0 b/X0e;}f2H@DdAVTFy<* U*TjP-C5&Ԍ2di{h %EciYlS5kחrӓzԟdxB#/}?esޡV,KFޱTya$7mDr\/)*̦B]T'Fp =U@ Xu-+w#&㤰8X 7&HXA*- i) xv0xz猅b"$$ m[;v (a #Q*Z1U-'[A6 Ǫov*Q&n3i=3/ș4ғp^rMh!_+GGx>]Ɏ.)[1|7b 1^Oʤ>**zI.=X"ݗ(6Q+9@`))BXE|mx0VtI8߹j@Wurr`6+"/'$B N# b !9G3+ E x0`=NDTy {XB\:"}QaXW99^K`^֭}1KV9;x'7  ~zyPg||̽2vD+In$UW=Oz7WCp,\ilץ?6ZcirN'~ř ڿ\ f&bnR%HK o; =TdтZs aC6{1cgjn};p͢جtHCR@5Xe~/wq5q~1n<5|)e\(,QW,@ Мo0;7ʴ4lbIsZ؜îHL7OhbXLs/Vל;U{ĝ(2FtmctR:`濰endstream endobj 443 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 467 >> stream xcd`ab`ddM,M)6 JM/I,If!C<<,~ }/=[1<9(3=DXWHZ*$U*8)x%&ggg*$(x)34R3sBR#B]܃C5 U4$(8;(E5|}a^}Kku.)m|1|t &4NwBO/bz]KgWsoÙU{gΩQYU]W.7o;{Z%*K˦j[ӿ\t^I=}WyרZ5nY3t7= zմ9 %g̛Pq ?oZȶk?s=g7 ==3zz&L?guO}209eendstream endobj 444 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 708 >> stream xmMLq#^&%Y!WpV[Cӹ/ *$"`'P4D i\|kmyusmޞ=nF֥/qAs2¤~`Rh55-Z'KCq%䫹hz<FoegX)i @J5Wo5jq#+Tw$5 zKsPTD~,([bZB[GZ9QQWZF/-()/ PhA$ >.ƾI> |0'egΞx{Y1,Ӓl`imتokp඀&"( ve{ۛWpz7\d>A7E}VuόQ|YޔGAtǗ#qⓏ揌0 lי]{c&Ig|CA|t̶re֮!{(> stream xXyTSw!iզ%m^jθv9nj. , š}$ QvTPkݧֶOjN;N_Ng]zyZv:}8 >kxĔܰ1\tµy/cOKGc!AS?%윅f"(mOKԴeKp!<4AbQ <7#=4>;)tݢB7Є̔PqJhdrTֈw#BWm`Ǻ3AO-^)yG//HؠHX95-="#3k!Es0b3,GDVbXAD+D XD !%b-x@l$69x?;DN>HNz\C^V:􁇲* :#'80#q3?50GEm?\?uBJcf{47% *N[)΋+3J ZojzѹJY b{)H6E%j(Piv M@VTB-G&!.@V`0;薍 7ⳗ3B efcRKFY^;>y<w rTҍt3>]f.+0J\`9|Zu+OGD]q7TC5iE lT0k;( */Yj~`n47#>1 F3h[q\`Fdª(i靗P@). we~?*e "#ce\"Ժ2NG_".K8 Xf#kv}VN 4Hr i/r }n z// H$Ŧw%oOԧ%41 ."=G>h,)6hZ:1NuХ yv ?gEpkU"ds ;j0eTKں\PܸzSE F{ “7($CSiÈ Fٗ6'{4z,3i:,%jVM_AÁ j]lZ(/EK @2- |Yp7+z9j?14,%m3_._yx%s:8xF97>n}T#Bg,+$rܣ}Vʝt@=Wk;*O fvѩa4&-`gUiAڢI)PD*iQ%{o;K9xDoH19jQzuOts|;Zn(wT)2I@t96Y{aك|kɔJK &31"D*oTˢҢ2bSei q}ꈅ9OT+=rmiKTϞnWMй+M 5lVkm+G%?}/9~*8xvHKJNWW`|צܳaGn~롂g~zbpT~q<"吭ySŤqW~ƼkvL"ʬP;̶},N}`©{YV zd}o)+AQN9cLCXq;q7B4P)g,4# 9:qH&:SEo :tl&wu\ P\!ʨ):m%hM=}_}~ PG:4c.R%r(]Wre¢|6LXvoj]܅,.V9ܜիc qSiZUИQ+(hlGXki_U>k W1͜U{PI|[.J^EI<[xOU^$<ҷ22תsНЬp\nռᕟO`NFva(*dP2l/|'NV-dSG=l}rͩ"[DQzAn-7QYzJ}}Zq:˺*?z>9O2>ᣏ {| rk) p?kѧea&u&2lg*@ʔum1!yy=I]yo 8ÛVX ҵ: (kѾ&tчoGvgkχR\/gzgG:1WzƹR}BBEWP/0wX,%Cn n 7.mgB v$+(@哶`4GrsymSf&*NN/x*uM\ZS4+{Vvp^g[AC~XM=?|E&$\~~StR &&7o솱lYVnT0kևO,4{tg, RYV;JF/;_'Ӓx7_?aZ`ieP_4P jіY۟&:;MWʳْNqW\C1PY>C*!\O쏮y+Y lʄQ4Ɯ=QQs8U(5TlXGoK4._}P%3nȫ(?+KXr.ycU_\&qƩWEj;d64xj\]m Sϴ$g\h&E3F&Ű , /},l+/7rký}XJƔHx"|wo޽mWFQh. (8RkDs 衄\u8oz`duxwpiFsztHFCQ]S{CcLjEڽ}sq<ۏGDFRK;};` dF+Tؽvjm,oh(oNiw"9{GcQ:J'}@+ }>G-pE)M:]wVCp\-`u[**d~!O7$#p2^8y㳿ȟluZ:V~n/uCsv^%RYk[y#!{ދRYTFV}ޙ"\ BބKmhvps[Ds?w9Q(!RFe5,[6՝{0zGC"` moaA v;X'W*eD߅Iwq=^Mؕ.km?DM)A.n86KK9T1@endstream endobj 446 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 420 >> stream xcd`ab`ddM,M) JM/I,If!CwϘM<<,~ }/=S1<9(3=DXWHZ*$U*8)x%&ggg*$(x)34R3sBR#B]܃C5;U4'841)K!E5|ek~63~w/3 J+WwW,\н`fz\0%~>eꄩgHΪ %ݍۧ8{ČnYOϲFҲ3` Lq ?oZȶks=g7޾i='NY;wƴ zxx"endstream endobj 447 0 obj << /Filter /FlateDecode /Length 6162 >> stream x<ێu1oueQaX)qX^  ڙњ^5{q"RE)gVV4ɺ:uO}յÿٯ3E_WϟC jnPW_?J*z׷qW^4__vͻU74ow7ضU.o0rۘvvJ76k} / >9`Ӵ?+,(|=:6Y>V]=~>J5.|fsכ:^A^q_I7"oiO`tg tHP9失;%f#8͇4<*ku/f=7 \/ pMQW7ʴ7#4?%֖i?ȂȆTYs9<^Ç. /[EӦΚ#a9 khsxOX0삝!m3DNTnNf,)w9=?{]73 ,bЈ^\pIxoބw"_ljH$z^4~ In$eyZ?|S*'m @!(iS~)Y)"6e$/U@  P9"-VO? Lj%yz&!ĔZz!wI\p#rΔAna\kf(yMR jȎP wBVh4'?x)uG) N4f۸Vi0O$: Ӭڐ4Df `{',8Ӣ)}CKj[jM4hh|kէ3HL2N}nHX-.mW_jC]N! ̷x}@pZ8_M;fj,,<KR[ KB}v}cq@1/{${PxoL5!*2ҡ$- lGaT@5Fu )b^K kLǸsʉ0pTz? 
/6lf7yTDZzcƨ+@H߱-7 YKal tHkn~j;'7:PGCVf,Q.АPn{97ϰΒr ц JbͪoTQ7 ؜޳`f _(O>HAv⣨֤}=S$>Lš#gh`^>!5b[9 JOi=5&[w"3 | ǒJy3llZs QHuIeViDD9 CByijg ?ѽRǶFoT O` y \j@!M )uk Fue5X;#I%|=QR/wzЧPR\͜N#U 7Kͨ}$y~_TibJ>'{I&b U=.Â逿%Hg~ƓB yG|hqs_|EMW20PY݃EDZcm~zԚ@k+ݔ{+r1r2"&؇z䖗T^x)$3 5Q5L TgE #@C8D  }O0i=h1QM<9DHbQD9rBE+.um2GG ϾR޷2`Ym H¹h6]U :zgu 3o^4:Rv[pW:q\Î0 ^9p]sfyW oSU! h "l#(K mL܏o#*! C{&9)<;A$X$^8ZÛC7 8x:x?1x )6=)_w3`Dq:]C]ՃڙN<]ۧ܎4g X&[XѢ6eME\OV]BWAf.x$PRh2EL"8kDB~'`iqiƈܟdžb1; o*=p#׶O%قd lo< lrTm"Ep#W=fHp0؞*Tgy<cl.x XG4|xV]H5q|!&DKj>O 5I!OL9$O]$E6P3y 0wnw <Ӑ~Sz(xIE@P_uDئXǒw0<3û-2Q1mV.] C*Þ?"kee?" Zi3=SdZOK󙳼O|$+9J,P Ù35D$~rmpVZcPKcB-H5ߤ9/,Lpʞ /ŐZV ߥfJ5 jSeiޘ/bMOZ6ѐ 螤lǪV4">.,]]Sgr6#A9Bܑp0CE^rRMdNОyTsz(TK L/EUx`9a=ߖiJ(]IX-F1JDچcgF3Z+zJ*ľ`zމkÐ5 j=nT3(āh>Aa +w7rXN~)} ;%*GޣZb!ImFz >f8~wg&y ;`{CoK :ˮAqkWԵ`c ~|ܞ%f;M[0 =i,yiHb4Ɨ҇Sx7eBaƙgd uf;j)v2*7Ek GeQ,i?ALX"~k~.)w >k/PA ג /u¾(r(0ӷvg[0/qL]YyvHEK^DY/B,Q`3ݦzθI@KWvGaDL`4۳gz $>׬Bi2qGE61'MPӌ 2faX%FH$ʚ a67hcXkϹqHe7 HX$ 6'..dS`:cHG9iaіiMfJSiܤdÑb,֞1^I7<K!(v@!BEA( Î-d֮7ߠx:zNiܨYrti*=v!b`]30r&2ZH#LZ Y°ME9}~/R'NX'Dag v[8 kiRy- 8-E+QEYZNIZe2h,.l?ڧNe2 aaY<lsPl %V&]* 3;9=#,PzAjo^.|54*8+|rubD1WXi7|C"'+E ml賌TlЈE[8V"ژ@ܣ|G糾XjCH+E׊<΀kJS )#jE![h}P.dʔT:$#}L5Ya]L?).etfƱ^A(M&U1b(FRJZ1xib]cՃR*c0w٧.f!!0f{̚zZZ bc!UGq,oR6Fqڔ& t:۔y!3GS5o-?5PJR1ۑX՜ȷ @>r75Xܕf37L>9pX{ָ6V6AnۖV:LU]gE>%E1?DdφNdQp)Q7w-Waq!Ww]Hg|]:n_Rngaѕ9@,!Or9 1p؏$ڇCwos V^^Rj;?(؀Z475X"G=qQ[!Gq=a0]ωB\~Vwɝai h } ;qXr ƨxOdlGX$%RYBݟ,G U&6ۤe^<@UJ'qTc-姼h B"}%'*zq>ڱ!G$ۨAysd!QS0CB;śm0!r`raUT)Սhw-=`1mmBʶ:[[lW7t &D=N>/7{+9mD6VBT°mDܧJ(EfM0=UW`Q0N,^ή\ۻp~K>"Yk@u拍DN6+أ򷼓&>NekD/cv6 7#aC=b9 6ʡ̦2rZa9)`Gw \$!lQZΦPn6~h*Lr͚{;,+q,A˱|%hNnf>,rW#FG`0Ŋ-/wRf'ƶh'yWuLbw3{)A$7 0ɜ%3,E>5іl7_ H9Yi)HCꢥY81"Ո3z91Vi6 Н1S|1 d)-śdNywutݺJLB(eV hcu[]]J~L_="]Pd!%ǡ-NO$uK-B' W-N=܁{|C%S v{hW!E&#]`H .;Ӗ,iКeN t5:Sz w \+NO+KMyXa=Ǿ6KмLָu iX^`#B ttE^> stream x<˲uvQ\ &T^., S½C E=4 ₸@?N]S]/n~xW?] mӊݓWWaysխ2'7W'B6-xinri^*;t]zPJ׍7m6G շA^fC`.ᅷFu9/t dMp3t7 :Fc6Zi6_HZ )Js8]8F7 V%L 뼢˶5#Kgol1Dpb9w7lʅu $akc=ɿ>6'6DnqkªU%Njduzϲ[&* F8Ī/ρus\&z9l x,i'Kk <7ad^Ffo{$o._yZLTc̿@(4 ,B^r)BL=g86AB9g8.:M2NzeFe 78u%-Ojnm<[sT*"ԘR4YXWa ܙ,[G) (Fta#ۇ~-1ĦT@Ȇp&AXVRK)JOgfH|'1q' P+=rA,Lc<"ЦNlC'" *5 > A!dLwB*a3ώAU04݀F~?}inW>rhbq[/ɭ"[ R]*|YM%g+-ߨMGa͆Eiʠ'#}!KqI oArGE҂bV9\EAB"Mwt(}2%{sQ($"2s!lŋ}pJD+ z.(6LKLɣV -dB L31$e!) 
CE/ cj$ꘘl\1T*j =C* ǴA;G0=Ip䨬ߨs#bWr0xWKFn&nJi~_^*l̤&$)=/u`#> /+>lr=e0I]c[d!AY!_%!1w $94f~V4,d 3꘥ HZߌSKuy205T2 ?oy<QJEha(n{tlH^,y&2> K cnW&X;]G-|wzU.'rŐ3#.PQB$73|RCGZIܥgp<7rÐX6 nʦb+ҹL0ISz7*E8X]t[3(c ˌ qO7/^NS)Nޚq 4N-,'[)]6J%YRB8ʓ)Ѣ͏ql1܅UhXxerugy:b1sVY2uLZ ;$o]hJs-+%6S\껥jB;̎3T=Dtq #L1qa&a("$ F,[SEV,4qy0=4K`(<ŎV^XG7LZR[-!D7\"!Tv- KGRs]!V 6 WvF q@@m("ƊAR֕g{Z;ۚ8ˍu<6<6M[}.㛴eLhD:,hMZ66pRтLmyW{ VX6S<{YlXQ<֢!, VCo{x'_SusH +dVRǰ%/jv=pYws:J~Oju2|l?- 3\ްs&FZϒ 3T|Lйg8de aU?%!9Ka^vĴӎ<ƛsd73bZXe\WEG+:lu U| 0$*kyl?Z%S6Vwq&4\-"!j!nt+,$;NǠ'(@4yn]4In<=[UVH:F- geY ѯS2gnjFP$ÂEIXJNM4Vp>5k'5Sm)EMֹ+=|ؒjEB6 uge`)_:fSH!h|R"پ~7ἆl |<[ 7=lC] 󢰂OyKSV(=v[ ZzѡL] El}-W%p6<jg|V6BRVJw*TibZˏ|zLB^>?OAs.T9ǖ iCcV+q^őZ=޷'<Ů d΢@ჍZa@\@vؔ-c㽥dMo*XWkX#N 3JԦt}'DC1yy2 HzZZ @zCσ Qp*/CH-2ᲡSceKlP1FUfp8ryCQNtcّր{Nk 2arX[00w.å;_[㥬wz\UJy A~i7Mүc._x8aGx?_r4lNm<0?Bzq7i B1'1 DTW\%f$;N)3Kr-6xXe6,$ MIW.|;d Վ7݅"Y7N Z&.Z~  \V.Bt+EeFP$ S*UјW S}MRV 08޳ e\Cf\ZCz/FA988i5 )!1fhK we[;X-rt.q|#f!i\|hyނj,Z d#(l,Ôe{sI>g>yDs%4^DwEj,S>E6 kT~m^.tan03%(t-AUwgc }1W/OYcm#`=+ǀ|7)D\ ǻBZhBŢiԄ[SNjj7ftA0 -*&-Dr4W,;c\(eQфˉ+bDlJѦa]T@5܈n4ކj|?<'$I -x4E-E(nN}jxJ a-p&qÒ;Doӄ2؞'XZ-fmΑ\ˌ +ؚ:Vjâ2 ?v!7kS7 +4I/!OX'/R,:ڜq\TIS4q/D|M%qM01,eCW-cbicGl 2hå+HM-YO( +?F(&ax0?kendstream endobj 449 0 obj << /Filter /FlateDecode /Length 3894 >> stream x[Mܶqڇ0'bWl؛AʁJ+3Ky8,t7!gvtGϛ_n]k7MFMa-Outbsƿ+6RZHiM[wln7ϫՍhdnԍQh!jSJM+۝Г0ՈmT9W}?6BV_IZ]N8Ҵ俷J#]`Mw7PovZNkձ?N;W'84MuꏴDW8d?ƦRðfG}?1l~T[ck}`ǡOh*ҰFSw8]X?XGiZn e4q< iߎGRߝl( 4.6*Opa_zSMJ-Ol:]@|t ûr/t%_gya4病+`OA@zȾ hǧmDJPj:vH0iiMH`%6`s/|d}7rPVe?/UK|6>m0&GA+-0TP[k 64960 Mћ0*E- -VNB6L[ޗҦNĴɝ{ZVVʊuNՎ*n0F׫` b6IUfDo@(Wժ!W#VhвaRԭj{U@ H-նq扅 ܜcXKD`  1L?(=WbwDZ3%X^h -׏t0g+~V/- Z|! VsKx5T"%ŷw2!ID7Gl$( 69D.Rjڏo}Y#g~,jwtZmf|7UvR`"s]-]:3Zy6 ld~HoFW{FV1% Mt~\h]dnCgD}:܋-d;o1Z\ME(2)<%"?Xp\$ Q1މi}oc>^BEJ|n"^`x8}`dKC ܜ\g!9&"4:zewUpW|>MϘ32.cNt_SHktا*]R!#2a!\)PlZ]JŜF?YyBs!p&xzg,ߘP|/?T#kS!W堵S!*DWh]Flx>Ɣ QTYOMtM脠)J`K!dEvrTm }yaqoxrEogB>k=y.:NFbolD6Ǣhcs"E <O;[^cA-@܃mqN<90`/(TnD~n'O:ۙaV1AShz`JC8Bߓ/Q +7q \uJ3b ֛ے8t]G8şQ+yU#טHgvv+b$ZP$v[:ʥf.ڃqpbW/Z{&gI.գTa1-e<5m@SDG,J2?1ytCC-_O; #a}f~σo\ [4hzpH K17VT%endstream endobj 450 0 obj << /Filter /FlateDecode /Length 4929 >> stream x[KFrE6Ph7ӂkC Q]]UUE=o߈L2a/eSlS>9tWp_7?n`D)Mn 67Wn*۔lcɭPU/0q.tθ7wW?e-B Bfcן/fNRfs4gfe^0mh?JV2;v8AEQfue# B9ݞ/@# %"k-R s?V%m(-uEp9&tnt_v{i|eus<6(zN7*MVDg:oQv5;)r.9ߠ*:=cvߴ'w $0Yw[T#x <{ׇt}uO=@-ȁť- ٿyg7;Lv6羭qkxwj@GچDB0E2P}:P L{u/?y E~̮͡#zS*A2j FUO46Ӕc|Wτvtv=4ǻadt-oӾoH"Ʒ:zS}}ؚ6TJ;W>L{%o[PE5ꊛ~J4LV:%T3qeHgLU+Da!w?3g9d9-2W$lʁU\քyqyp VjEk돬t/Bf{? 
8  ap Z#6lnM:.u&{I6@k2WŻ{"܅K ..B!h` ՃerTP3K+2\J,&J2^>9)Az-Phʬf\JQg;dIh+( X DaK@$1 <7‹{tR+LE-R0x XāSTT*n"_]0¢Tm–pXe12 (ʉPXTs:2yo EgU]blinxx}9 biEPp0 ejD fcBxFz?Y,Sʳ^ara6jsr1x?'kMk>%lXj!tHvjS߀r]TFMyQ)hXRt iPit~CHdlpƀ##=d@ l×DڅkoNbøT:R#'?L6hb&:EW.ɡ6[(HZA tT/[WĢ/{;Z'_@]HvUQB -#p m1L -'FYyi"(^ʒFMu,+:0 kHt1\?B,,0 `O䢝c%e0sV(D]vuʰ V(Ptưsl ":wnV"kN5UjT6,}p:ZW.IѩK]87<)b5vB IẸҔ;#qeKضx[-6,k„S@CK8GXhB+C9ma7M: & w>㾋S}^js |ic_~ \8?6+&[I c2i^AWhPR:&\WJ`6n 7,+ޏM%x IVw7cNr~:j0%}lv~@jIJSGdvO}k,@Ԥ_Hd#M8H}{؟W\ @]}ܟDyLnꀗQ̄īr UXQμJ"&S\*]HF3#&ѸS8UU?Pr}!`:,zV[f& 494汭=[_phsݵ"U,U16ef7(Dԇ%>nmԗBGi^ }WrKObͦoV9cpdz za BU_h U.\Y\&{['!ÎJC15K Kkˠ\!+V]Leb߼S`.Hk`kOsڤXk|h”~÷XdLSw~痓.M6՞v!WW2a}N m [_!g-~e/TacV O%@0rYPh#Ҟ4lrÀZ90CTyJMinQS\I<*/Vpk"gs2vSc{Iv}v YGC.3tcCW\@u.> stream x[ˏܶyC[2ȥǣoi4F$H](==HJ"%-|ܟfUf]tYt5;WX*](|f\q>3ʔNylmY7󪬔0ru|>_!ʰ:b*ִGX1^lKZޯ4H:<#"3 ;] j^LA *qsZGHV:sr DQ#$Fqoi4fZpo8/XOƷ-q*~b:2wm0?<JLij@@LF2 6A r(% #傡HuA &z(pZ_ (h[\HCYG{ԋd1']Zu.c){Rjx>r¦!;JR: `#0+ˉzs󩖎bk<=/R)MpNk!+F*~%rkJaGofZ>.Rx D{#זD<; mJ oQTzdT`Rt=P9IK .[lUV]U;݉x{9-Vl$ u :wѸ˲f쭘/#46Q~+_ҌGxa`TG$Z@MjFm0;-F5qi$Q@„ t`[pnzʔlP^ U:* lj)㉈I*1,ũv45 .csCpL@W,wnU^(;vXluu!B稗[R ý>f{:Y v(OV !C[3$.Eȁ,Vf989| J \Q zl_e~ o(fw6٠\mG,f\tCwOf67!У5P,xԭލH|g`A(=$,üZ}rjҼC|YF660:K/3kR$[qE%B_?9*ȆuR ZACVv '7NNٻ'JʪV_8i>v/Cm!|__-9#>)Bك$0m?H;DJEߎĞRn8]&hr"bNfߤV /ɾkL[)򢕑Õ > ?m(0'̘G7] vj^f^*?Q2mq G1G3<р a$rhm\.ח.qԓ"vLlJ0O,h옌[Piǟjdd*CY0;;U6Хr$Z$ `)UɘldNU>7>* -XPyerf^a"8_M3[!:,"CrlmYt.VwT'DI!T"pI$hOB)Hxc6=- |deitE3hܟͦ-!i>/zT>-?O aSETCuHط-p5t`*ڙw(F_Y5QUAk_ʊP[z"*n7%͌v-fixc?ON)ߠ_$pP5; U;ʴˆtd(&@];6Dn5F~n܁t(~LR=c,pw&f^~mpnZ]Gud0.gZjd9T`PJ]pxج 4?6;N2h zn4*rG}X ϓ aA` _9sMXMLWX$gb݆PV{EKH*j.gIE.L=#ɤbPd0بqING/dCJzt08 B;WImZW& :xtpLAHv,p.ܟnm{䕀,}4G=Qܨ;H Zbߙ1tP/mpσU9Af=,@whg !X_lH6q":D#4[KfP؃2t#q1Yw .-y^SpflF#`c8\}c&x;bĕ'WGgik5jq*^> ቉|`6p1:חZz&ktCߥmV"޾WVYDwKufa(LW%T1{7CbqHCb?#8\Ĺ">^@Zc.1֞K*-,?NN8Mm HقSGl^lͻ欹#0M\(JX-_GqQRFV݋Oc܂Y? 9jjUqP'fY?q+ѡJq/̟,xI_J^7"kkpUMnηW>?wͫP]囧­fwi>guRXm^ЭV?L ]@>)hx=ZT2Y20a:t}~qќ6׻-Ñ/fSy ̗:^OyI/?-eb7I~DzU ~9Z؂& K|Q'h14G :J4ZE+J{+WMEb1t?1)endstream endobj 452 0 obj << /Filter /FlateDecode /Length 3030 >> stream xZ͏ =Gnrg8@b $=0v-W$^ g(mə73~˂K_f}݌0 Fo [Z:_T:7t.,aFY`d6Tef6ՇEYkZZrX0o }>/ڒbɹ(J! /~`Kf*K6eȾ (KCw;d%{zVtف,JC\8s G>%ҫ;W~-U &l*HЂQFʴQ^LL,y bT2/Xւ~zXR}}{J-zS]Յ `J~nڻ1\}́).`LZ,-%$'Žf("T"NF0-J{8>K%td@x_\d\.kw{Nf~7,RZ yxԭIDw<|0KZ|EΌ OҊLB`ɔ]x#\|a(h=iA0J ᷂6S޺dCv.h0Cু!I" BKA5Hݾ[ <(IF=[F݀\bCEԅbNr0Cy܏ K2#-S9ɘw^EA#p_P9wV팞E]z 1(MepbJ1=1,lD): l5YR\s-[e1?*B Cѡ/ I8(6liDLQF9\H8ۉ|2*Sh>H`tN:!t'׬rsjsB^o>a6>%&,4<ڄ" N/Rgy986I!Ql;@mB)&uJnԹRbօjkuޓ6_ʅ>{H DHB*K͡a?\''l|Suq|TĤ롾1t`~y9`mօ,#VT6cWW`xċ6<*ҸFH9.O D(WD)~>Re(#z~قp0l.4Iy#+f/Q^&k|@L 5Yq&I*U`@A)8_So T3p`yЯͥ ʪgTX7'nפ&l$0N(/B CRx6W,_]jsטp"}~)qI𨆢X܄L)r?v5%6:UQu_B{$> stream xZIC /R2 $ېۚKɁr-qI%{ޫٜX60dwի~o)8eO߿fz ozsX$=ĭ|jK1; jf(v/&73F3O3T|ڟrH(S/@((4ayi A-PDF2*:RTYè'DBVz0 r Z\rj7Z nB&_J^u `2~ztr n i0R kENːc(btn"j>{OnDV)}l-D62nׂ?y#eh](F :f >_GOS~G1ϓF:|p.𔷯C Q6Tq2?lwA,*$펲s?2v%vP k*mw9\PĿ(E\t *ؖ(oO<hAGύJ &,>9Iu:ç6T@y=hTCvY>S,^Aj"X 6 [[&*PR^% /+ (SYxF:=v A[X\u&P+YY#@`@)Q*,-t Fgoxiq+vemG mF%AUi~KNDf1fHƾ(euܰWr o=jӻSsU2!އIxAЖKgn.pZ2yYn`7rs.<)KzX!TK@8}'cS$$BưOt%SPoFeu!Mq(ϓ]wt# Ĥ?,$`fB`ءTGY t9rZwZ_! 
"C~IkսLJ9]:n~b~?^n uk-߬ݻ$N0[}4=exw:a!DƑ_\UxR0RlZᝦV`@`ޥ_W@i-n!4\qO6;(}C'N6a땼׻W_vl:דmși-"KIhUԹ B/c~0'X|j/?52NVu]d kn[soŸ櫛9]>yQQh7 IA*~'xaXovI=yGٞGuNnuuu-#뚺Jq4a8Y{ihË,di5!00}BN@}1_k7#;o"#PqǡylaA7^eǡawyug>Ǭz+wilfendstream endobj 454 0 obj << /Filter /FlateDecode /Length 15308 >> stream x}]9r;oūEX$kC, Ǭ;r$g=ſݙ眬j6g4 ŰN(|$}('2ADoY 11c̀LqPn"E_l\/?{?3o5uY劾V lץ+(!mg!@|{k1^3$~u: B} ҮQscy]*7oKH#?8m 5""UZLn-BƵDH#R|HԔ-v#%HM 1v] S p@]bwhm@<Èt!#t#+3cv*|QV4\I7R!143BFƊ KMcҞ%+ rVĈM%אd<ЦY[63m7dPjGOJ] {2\tNhǗՆ=$L3 EBuErW<&D7TB3#%OoD;2CA"M3'ՕsC JMuADj7 È4".1 <Ū+ c NѪ0 H H&UFV(LIuJ]R]U!\ԩ{˸jh&5dݼc4,Uq8jѫ |.9Pu48l7J'2P*Z@,ԁ#+P[PD&6L+j9P]MLYUPZkZ;W Ce|G;kn(ֶn: "QeJ!UF\-󢺹QBjMh ]9.w0uHeWWB̜簗?T6f Tbʡ@U?B-8&I)ۖLJK@Ǣ![!eG+y$CE-D0UlsM9#C(yհRZi'oGvY򭼠33q:H5=2WcDžvm.а(ѱy 2ob i:ckv" ́zΫ (@c.͈l0}ΥB|1yfh.? 3DsCN~!e0h< d..UHj.wm8Iu"dhIGhr\!0յ!a6ҿEҠ\C08j9e&laT"-sѻX0 [ kbG+15ӡ[!xȋPlG&"],k֨[`*cjqEe`8E@|0r_0 LC-+N"*u/Q wH,Qۀ/ ֖TBaڒG8{L@x,H2=_R*:Y+v>50V$B[BYQkH^Q8Z-ԯȢ#T},FU=tEfmUvv,AE-Y+pIq 8>a+Ib101R13SjUb?la<0lE汭 9wݘZ]tXM$?[sLah.U[sbLRJcju-yL>]hj5(fjX mQ5AqMJi Œ8Xa`_ ]m2bMJ0!ԔS_,{슨V.gЗS,R׾.)~0y0x"aתxu"a׼" zh֊Eoi"aط$ W ԧE3y!74*CCǢ[xvX}wlr%&<@,:-VGv7*s^KgDL,m/\n(3cNP$1' ӶeH9\#`%>wqCM~yV'EST4 .+Jwt?%$%@AAꧭDWK# l0l4.:k{,taw7S!6\FXqRTR4JA9OEڡ ?%g]YxD›{ *WAN 7yqH@ N} 0Ӭx 0juhi%@ֵ0Kܼ p+^Je" @h< ?$Z5D&W$b=\F7 iEA Tõr wk܈jA96":ָYgA%P΄E- ˴kuDbRT)ZtBKnkQI&\ oP*peuXJT[&T6 ԳtPI:a׆cwS̃>TxX4Bu"D113Hs"4 ϩD .d <O>s4-ex)Aй2B \.iֈ4TKt3eqp:h L>S }pӵtV3YpshF #]!YuBKxY O,tA Ci_ʜ8ː)+lٚWY ˨H)*JZҲ jY;,'L 4iW=as3O}[zO4ӥNKеF-HpסV: 4 « 3QdN4#Dp7t;M4vf3 Ry;A,ǵ8YR[ϰQ+*s0BFsU7Io&bqY, I0kRMܪS΂PCsXP/՚TƎIw.]`8.u]QbbH_-qU8Ra:+$\R-n* Xq+6a&XrrlTKb*R9|X4fg#C01[yC㳲R9:]?|#Chb;8Lpzl*CGA8Fg8\orhM֊DSuuYhtsVy)P˩|Ua|&\pT: 'EܑBaӑF/g)9"\՛фV Ոɩ#-g2Bњ7G˕i1jrV68W)r*+ȑ؆F*#Iijfr̝DV][\SQe}Ź\le˙̈́׵5Wy3td /ZT#tkYND&N6썷f`D&G4DAq "3C:G"y0HV eՑ f`xp t#0va14ӄ ׇJlWDjBKJp2msfL:G#&drVxܖՅo˥X]1 ˑrE,񥖊c#(M'0;j-[4TkY}[sLE#L"0Ssg(o}8ԅ1zuS\Z/q11EșZAe~LY !+G|y{ϵq-N b} &zetX+'FcLY25ŝ/:R;B/A z)lB\ZrDәٶUq܌k7Gp+n҈td*nqRĽה$jsFf"| ,! &q"HX2u:VWEa\&Ԓ5wdgĵ5Ss ;8$Έ &qE>@GP"#Lt"s ڈ03JmiLl4E K%r[ĉ\NIc$@8ۦ$Ωx#JLw#tT ΙG梊S4"ù gq\iƙ62sCymfg<hJ4yK9#3sQlڞ;K}ϝW8-(+;4qav8y.i/v"*2zNgUKIT JTfϧw 28Ō0΋=s%Jd9JP@N08S9nP|pHs."4!Li$%ҜQwS8w:#쌕iΫYT)ps Jvp2s'ДsTT);DsQZt#Ϲ.L گUb gRV\ U)>O޹#yS%NV~z{+2q-X bRiʆ5Ndf ZY*rOffffBhgdU0Ƞi;a$BK_@ø]ɐ 0I4~dG{iG$ce z0uAx 3Ȳz@A@cb`^P%_O ꛋ#3ia*2lO>G!Õd~ FVHQÓ@D %›Yh%9d|Q3FmU䚒 d:1"|!|Oґ D,L>-NpI6-[CƧ[5B6ڀd;d|iBU0E{P`W=#*#@.-M NtOYr=P DDH=L!vl$n=G'q{Z8:5P '.t=T!1̽V6=#x{ZU|O lT5?%"w&=#Š&H֪ԩ5s=--LƧKOb|Q&?A'tOCGU:Е 'oJ ilO@`{HҨ|=H&*Bh ZDD&ȞV2N;DzIfBS Y4 DJr=-.pZJr=-"[ڙT]w*ҏ\O+g2\O\O+#5@D"$"&#@Dg#FDN(Ɖ @E^duq=dG\O "ikG5HЏTO+j`zyh d拘@"6!F k"^'aCo1~ *'V3*@ndzZݿ#EQ$C2=Wq- `zLOer?\`LO 7!p+ZD6>/Z*2=-.IOQ2aXD?Q2 ~zZz1TOGF8q[ 6(a›PiMzB#kTOÁ&օ\OBNp=-'["2ȞdiBZDp)b{!= q#"+zٞ@Yx=zlv"jdۉDNpxF(J"ٞ*̠",#ٞ@Ċ#"&ٞ>q.tDLP=q'n pP} lv.(<h HZzZ8iP=-4bڑ dɀLM]pTO O8}zn*Q=-=bd d29Q=IkLTO Ъv D4Xr= A#i$&%0 diz$&_k!4$!jeA\OCڙY.*-"+8>LNz3\= ~KGDDL3|H]DOLM9@4`z23&#@&"3qH2>3|qR,M؞.6?b{{C"`{Z䐊ډEè;+sJ'~${Z$4T1b@iuaHÒ|O ]e-@4bWYb*Ca 3,g*u#3QG=ΌUir/tOW{wZRVE1J'ٞyELwL? i-Mb{Ǵ᫏j.5 b#ipu!Inp~ji/%ɞ@n${Z8HE&Ȟ)Z[=|J[U[dO "ݒihb DDW=-^D5%HR;ApjHe@pf2d=-h"= @L.=̒ $ {Z8~8=-t&*'.1F"'.jgtt%t {ZTL H"Rɞ~ibހc2YtO "˒ D=AQd{lOC;H'*r'@D$֒{H'7*`s.R#@ȝ{Gw=k18H;=&Q"*@D$o/^+@$|Pi"QxH '"/L2I''B'yI3$@'Q/5|:r$|cIfI"^6@x6a>(ѻH"J0)@0:E'I" )ʝ;2H4Fiv'*. Z)B@LL͢AVO" D|S*2AhT>@4ꩲ3p'7M +b>OYw1'䜌N.PՈщdeCJ'$jvF$ytE iHH"FPdҪxRJ2̽ %9T5*iGmJ|Ȧ2"f/RcW+n{DJRTDYÈp s/g6qd$NR/gƌdي'j.Oį!r\תˡCMy ˡ_( EjJ| ɣҚ.G"7ɣ#֔w9HMFH(~?HUعikˑ[.Wv9 +r,a.{#9*3G$'/ӦAN.:WLȹʺb6uπDc/{z"7NCj='*%?_"7yW:04ۅA~%2y(EK@d&+O0>HG *ҙ9j}/ID^2.e]Z"-iV]d]ZlUڥHK. 
Ü F^rU 4%a9; 3 >iJoiȺ%Pd])dd]4⋢D/1HK"qpz.[~+~M`|a;EA#RU-i TJ'5e^#)9Sک1*.=2w]ŌEeSm2=JFeKi|M4d*`7pZDWA%*iDZZz{a0&@C虥 s.戴"ߍ:Ғ_)*jiЪ%/ӑSk@/"9v?CeݛE{3̹ Eȏa T\U@'4O&Z$a$%CnwD@˲{ ` nĞ+UGIlMy%anyvV"4Aw!jicq4#S(} %g/ȟ>|0E,?Lw؄OmjMg4\qyHݧWGuћ3d|+/Dt٭yKcqc+ MfbCcW|=$TIݙt\_9yn5"i_ڰW#WG([KKRRvcsB!Cyfcӑ?ls}Fx5Ykrlq'- EoH_6jyH/r:0BBmB UJC_ vǧ_Y#=̀㇒חҖ˟ތG.bYEnk%}|-^m+rig|_|RÌ}[-ၛ{e曗Oo_ oR޹`Y.{ww,v,Cmv exyvyoՓ^nX݂-^G;/d¾=Ը #q}^n^݋_Dpײ. m%lEj݄{M>*R_h0~*֫d67`"\ Og)WBr8;od(㬽_7/8n59 ǗqY[x>v[km ;/؏x,:~V~٥qyعr˧W2zKvy Q5݈K{\ k~|KxKvψo.?SAM=W?KpQf> "< Jɹ/Q>d{cb?hP?ݫg֜*cZ\n-cW._얾_&.={SSOe<}89d{Su9Mww*zo.G=r[ _O{ \w]~ .=u>7/;>OaVu;v[[˻K-vg!u wt9kHZ6za0n𩹲ݼxlC utSǠ|YߜOqTg}àYqLQ%a?SXo0̈kML)0l~cn߅G3/S77~?2|˪Ox?dN#S^"Uȏ?ܢ8{Ŗ$HG©ϗg^y{>u?,DK`>Z>H4iOT?-jf iVfӭCO~;]}zS7x"\R=!A7n(XRh0&sBwp\\NCJ=DֳOgTv%_2{qweIq*oW QiHs ]γu{n9e{v'B)MSaFr9s6G; i1ꖽAXqݵ^V=_ kQ76# @D/pݘC Sɸa} {+s6H>_xq\;D!T:풠Rw9>~ѩǥiEoB]P.\ꓷrxl*_p_ڷ?n7 Qmɉ 8vғS}wkd|=PW|k߸ύW/9Pf<︼>m{ -lw=]JԶsEgfE^ε޼#?YB^y ?+Pos+\m7A~?.3d*}l&~^,Z7>~='ϝH3 5$Q7JAyo Z*2U5_H|rYFsMz /j0}q {f)FeܺyjNoK#;~v0!~]'◎C$ ADđhhJ0;B;KRZq]çyۘeF{ ~x^?Y[G:.w,70B_fxۡů'UTf?݉>-=,gM?~"n5|)򌻊?8Pݪv- "M!"e<3γԽ1iz/ƒe_s{2>^f՝n^?qnп:n&gx϶È*s{<@Lx=z{>~ro}l)..Ϯht'P+:>8ql =}zy6X endstream endobj 455 0 obj << /Filter /FlateDecode /Length 26587 >> stream xm%Ǒc?>5ʗl`=1 X $)wMl߾y^n]vSda9[YyN?9^7|݈7ןg'_~l_qu~=ߔg)oF7?泟eQ^~z:c|{ןQk{=_QbtpM}]s[8_v>0ӿW7}vf:}W}}st{:_挈1ޜs:|ͷVY~ͣh|$^֛@@~F! ?ED~.Y^RJBk!} q5,DZMWu'?V! NTukQBkv>|=!!zvWt+V;&>Ǝ}I%:K*0?{p=|-99\T_܃jZ9ڈSH 287YS+z iEt'Vv~wsBgWBNűHĽSO!N]lhHѷju!M/j$r#i')z*Bzz"`^7糞e3F0Ձ#oj,;7T5G\b37'-?/n rYv!nd%'"UwLw`YCd]!⻶Џoκ "t^\m '[׶"Df"OmhWcy),ZsN KDF`۟ cB@U݀j}=}Y׷ |mߵ^S  (iPmԇ(gDŽxU o >뚉qݷ n>TZ M]؟#/ |""} E/MDnVkîEfuU^@Wil "m}۟'^vh--"*rS 䶕V鑷KlK&klj?^hR1<:!ySSm\Ӻ!Ed8h0 `;݆GeKm 4N vCz#۲ X4[^ͶAݷ Yh6а`mXd {o#z]kv`U1tQ60DfY9 a"_$hW1Iޣv#_1mF.I= j~rL{|L6~]}c36޵4\,u=%"f)]HW'$b'"C)*ˉ*ه=kec8 _ŎVbw߅]7BO;\fW%icw-fvk/{ɮi oOwsݽlrHAbk:DC2{kG71~*^R<Уt}OE`)X:S lA7Rԁ3<+؇V75[?0"FB\f63f_`.UlWA~Sߠݫ.EY표lfx' vlSA}^c,wX nfkDќ?-2F6ifp Zk@gТTh-<}%@1jUu90BGb 2rja(|Ea'[ta' :~[㳟'}+FmUv!Ac +1 z8F70 N4ڎ G>/]5G(6FfESsvBs(4#'YQ4lV Fl :0 cf?'0z킶gcUhC7MzѧOH}'^+X̀!!^c[`mI80c[|l{C&EnbЇ[MxtVwq,:c%vOw" ]. ļelgz3فpfl/6VϽ|=ὒ0Vr09؋!` wLs?|i.zlkybob<|ױmsqёya4ck^3G<4FMؾD5ՁsٲL9 =m'|Uܵgˤg'x.wSLafs#vcOK/\,4뜅r͕Yh77Y:Xr9ws?Nzq~.جֈ(:Y7Z2HvaOsm~`;O>t~2x>^WSx`s?{s3;h'c/I1bc'rϮdXU dPsvF; ;B=+#wәw@B~UGŎ+4 { x5W g`0]S>fplC|&%I&_n 5fMd24"1z\:Lx>!2hxau<鷛5tЛY ɿ{; ;؄%}6@Lm䶮mً{PBzֶpaӚZ v xm#Dӵr9^0|kمhY'vd2XyNF;e5AG6dT~5_SrHvu,KmoJu3D'Orr\ *d)C]0SMP7=6r]U{7]k[фh ߤ7dx?"cmNCWgP6/Q.vsӈ:ʽ%*`B, 6FSKo]-#p^F߈yB.K~Ǒօ@";&&ǣ0CVdM8|t2⿶a |+k1va36 iiZ(v@-ӄKQ9Y@IG6Z&Z6/{Dgf#)F,~Hڣ~qCS󔟷F U iGF-Í Y0F|w9!_oO\ TzވYF7̰.FI6R单/c"c WvO#YTG)8Kk~1|z8je`E=sp0nS~j\#f[zy1c[|4(rpTf{E?5CN^ Ƞg~+?ڶ<xPlsm#ބd;{p1f.C -zQކvT? )7x& c]SCF;Fj{"ա)XzKlES+k\G➇l91q(u#:cIwʴvMN#~)ӽrSO~72>(yO=ـNXXFGl2m çDYrYtƢ8m] 0ʍ{@h}Yy"flb#ܔ}n/EDG,#ۣ.^[:G4;+z4m#tĂMT54 mVGSOM}#=RDlSX M%b#Nv]Ȟ)%G[&23 ?cixrBO]? <1`mgOV\]o/m:`AZڈZpTMG0דc.ZW90>t^;N14|{轚:9q"ȼNg6q!ׅTOh>{7c#ws72|ڍ,x{ۨhQ1ɲ ! 
*Gz jLN-bђ] 4".&8wL#Bq1AШ,:VFdL ~ctS9Njs_+C L=MvED61{56 2&*B\LRZ3Aԍ0BbL&.&]E>ۈȞԮӖ~\#w |bn*ZTe"FL\#yr;RCqpLeFߍ})`+W#2gubhs1BbbC`poOۮhCfctI7ט90Ɯi'4s/u9G^Ɯq]b6zli.BW盖hɘӴ1gn5dz~0%ldfbď3 xdbfP#qWMiTSr13Ts1M*0=[63r!oNnbbܝbbb^-8LLbb9@]PJ/TH Ozrub88ش1y V껴bYFdusq1ٚ^E"|͔JF^-Sp$NƴrÏsi;\Bf}s TJ׆a)pFbGz[Q +bb^pX UP%TV4;"%bbœ*V}R?N{yI8ߥ|m\LwzG95C?݉Ilߥ3aLˋ.qۑk}Zq;,yP'[^Teİ8ˈiyQz/d(ꢚ/gTksJmOjv9dZ^dg.{2W_79%lm+2mJ3zݑ"%}bz}F†Sd]@%yBnH5OX); ryi6(D ;QsX4NTOqk> "s=#M.xhWF=#KS Y^D ,=#ac{X7K/94L4$bDeO1EPҧXD LĢJ:BnŸ'֑@z]E"z} ?G$,nB anS,w!,o)JF[B rWq+S~C!K0.%r"ֶtwRxHBd.d]j"۵a.C6)],_3ԇ+ۈ.ჸ%jD܅?.Q} jYeR%ꕾjyؓj?.Q5u_r".w d( f,4z@"kYq󵔻%p*@K Q.cBPwwz%xp%KuIwzޅhһD#ԻNU)ZLodK5.+ֻD#ֻN!Y[ Ұ*0di_zhW. ]=һD;PƐ%C K aq.4K] aKL2/5EKDS؂gF/3Δ?piY/#'K ;3]polK+/3r ^]8n@&$x Zr%r],%p,Z8>Gz*һE]H6]h)\xw2]51sRwS@:'rb'z@b'ַP8)GgUL.sdQFUPG1zV -1UD.&KLFf 1S¸+bǡz\)b( jO"ALԗL|>Kz๩.=бK  0t1l2xDU/E5?1#Q ċ01pND(( e0$jF˜@bJk Ĝ_µ zfHH]L*MgD9$ޓbYL0,}D-{b cX(X!] " ^:1Ѵ.&=ZCE (F tR)] 6!bEt1D,a4&pr0FIcbi +F$!b@jeM)!v$ w%e yBxtא ےܟO2[Q/&!";a /U0|ֳЙ#*"nF*~eD0@%!RY26e0D%-0܌X% 7,֥H p )`` &/IX!0NL@U`d0%uP0O %:x0&B_: a<`k0.,L,LP$v‘&(.EajQ am|&(» aFĶ4\B-̒&Оg0 "T"4a"Ֆ1I,|3ҁ&XN3D"K]`SH.9e0AKg615 $уLH $/E0<17_"h݁T(E`xrZ(/alLxbD08џk:EtҊZ3T$0k0"YNF04_jT]ӈ" 3W]ׄZkt\B52WgD䈁Lfwb]0i96)֖Y."fT?ef]̤jܸ#'\7OT^8MlP\Z.5`3r?NS5[%`32Lw$f &3!6ٜX - kȽFLlTԆ9Nfb.$L+ٚf98C>s R.әeAgF0-Z[7dI4L\͘Ҹlֶ\Y$Bre*!D3 \fƸ6H)If;<@fNSw![hBXG[S5ZY[gfM :su;:Av,RfC>Pw+37dgif/)(3F[" \Y&܋!, R:0:kb>ss;dU摵0ÿL֬Cnezf~w 7#tr׉nF:/03M:`3KdF%n/6O5ki54\37d͚[,Ss i2sM!K.!K$ :sː )Cr'yɐ |wK7̧or ə"f)R3)5fgV!IUfnֆi+4[Xl:4fl')`3)pS;YÓ@g.sC ʂ͜}05 ȗZB/f/JZe?s{f;_~P " mg?8o9Ͽyo>GdFfuPHo7ߐ`ߞ#mx\2Nd"zy^e 3o"Q77$+~` ?~+}x4+Xi|ܮ=}KY'O]&v O} @Uoa'5^+yӚl}f>{ ؐʟ ɗ`i*uQ";/B&M,z\dvџ0ԏTz1ڟϾ]=~hbE oQK-h#}?᮳z 25Μ߿oϯQ>="HX鴆W$i+ZJdΕKJg/ƍN@ZUkTˏ {dhGW=n#:IG"c>e4GK׳DDwiM'ӶYB=Y.S}: N+f:}[󖑮NZMM=j\*徫`r߲UذyOWneb=\}yL4ghNbԳfj4 62W &]~WzLTTnrræ^8LRA^Ԋ؜qfrH"E+F9SOtg;(Uos/#-$VS: N9'<Ӡ1 8u~pDTRN]ơ0b=I UjO:1iTfXk@Y#*݉HOuxgLneud uRɚU*@Uȵb\jRɚC1!U o'ۍv#չrv(4Km$+ש*75-+Up*ܙ,0RlܫԢ\DH8{w Õ .rͮỈh5 d 2vrۗhq .7M)+ɐe'/Or5pvȪNy)S_R,wZ QVTp qWJz+t rric.w:Fq]ӕ Gz}8r'*ZꎯQqp"&TԽޭGhN}栈po6jQ1pzP0nCUk9[)aj9܆ִ`^N*Z򤾂ޭNjz7s+GHWB4ae->y Jk+,VAY:O#WƯԣ|1&(M~;b[jH3j5j5 _ ճuK"9t<^!;UNў:yxІdY| Ad $=E.)õdpJq?R z}p0L"ߟkw(B))S3'2j?mъ ;K,M/] 88QSsj=j٩k=\/\c5pX؅eS{\R])e티8|vbϣO|dD:_r;?T8v'Ce=ztÍEf՛X?ϸ;Wx,{5mOXY8HڅTI>|@DȔ© y`g=ؒ~"uvGJ7 xQg!Tf$ 5jI^YWV c3"]l3bf'2ee mFVRǡ͕\K񱤮6_XY_Af'z:gXqSO4fÊE%a5bHgŠdj {̦]L072T:T,ӵmK^MѬaJ\SeaU,8T靪߼l¢BܮSAfR\.UnaXwWpQad}^@qnt y!eaQf!Y/D$4RgM=r9];\cQf)i-h[sAP=X^Z7*HiXY +bE2*u,nzPe*7.V߰D{TNBRՌef\JRkx5ܗ7jZ]pᙄ2%+[v)7_Z5@*!y >S+Qp&D^U"\B<T I[S1yX4WiaC;,g$|b>ʃ=>CXK;wHB̩ÖyŖ@nF|Mo{(iq>d>5$5l,BdMGr.m10bIXۮE ͼbsC,8R Q8a5;^nd0 -lGȸ(IH1ߪMUiF*PfZCަ4#ӐViU5]v? +V,!WU988P2jod ŶDC{!7` lC,RSUlF$CW\%8\ѾK:*+4T+,.|uHcue>^:pfPޅrµP>2GŧY*>_UN^C1G'/8W^te'/^Cy)^vv!'/w0rTS!4F Tm9nm6|IqTmzyiM6&`jTi&M'Wt>]ԦW* G`jo!dbԦWߝ r+U^^ɹ2{FtJnԦW2 6ՙsHPi 倪N9AզA=26;q<*r:}ZTw+NFjku**7 O='+tN 6~ CjC$Cc(),)#]E䝆b&}FTZ?BLd_ 1t7}|THN(NҢ]T˼AzKj%#mUu 2 ÕKg,NCKfoZ-S|i3bmhAf@4mF,[,V;ZAAfIJ9oEipMq!˖9H9p_s^ Y֙xv\j=$ ߥ@SjW|ӌܴR\7DKDӏRǖժET`_#4X]oDe|_;)0#?sPzc`#eϼqV-+{ 6'>OGtӠ(C`/)ErN#y9 7'59ͳ둱pf'FZŴ<ҔGisTPtFTed:u[$3 0YTHn yj8̓{g<:Gڀ n&tވ"h`غYܐqq$U;G+ yJԜ!!4ǣk#Tz;퐃cP'ʩc泥1_z]]tL E/'%|#:h駭f;T@lGƟ  0XnYtL ~_!:&UOHzqjYI-)Kt̞ALzavRѩAؘ`N1]Ș=chHSOtҏkjMO.1| pRJ7\6d>L?{Gi<]DbtenCiٿ34&q8>1~AڳS+yQv&d. li>rvcf|̹b>fZو3gL3y1+uiN6b>挼*PD_tL;R3|Yd!4ԓ2یӤcN5Iǜ.Rf:f^`.Y1s<:>Osa#%FlH9gC+J*D :,Il7ͪw@W/uL,mNYV? 
ordinal/inst/doc/clm_article.Rnw0000644000176200001440000030034413440713046016400 0ustar liggesusers% \documentclass[article]{article}
% \documentclass[article]{jss}
\documentclass[nojss]{jss}

%% -- Latex packages and custom commands ---------------------------------------

%% recommended packages
\usepackage{thumbpdf,lmodern,amsmath,amssymb,bm,url}
\usepackage{textcomp}
\usepackage[utf8]{inputenc}

%% another package (only for this demo article)
\usepackage{framed}

%% new custom commands
\newcommand{\class}[1]{`\code{#1}'}
\newcommand{\fct}[1]{\code{#1()}}

%% For Sweave-based articles about R packages:
%% need no \usepackage{Sweave}
\SweaveOpts{engine=R, eps=FALSE, keep.source = TRUE, prefix.string=clmjss}
<<echo=FALSE, results=hide>>=
options(prompt = "R> ", continue = "+ ", width = 70, useFancyQuotes = FALSE)
library("ordinal")
library("xtable")
@

%%\VignetteIndexEntry{Cumulative Link Models for Ordinal Regression}
%%\VignetteDepends{ordinal, xtable}

%% -- Article metainformation (author, title, ...) -----------------------------

%% - \author{} with primary affiliation
%% - \Plainauthor{} without affiliations
%% - Separate authors by \And or \AND (in \author) or by comma (in \Plainauthor).
%% - \AND starts a new line, \And does not.
\author{Rune Haubo B Christensen\\Technical University of Denmark\\
  \& \\ Christensen Statistics}
\Plainauthor{Rune Haubo B Christensen}

%% - \title{} in title case
%% - \Plaintitle{} without LaTeX markup (if any)
%% - \Shorttitle{} with LaTeX markup (if any), used as running title
\title{Cumulative Link Models for Ordinal Regression with the \proglang{R} Package \pkg{ordinal}}
\Plaintitle{Cumulative Link Models for Ordinal Regression with the R Package ordinal}
\Shorttitle{Cumulative Link Models with the \proglang{R} package \pkg{ordinal}}

%% - \Abstract{} almost as usual
\Abstract{
  This paper introduces the \proglang{R} package \pkg{ordinal} for the
  analysis of ordinal data using cumulative link models. The model framework
  implemented in \pkg{ordinal} includes partial proportional odds, structured
  thresholds, scale effects and flexible link functions. The package also
  supports cumulative link models with random effects, which are covered in a
  future paper. A speedy and reliable regularized Newton estimation scheme
  using analytical derivatives provides maximum likelihood estimation of the
  model class. The paper describes the implementation in the package as well
  as how to use its functionality for the analysis of ordinal data, including
  topics on model identifiability and customized modelling. The package
  implements methods for profile likelihood confidence intervals, predictions
  of various kinds as well as methods for checking the convergence of the
  fitted models.
}

%% - \Keywords{} with LaTeX markup, at least one required
%% - \Plainkeywords{} without LaTeX markup (if necessary)
%% - Should be comma-separated and in sentence case.
\Keywords{ordinal, cumulative link models, proportional odds, scale effects, \proglang{R}}
\Plainkeywords{ordinal, cumulative link models, proportional odds, scale effects, R}

%% - \Address{} of at least one author
%% - May contain multiple affiliations for each author
%%   (in extra lines, separated by \emph{and}\\).
%% - May contain multiple authors for the same affiliation
%%   (in the same first line, separated by comma).
\Address{
  Rune Haubo Bojesen Christensen\\
  Section for Statistics and Data Analysis\\
  Department of Applied Mathematics and Computer Science\\
  DTU Compute\\
  Technical University of Denmark\\
  Richard Petersens Plads \\
  Building 324 \\
  DK-2800 Kgs. Lyngby, Denmark\\
  \emph{and}\\
  Christensen Statistics\\
  Bringetoften 7\\
  DK-3500 V\ae rl\o se, Denmark \\
  E-mail: \email{Rune.Haubo@gmail.com}; \email{Rune@ChristensenStatistics.dk}\\
  URL: \url{http://christensenstatistics.dk/}
}

\begin{document}

This is a copy of an article submitted for publication in Journal of
Statistical Software (\url{https://www.jstatsoft.org/}).

%% -- Introduction -------------------------------------------------------------
%% - In principle "as usual".
%% - But should typically have some discussion of both _software_ and _methods_.
%% - Use \proglang{}, \pkg{}, and \code{} markup throughout the manuscript.
%% - If such markup is in (sub)section titles, a plain text version has to be
%%   added as well.
%% - All software mentioned should be properly \cite-d.
%% - All abbreviations should be introduced.
%% - Unless the expansions of abbreviations are proper names (like "Journal
%%   of Statistical Software" above) they should be in sentence case (like
%%   "generalized linear models" below).

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Introduction} \label{sec:intro}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Ordered categorical data, or simply \emph{ordinal} data, are common in a
multitude of empirical sciences and in particular in scientific disciplines
where humans are used as measurement instruments. Examples include school
grades, ratings of preference in consumer studies, degree of tumor involvement
in MR images and animal fitness in ecology. Cumulative link models (CLMs) are
a powerful model class for such data since observations are treated correctly
as categorical, the ordered nature is exploited and the flexible regression
framework allows for in-depth analyses.

This paper introduces the \pkg{ordinal} package \citep{ordinal-pkg} for
\proglang{R} \citep{R} for the analysis of ordinal data with cumulative link
models. The paper describes how \pkg{ordinal} supports the fitting of CLMs
with various model structures, model assessment and inferential options
including tests of partial proportional odds and scale effects. The
implementation and an effective fitting algorithm are also described. The
\pkg{ordinal} package also supports cumulative link \emph{mixed} models
(CLMMs): CLMs with normally distributed random effects. The support of this
model class will not be given further treatment here but remains a topic for
a future paper.

The name \emph{cumulative link models} is adopted from \citet{agresti02}, but
the model class has been referred to by several other names in the literature,
such as \emph{ordinal regression models} as well as \emph{ordered logit
models} and \emph{ordered probit models} \citep{greene10} for the logit and
probit link functions. The cumulative link model with a logit link is widely
known as the \emph{proportional odds model} due to \citet{mccullagh80} and
with a complementary log-log link, the model is sometimes referred to as the
\emph{proportional hazards model} for grouped survival times.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% \subsection{Software review}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Cumulative link models can be fitted by all the major software packages and
while some software packages support scale effects, partial proportional odds
(also referred to as unequal slopes, partial effects, and nominal effects),
different link functions and structured thresholds, all model structures are
usually not available in any one package or implementation. The following
brief software review is based on the publicly available documentation on the
software packages' websites retrieved in June 2018.

\proglang{IBM SPSS} \citep{SPSS} implements McCullagh's \pkg{PLUM}
\citep{mccullagh80} procedure, allows for the five standard link functions
(cf. Table~\ref{tab:linkFunctions}) and scale effects. Estimation is via
Fisher-Scoring and a test for equal slopes is available for the location-only
model while it is not possible to estimate a partial proportional odds model.
\proglang{Stata} \citep{Stata} includes the \code{ologit} and \code{oprobit}
procedures for CLMs with logistic and probit links but without support for
scale effects, partial effects or structured thresholds. The add-on package
\pkg{oglm} allows for all five standard link functions and scale effects. The
\pkg{GLLAMM} package \citep{gllamm} also has some support for CLMs in
addition to some support for random effects. \proglang{SAS} \citep{SAS}
implements CLMs with logit links in \code{proc logistic} and CLMs with the 5
standard links in \code{proc genmod}.

In \proglang{R}, several packages on CRAN implement CLMs. \code{polr} from
\pkg{MASS} \citep{MASS} implements standard CLMs allowing for the 5 standard
link functions but no further extensions; the \pkg{VGAM} package \citep{VGAM}
includes CLMs via the \code{vglm} function using the \code{cumulative} link.
\code{vglm} allows for several link functions as well as partial effects. The
\code{lrm} and \code{orm} functions from the \pkg{rms} package \citep{rms}
also implement CLMs with the 5 standard link functions but without scale
effects, partial effects or structured thresholds.

A Bayesian alternative is implemented in the \pkg{brms} package \citep{brms}
which includes structured thresholds in addition to random effects.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% \subsection[ordinal package overview]{\pkg{ordinal} package overview}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The \pkg{ordinal} package implements CLMs and CLMMs along with functions and
methods to support these model classes. The two key functions in
\pkg{ordinal} are \code{clm} and \code{clmm} which fit CLMs and CLMMs
respectively. Additional functions in \pkg{ordinal} cover distributional
support for Gumbel and log-gamma distributions as well as
gradients\footnote{gradients with respect to $x$, the quantile; not the
  parameters of the distributions} of normal, logistic and Cauchy probability
density functions which are used in the iterative methods implemented in
\code{clm} and \code{clmm}. An overview of key functions in \pkg{ordinal} is
provided in Table~\ref{tab:functions_in_ordinal}.

\begin{table}[t!]
\centering
\renewcommand*{\arraystretch}{1.2}
\begin{tabular}{llll}
  \hline
  \rotatebox{0}{Fitting} & \rotatebox{0}{Miscellaneous} &
  \rotatebox{0}{Former impl.} & \rotatebox{0}{Distributions} \\
  \hline
  \code{clm} & \code{convergence} & \code{clm2} & \code{[pdqrg]gumbel}$^{\textsf{c}}$ \\
  \code{clmm}$^{\textsf{c}}$ & \code{slice} & \code{clmm2}$^{\textsf{c}}$ & \code{[pdg]lgamma}$^{\textsf{c}}$ \\
  \code{clm.fit} & \code{drop.coef} & \code{clm2.control} & \code{gnorm}$^{\textsf{c}}$ \\
  \code{clm.control} & & \code{clmm2.control} & \code{glogis}$^{\textsf{c}}$ \\
  \code{clmm.control} & & & \code{gcauchy}$^{\textsf{c}}$ \\
  \hline
\end{tabular} \\
\caption{Functions in \pkg{ordinal}. Superscript "c" indicates (partial or
  full) implementation in \proglang{C}.\label{tab:functions_in_ordinal}}
\end{table}

A number of standard methods are implemented for fitted CLMs, i.e., objects
of class \code{clm} fitted with \code{ordinal::clm}, which mostly correspond
to methods also available for \code{glm} objects. Most extractor methods will
not be explicitly discussed in this paper as they behave as expected; most
other methods are discussed in the following sections. As CLMMs are not
covered by this paper, methods for \code{clmm} objects will not be discussed.

\begin{table}[t!]
\centering
\renewcommand*{\arraystretch}{1.2}
\begin{tabular}{llll}
  \hline
  \multicolumn{2}{l}{Extractor and Print} & Inference & Checking \\[3pt]
  \hline
  \code{coef} & \code{print} & \code{anova} & \code{slice} \\
  \code{fitted} & \code{summary} & \code{drop1} & \code{convergence}\\
  \code{logLik} & \code{model.frame} & \code{add1} & \\
  \code{nobs} & \code{model.matrix} & \code{confint} & \\
  \code{vcov} & \code{update} & \code{profile} & \\
  \code{AIC}, \code{BIC} & & \code{predict} & \\
  \code{extractAIC} & & \code{step}, \code{stepAIC} & \\
  \hline
\end{tabular}
\caption{Methods for \code{clm} objects.\label{tab:clm_methods}}
\end{table}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% \subsection{Organization of the paper}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The remainder of the paper is organized as follows. The next section
establishes notation by defining CLMs and associated log-likelihood
functions, then describes the extended class of CLMs that is implemented in
\pkg{ordinal} including details about scale effects, structured thresholds,
partial proportional odds and flexible link functions. The third section
describes how maximum likelihood (ML) estimation of CLMs is implemented in
\pkg{ordinal}. The fourth section describes how CLMs are fitted and ordinal
data are analysed with \pkg{ordinal} including sections on nominal effects,
scale effects, structured thresholds, profile likelihoods, assessment of
model convergence, fitted values and predictions, issues around model
identifiability and finally how \pkg{ordinal} is prepared for customizable
fitting of models not otherwise covered by the API. We end in
section~\ref{sec:conclusions} with a brief conclusion.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Cumulative link models}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

A cumulative link model is a model for ordinal-scale observations, i.e.,
observations that fall in an ordered finite set of categories.
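In \proglang{R}, such observations are conveniently stored as ordered
factors. As a minimal sketch (the data below are hypothetical and purely
illustrative):
<<eval=FALSE>>=
## Hypothetical ratings on a three-category ordinal scale:
resp <- c("low", "high", "medium", "medium", "low")
## An ordered factor records both the categories and their ordering:
y <- factor(resp, levels = c("low", "medium", "high"), ordered = TRUE)
y
@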
Ordinal observations can be represented by a random variable $Y_i$ that takes
a value $j$ if the $i$th ordinal observation falls in the $j$'th category
where $j = 1, \ldots, J$ and $J \geq 2$.\footnote{binomial models ($J = 2$)
  are also included.}%
%
A basic cumulative link model is
\begin{equation}
  \label{eq:BasicCLM}
  \gamma_{ij} = F(\eta_{ij})~, \quad \eta_{ij} = \theta_j - \bm x_i^\top
  \bm\beta~, \quad i = 1,\ldots,n~, \quad j = 1, \ldots, J-1 ~,
\end{equation}
where
\begin{equation*}
  %% \label{eq:cum}
  \gamma_{ij} = \Prob (Y_i \leq j) = \pi_{i1} + \ldots + \pi_{ij} \quad
  \mathrm{with} \quad \sum_{j=1}^J \pi_{ij} = 1
\end{equation*}
are cumulative probabilities\footnote{we have suppressed the conditioning on
  the covariate vector, $\bm x_i$, i.e., $\gamma_{ij} = \gamma_j(\bm x_i)$
  and $P(Y_i \leq j) = P(Y \leq j | \bm x_i)$.}, $\eta_{ij}$ is the linear
predictor, $\bm x_i$ is a $p$-vector of regression variables for the
parameters $\bm\beta$, without a leading column for an intercept, and $F$ is
the inverse link function.
%
The thresholds (also known as cut-points or intercepts) are strictly ordered:
\begin{equation*}
  -\infty \equiv \theta_0 \leq \theta_1 \leq \ldots \leq \theta_{J-1} \leq
  \theta_J \equiv \infty.
\end{equation*}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{The multinomial distribution and the log-likelihood function}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The ordinal observation $Y_i$ which assumes the value $j$ can be represented
by a multinomially distributed variable $\bm Y_i^* \sim
\mathrm{multinom}(\bm\pi_i, 1)$, where $\bm Y_i^*$ is a $J$-vector with a $1$
at the $j$'th entry and 0 otherwise, and with probability mass function
%
\begin{equation}
  \label{eq:multinom_pmf}
  \Prob(\bm Y_i^* = \bm y_i^*) = \prod_j \pi_{ij}^{y_{ij}^*} ~.
\end{equation}
%
The log-likelihood function can therefore be written as
%
\begin{equation*}
  \ell(\bm\theta, \bm\beta; \bm y^*) = \sum_i \sum_j y_{ij}^* \log \pi_{ij}
\end{equation*}
%
or equivalently
%
\begin{align*}
  \ell(\bm\theta, \bm\beta; \bm y) =~& \sum_i \sum_j \mathrm I (y_i = j)
  \log \pi_{ij} \\
  =~& \sum_i \log \tilde\pi_i
\end{align*}
%
where $\tilde\pi_i$ is the entry of $\bm \pi_i$ corresponding to the observed
category, $y_i$, and $\mathrm I(\cdot)$ is the indicator function. Allowing
for observation-level weights (case weights) $w_i$ finally leads to
%
\begin{equation}
  \label{eq:clm-log-likelihood}
  \ell(\bm\theta, \bm\beta; \bm y) = \sum_i w_i \log \tilde\pi_i ~.
\end{equation}
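The log-likelihood~\eqref{eq:clm-log-likelihood} is easily evaluated for a
fitted model since \code{fitted} for \code{clm} objects returns
$\tilde\pi_i$, the fitted probability of the observed category for each
observation. The following small sketch (our own illustration using the
\code{wine} data shipped with \pkg{ordinal}) reproduces \code{logLik} by
direct computation:
<<eval=FALSE>>=
library("ordinal")
fit <- clm(rating ~ temp + contact, data = wine)
## fitted(fit) returns the fitted probabilities of the observed
## response categories; all case weights are 1 in these data:
ll <- sum(log(fitted(fit)))
all.equal(ll, as.numeric(logLik(fit)))
@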
Better confidence intervals can be obtained by choosing instead the likelihood root statistic \citep[see e.g.,][]{pawitan01, brazzale07}: \begin{equation*} s(\beta_a):~ r(\beta_a) = \mathrm{sign}(\hat\beta_a - \beta_a) \sqrt{-2 [ \ell(\hat{\bm\theta}, \hat{\bm\beta}; \bm y) - \ell_p(\beta_a; \bm y)]} \end{equation*} where \begin{equation*} \ell_p(\beta_a; \bm y) = \max_{\bm\theta, \bm\beta_{-a}} \ell(\bm\theta, \bm\beta; \bm y)~, \end{equation*} is the profile likelihood for the scalar parameter $\beta_a$ and $\bm\beta_{-a}$ is the vector of regression parameters without the $a$'th one. While the profile likelihood has to be optimized over all parameters except $\beta_a$ we define a \emph{log-likelihood slice} as \begin{equation} \label{eq:slice} \ell_{\mathrm{slice}}(\beta_a; \bm y) = \ell(\beta_a; \hat{\bm\theta}, \hat{\bm\beta}_{-a}, \bm y)~, \end{equation} which is the log-likelihood function evaluated at $\beta_a$ while keeping the remaining parameters fixed at their ML estimates. A quadratic approximation to the log-likelihood slice is $(\hat\beta_a - \beta_a)^2 / 2\tau_a^2$ where the \emph{curvature unit} $\tau_a$ is the squareroot of $a$'th diagonal element of the Hessian of $-\ell(\hat{\bm\theta}, \hat{\bm\beta}; \bm y)$. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \subsection{Link functions} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% A commonly used link function is the logit link which leads to % \begin{equation} \label{eq:cum_logit_model} \mathrm{logit}(\gamma_{ij}) = \log \frac{\Prob (Y_i \leq j)}{1 - \Prob(Y_i \leq j)} \end{equation} % The odds ratio (OR) of the event $Y_i \leq j$ at $\bm x_1$ relative to the same event at $\bm x_2$ is then % \begin{equation} \label{eq:odds_ratio} \mathrm{OR} = \frac{\gamma_j(\bm x_1) / [1 - \gamma_j(\bm x_1)]} {\gamma_j(\bm x_2) / [1 - \gamma_j(\bm x_2)]} = \frac{\exp(\theta_j - \bm x_1^\top \bm\beta)} {\exp(\theta_j - \bm x_2^\top \bm\beta)} %% =&~ \exp(\theta_j - \theta_j - \bm x_1 \bm\beta + \bm x_2 \bm\beta) = \exp[(\bm x_2^\top - \bm x_1^\top)\bm\beta] \end{equation} which is independent of $j$. Thus the cumulative odds ratio is proportional to the distance between $\bm x_1$ and $\bm x_2$ which motivated \citet{mccullagh80} to denote the cumulative logit model a \emph{proportional odds model}. If $x$ represent a treatment variable with two levels (e.g., placebo and treatment), then $x_2 - x_1 = 1$ and the odds ratio is $\exp(-\beta_\textup{treatment})$. Similarly the odds ratio of the event $Y \geq j$ is $\exp(\beta_\textup{treatment})$. The probit link has its own interpretation through a normal linear model for a latent variable which is considered in section~\ref{sec:latent-variable-motivation}. The complentary log-log (clog-log) link is also sometimes used because of its interpretation as a proportional hazards model for grouped survival times: \begin{equation*} -\log\{1 - \gamma_{j}(\bm x_i) \} = \exp( \theta_j - \bm x_i^T \bm\beta ) \end{equation*} Here $1 - \gamma_{j}(\bm x_i)$ is the probability or survival beyond category $j$ given $\bm x_i$. The proportional hazards model has the property that \begin{equation*} \log \{ \gamma_{j}(\bm x_1) \} = \exp[ (\bm x_2^T - \bm x_1^T) \bm\beta ] \log \{ \gamma_{j}(\bm x_2) \}~. \end{equation*} thus the ratio of hazards at $\bm x_1$ relative to $\bm x_2$ are proportional. If the log-log link is used on the response categories in the reverse order, this is equivalent to using the clog-log link on the response in the original order. 
This reverses the sign of $\bm\beta$ as well as the sign and order of
$\{\theta_j\}$ while the likelihood and standard errors remain unchanged.
%
% Thus, similar to the proportional odds
% model, the ratio of hazard functions beyond category $j$ at $\bm x_1$
% relative to $\bm x_2$ (the hazard ratio, $HR$) is:
% \begin{equation*}
%   HR = \frac{-\log\{1 - \gamma_{j}(\bm x_2) \}}
%   {-\log\{1 - \gamma_{j}(\bm x_1) \}} =
%   \frac{\exp( \theta_j - \bm x_1^T \bm\beta )}
%   {\exp( \theta_j - \bm x_2^T \bm\beta )} =
%   \exp[(\bm x_2 - \bm x_1)\bm\beta]
% \end{equation*}
%
Details of the most common link functions are described in
Table~\ref{tab:linkFunctions}.

\begin{table}[t!]
\begin{center}
%\footnotesize
\begin{tabular}{llll}
  \hline
  Name & logit & probit & log-log \\
  \hline
  Distribution & logistic & normal & Gumbel (max)$^b$ \\
  Shape & symmetric & symmetric & right skew\\
  Link function ($F^{-1}$) & $\log[\gamma / (1 - \gamma)]$ &
  $\Phi^{-1}(\gamma)$ & $-\log[-\log(\gamma)]$ \\
  Inverse link ($F$) & $1 / [1 + \exp(-\eta)]$ & $\Phi(\eta)$ &
  $\exp(-\exp(-\eta))$ \\
  Density ($f = F'$) & $\exp(-\eta) / [1 + \exp(-\eta)]^2$ & $\phi(\eta)$ &
  $\exp[-\exp(-\eta) - \eta]$ \\
  \hline
  \hline
  Name & clog-log$^a$ & cauchit \\
  \hline
  Distribution & Gumbel (min)$^b$ & Cauchy$^c$ \\
  Shape & left skew & kurtotic \\
  Link function ($F^{-1}$) & $\log[ -\log(1 - \gamma)]$ &
  $\tan[\pi (\gamma - 0.5)]$ \\
  Inverse link ($F$) & $1 - \exp[-\exp(\eta)]$ & $\arctan(\eta)/\pi + 0.5$ \\
  Density ($f = F'$) & $\exp[-\exp(\eta) + \eta]$ & $1 / [\pi(1 + \eta^2)]$ \\
  \hline
\end{tabular}
\end{center}
% \footnotesize
% % $^a$: the \emph{complementary log-log} link \\
% $^b$: the Gumbel distribution is also known as the extreme value
% (type I) distribution for extreme minima or maxima. It is also
% sometimes referred to as the Weibull (or log-Weibull) distribution
% (\url{http://en.wikipedia.org/wiki/Gumbel_distribution}). \\
% $^c$: the Cauchy distribution is a $t$-distribution with one df
\caption{Summary of the five standard link functions.
  $^a$: the \emph{complementary log-log} link;
  $^b$: the Gumbel distribution is also known as the extreme value (type I)
  distribution for extreme minima or maxima. It is also sometimes referred
  to as the Weibull (or log-Weibull) distribution
  (\url{http://en.wikipedia.org/wiki/Gumbel_distribution});
  $^c$: the Cauchy distribution is a $t$-distribution with one degree of
  freedom.
  \label{tab:linkFunctions}}
\end{table}

The \pkg{ordinal} package allows for the estimation of an extended class of
cumulative link models in which the basic model~\eqref{eq:BasicCLM} is
extended in a number of ways including structured thresholds, partial
proportional odds, scale effects and flexible link functions. The following
sections will describe these extensions of the basic CLM.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Extensions of cumulative link models}
\label{sec:extensions-of-clms}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

A general formulation of the class of models (excluding random effects) that
is implemented in \pkg{ordinal} can be written
%
\begin{equation}
  \gamma_{ij} = F_{\lambda}(\eta_{ij}), \quad
  \eta_{ij} = \frac{g_{\bm\alpha} (\theta_j) - \bm x_i^\top \bm\beta -
    \bm w_i^\top \tilde{\bm\beta}_j}{\exp(\bm z_i^\top \bm\zeta)}
\end{equation}
%
where
\begin{description}
\item[$F_{\lambda}$] is the inverse link function.
It may be parameterized by the scalar parameter $\lambda$, in which case we
  refer to $F_{\lambda}^{-1}$ as a \emph{flexible link function},
%
\item[$g_{\bm\alpha}(\theta_j)$] parameterises the thresholds $\{\theta_j\}$
  by the vector $\bm\alpha$ such that $g$ restricts $\{\theta_j\}$ to be for
  example symmetric or equidistant. We denote these \emph{structured
  thresholds},
%
\item[$\bm x_i^\top\bm\beta$] are the ordinary regression effects,
%
\item[$\bm w_i^\top \tilde{\bm\beta}_j$] are regression effects which are
  allowed to depend on the response category $j$ and they are denoted
  \emph{partial} or \emph{non-proportional odds} \citep{peterson90} when the
  logit link is applied. To include other link functions in the terminology
  we denote these effects \emph{nominal effects} (in text and code) because
  these effects are not integral to the ordinal nature of the data,
%
\item[$\exp(\bm z_i^\top \bm\zeta)$] are \emph{scale effects} since in a
  latent variable view these effects model the scale of the underlying
  location-scale distribution.
\end{description}
With the exception of the structured thresholds, these extensions of the
basic CLM have been considered individually in a number of sources but to the
author's best knowledge not previously in a unified framework.
%
For example partial proportional odds have been considered by
\citet{peterson90} and scale effects have been considered by
\citet{mccullagh80} and \citet{cox95}. \citet{agresti02} is a good
introduction to cumulative link models in the context of categorical data
analysis and includes discussions of scale effects.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Latent variable motivation of CLMs}
\label{sec:latent-variable-motivation}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

It is natural to motivate the CLM from a linear model for a categorized
version of a latent variable. Assume the following linear model for an
unobserved latent variable:
%
\begin{equation}
  \label{eq:latent}
  S_i = \alpha^* + \bm x_i^\top \bm\beta^* + \varepsilon_i, \quad
  \varepsilon_i \sim N(0, \sigma^{*2})
\end{equation}
%
If $S_i$ falls between two thresholds, $\theta_{j-1}^* < S_i \leq \theta_j^*$
where
%
\begin{equation}
  \label{eq:thresholds}
  -\infty \equiv \theta_0^* < \theta_1^* < \ldots < \theta^*_{J-1} <
  \theta_{J}^* \equiv \infty
\end{equation}
%
then $Y_i = j$ is observed and the cumulative probabilities are:
%
\begin{equation*}
  \gamma_{ij} = \Prob (Y_i \leq j) = \Prob(S_i \leq \theta_j^*) =
  \Prob \left( Z \leq \frac{\theta_j^* - \alpha^* - \bm x_i^\top
      \bm\beta^*}{\sigma^*} \right) =
  \Phi ( \theta_j - \bm x_i^\top \bm\beta )
\end{equation*}
%
where $Z$ follows a standard normal distribution, $\Phi$ denotes the standard
normal cumulative distribution function, parameters with an ``$^*$'' exist on
the latent scale, $\theta_j = (\theta_j^* - \alpha^*) / \sigma^*$ and
$\bm\beta = \bm\beta^* / \sigma^*$.

Note that $\alpha^*$, $\bm\beta^*$ and $\sigma^*$ would have been
identifiable if the latent variable $S$ were directly observed, but they are
not identifiable with ordinal observations.
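This correspondence is easy to verify by simulation. The sketch below (our
own illustration; the data and the parameter values $\alpha^* = 1$,
$\bm\beta^* = 2$, $\sigma^* = 0.5$ and $\bm\theta^* = (1, 2, 3)$ are
hypothetical) categorizes a latent normal variable and fits a probit CLM,
whose estimates should approximate $\theta_j = (\theta_j^* - \alpha^*) /
\sigma^* = (0, 2, 4)$ and $\beta = \beta^* / \sigma^* = 4$:
<<eval=FALSE>>=
library("ordinal")
set.seed(1)
n <- 1000
x <- runif(n)
## Latent variable with alpha* = 1, beta* = 2 and sigma* = 0.5:
S <- 1 + 2 * x + rnorm(n, sd = 0.5)
## Categorize at the latent thresholds theta* = (1, 2, 3):
y <- cut(S, breaks = c(-Inf, 1, 2, 3, Inf), ordered_result = TRUE)
fit <- clm(y ~ x, link = "probit")
## Compare with theta = (0, 2, 4) and beta = 4:
coef(fit)
@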
If we allow a log-linear model for the scale such that
%
\begin{equation*}
  \varepsilon_i \sim N(0, \sigma^{*2}_i), \quad
  \sigma_i^* = \exp(\mu + \bm z_i^\top \bm\zeta) =
  \sigma^* \exp(\bm z_i^\top \bm\zeta)
\end{equation*}
%
where $\bm z_i$ is the $i$'th row of a design matrix $\bm Z$ without a leading column for an intercept and $\sigma^* = \exp(\mu)$, then
\begin{equation*}
  \gamma_{ij} = \Prob \left( Z \leq \frac{\theta_j^* - \alpha^* -
      \bm x_i^\top \bm\beta^*}{\sigma^*_i} \right) =
  \Phi \left( \frac{\theta_j - \bm x_i^\top \bm\beta}{\sigma_i} \right)
\end{equation*}
where $\sigma_i = \sigma_i^* / \sigma^* = \exp(\bm z_i^\top \bm\zeta)$ is the \emph{relative} scale.

The common link functions: probit, logit, log-log, clog-log and cauchit correspond to inverse cumulative distribution functions of the normal, logistic, Gumbel (max), Gumbel (min) and Cauchy distributions, respectively. These distributions are all members of the location-scale family with common form $F(\mu, \sigma)$, with location $\mu$ and non-negative scale $\sigma$; for example, the logistic distribution has mean $\mu$ and standard deviation $\sigma \pi / \sqrt{3}$. Choosing a link function therefore corresponds to assuming a particular distribution for the latent variable $S$, in which $\bm x_i^\top \bm\beta$ and $\exp(\bm z_i^\top \bm\zeta)$ model location \emph{differences} and scale \emph{ratios}, respectively, of that distribution.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Structured thresholds}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Structured thresholds, $\{ g(\bm\alpha)_j \}$, make it possible to impose restrictions on the thresholds $\bm\theta = g(\bm\alpha)$. For instance, restricting the thresholds to be equidistant means that only the location of, say, the first threshold and the spacing between adjacent thresholds have to be estimated; thus only two parameters are used to parameterize the thresholds irrespective of the number of response categories.

\pkg{ordinal} takes $g(\bm\alpha)$ to be a linear function and operates with
\begin{equation*}
  g(\bm\alpha) = \mathcal{J}^\top \bm\alpha = \bm \theta
\end{equation*}
where the Jacobian $\mathcal{J}$ defines the mapping from the parameters $\bm\alpha$ to the thresholds $\bm\theta$. The traditional ordered but otherwise unrestricted thresholds are denoted \emph{flexible thresholds} and obtained by taking $\mathcal{J}$ to be an identity matrix.

Assuming $J=6$ ordered categories, the Jacobians for equidistant and symmetric thresholds (denoted \code{equidistant} and \code{symmetric} in \code{clm} argument \code{threshold}) are
\begin{equation*}
  \mathcal{J}_{\mathrm{equidistant}} =
  \begin{bmatrix}
    1 & 1 & 1 & 1 & 1 \\
    0 & 1 & 2 & 3 & 4 \\
  \end{bmatrix}, \quad
  \mathcal{J}_{\mathrm{symmetric}} =
  \begin{bmatrix}
    1 & 1 & 1 & 1 & 1 \\
    0 & -1 & 0 & 1 & 0 \\
    -1 & 0 & 0 & 0 & 1 \\
  \end{bmatrix}.
\end{equation*}
Another version of symmetric thresholds (denoted \code{symmetric2}) is sometimes relevant with an uneven number of response categories, here illustrated with $J=5$ together with the \code{symmetric} thresholds:
\begin{equation*}
  \mathcal{J}_{\mathrm{symmetric2}} =
  \begin{bmatrix}
    0 & -1 & 1 & 0 \\
    -1 & 0 & 0 & 1 \\
  \end{bmatrix}, \quad
  \mathcal{J}_{\mathrm{symmetric}} =
  \begin{bmatrix}
    1 & 1 & 0 & 0 \\
    0 & 0 & 1 & 1 \\
    -1 & 0 & 0 & 1 \\
  \end{bmatrix}
\end{equation*}
The nature of $\mathcal{J}$ for a particular model can always be inspected by printing the \code{tJac} component of the \code{clm} fit.
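As a short usage sketch (the \code{wine} data and the \code{threshold} argument are introduced in later sections):
<<>>=
## inspect the mapping from alpha to theta for symmetric thresholds
library("ordinal")
fm <- clm(rating ~ temp + contact, data = wine, threshold = "symmetric")
fm$tJac
@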
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Partial proportional odds and nominal effects}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The nominal effects $\bm w_i^\top\tilde{\bm\beta}_j$ can be considered an extension of the regression part of the model $\bm x_i^\top \bm\beta$ in which the regression effects are allowed to vary with $j$.
%
The nominal effects can also be considered an extension of the thresholds $\theta_j$ which allows them to depend on the variables $\bm w_i$: $\tilde{\theta}_{ij}(\bm w_i) = \theta_j - \bm w_i^\top \tilde{\bm\beta}_j$ is the $j$'th threshold for the $i$'th observation. The following treatment assumes the latter view.

In general, let $\bm W$ denote the design matrix for the nominal effects without a leading column for an intercept; the nominal-effects parameter vector $\tilde{\bm\beta}_j$ is then $\mathrm{ncol}(\bm W)$ long and $\tilde{\bm\beta}$ is $\mathrm{ncol}(\bm W) \cdot (J-1)$ long.

If $\bm W$ is the design matrix for the nominal effects containing a single column for a continuous variable then $\tilde{\beta}_j$ is the slope parameter corresponding to the $j$'th threshold and $\theta_j$ is the $j$'th intercept, i.e., the threshold when the covariate is zero. Viewing $\tilde{\theta}_{ij}(\bm w_i) = \theta_j - \bm w_i^\top \tilde{\bm\beta}_j$ as a linear model for the thresholds facilitates the interpretation.

If, on the other hand, $\bm W$ is the design matrix for a categorical variable (a \code{factor} in \proglang{R}) then the interpretation of $\tilde{\bm\beta}_j$ depends on the contrast-coding of $\bm W$. If we assume that the categorical variable has 3 levels, then $\tilde{\bm\beta}_j$ is a 2-vector. In the default treatment contrast-coding (\code{"contr.treatment"}) $\theta_j$ is the $j$'th threshold for the first (base) level of the factor, $\tilde{\beta}_{1j}$ is the difference between the thresholds for the first and second levels and $\tilde{\beta}_{2j}$ is the difference between the thresholds for the first and third levels.

In general we define $\bm\Theta$ as a matrix with $J-1$ columns and with 1 row for each combination of the levels of factors in $\bm W$. This matrix can be extracted from the model fit as the \code{Theta} component.

Note that variables in $\bm X$ cannot also be part of $\bm W$ if the model is to remain identifiable. \pkg{ordinal} detects this and automatically removes the offending variables from $\bm X$.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Flexible link functions}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The \pkg{ordinal} package allows for two kinds of flexible link functions due to \citet{aranda-ordaz83} and \citet{genter85}.

The link function proposed by \citet{aranda-ordaz83} reads
%
\begin{equation*}
  F^{-1}_\lambda (\gamma_{ij}) = \log \left\{ \frac{(1 -
      \gamma_{ij})^{-\lambda} - 1}{\lambda} \right\}~,
\end{equation*}
which depends on the auxiliary parameter $\lambda \in ]0, \infty[$. When $\lambda = 1$, the logistic link function arises, and when $\lambda \rightarrow 0$,
\begin{equation*}
  \{ (1 - \gamma_{ij})^{-\lambda} - 1 \} / \lambda \rightarrow
  \log (1 - \gamma_{ij})^{-1}~,
\end{equation*}
so the complementary log-log link arises.
The inverse link function and its derivative are given by
\begin{align*}
  F(\eta) =&~ 1 - (\lambda \exp(\eta) + 1)^{-\lambda^{-1}} \\
  f(\eta) =&~ \exp(\eta) (\lambda \exp(\eta) + 1)^{-\lambda^{-1} - 1}
\end{align*}
The density implied by the inverse link function is left skewed if $0 < \lambda < 1$, symmetric if $\lambda = 1$ and right skewed if $\lambda > 1$, so the link function can be used to assess the evidence about possible skewness of the latent distribution.

The log-gamma link function proposed by \citet{genter85} is based on the log-gamma density by \citet{farewell77}. The cumulative distribution function, and hence the inverse link function, reads
\begin{equation*}
  F_\lambda(\eta) =
  \begin{cases}
    1 - G(q; \lambda^{-2}) & \lambda < 0 \\
    \Phi(\eta) & \lambda = 0 \\
    G(q; \lambda^{-2}) & \lambda > 0
  \end{cases}
\end{equation*}
where $q = \lambda^{-2}\exp(\lambda \eta)$, $G(\cdot; \alpha)$ denotes the cumulative distribution function of the gamma distribution with shape parameter $\alpha$ and unit rate parameter, and $\Phi$ denotes the cumulative standard normal distribution function. The corresponding density function reads
\begin{equation*}
  f_\lambda(\eta) =
  \begin{cases}
    |\lambda| k^k \Gamma(k)^{-1} \exp\{ k(\lambda\eta -
    \exp(\lambda\eta)) \} & \lambda \neq 0 \\
    \phi(\eta) & \lambda = 0
  \end{cases}
\end{equation*}
where $k=\lambda^{-2}$, $\Gamma(\cdot)$ is the gamma function and $\phi$ is the standard normal density function.

Note that the choice and parameterization of the predictor, $\eta_{ij}$, e.g., the use of scale effects, can affect the evidence about the shape of the latent distribution. There are usually several link functions which provide essentially the same fit to the data, and choosing among the good candidates is probably best done by appealing to arguments such as ease of interpretation rather than arguments related to fit.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section[Implementation of ML Estimation of CLMs in ordinal]{Implementation of ML Estimation of CLMs in \pkg{ordinal}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

In \pkg{ordinal} cumulative link models are (by default) estimated with a regularized Newton-Raphson (NR) algorithm with step-halving (line search) using analytical expressions for the gradient and Hessian of the negative log-likelihood function. This NR algorithm with analytical derivatives is used irrespective of whether the model contains structured thresholds, nominal effects or scale effects, the only exception being models with flexible link functions, for which a general-purpose quasi-Newton optimizer is used.

Due to the computationally cheap and efficient evaluation of the analytical derivatives, the relatively well-behaved log-likelihood function (with exceptions described below) and the speedy convergence of the Newton-Raphson algorithm, the estimation of CLMs is virtually instant on a modern computer even with complicated models on large datasets. This also facilitates simulation studies. More important than speed is perhaps that the algorithm is reliable and accurate.

The remainder of this section describes the regularized NR algorithm with step-halving (line search). The analytical gradients are described in detail in \citet{mythesis}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{A regularized Newton-Raphson algorithm with step halving}
\label{sec:algorithm}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The regularized NR algorithm is an iterative method that produces a sequence of estimates $\bm\psi^{(0)}, \ldots, \bm\psi^{(i)}, \ldots$ where parenthesized superscripts denote iterations. From the $i$'th estimate, the $(i+1)$'th estimate is given by
%
\begin{equation*}
  \bm\psi^{(i+1)} = \bm\psi^{(i)} - c_1 \bm h^{(i)}, \quad
  \bm h^{(i)} = \tilde{\bm H}(\bm\psi^{(i)}; \bm y)^{-1}
  \bm g(\bm\psi^{(i)}; \bm y)
\end{equation*}
where
\begin{equation*}
  \tilde{\bm H}(\bm\psi^{(i)}; \bm y) = \bm H(\bm\psi^{(i)}; \bm y) +
  c_2 (c_3 - \min(\bm e^{(i)})) \bm I,
\end{equation*}
%
$\bm H(\bm\psi^{(i)} ; \bm y)$ and $\bm g(\bm\psi^{(i)}; \bm y)$ are the Hessian and gradient of the negative log-likelihood function with respect to the parameters evaluated at the current estimates; $\bm e^{(i)}$ is the vector of eigenvalues of $\bm H(\bm\psi^{(i)}; \bm y)$, $\bm h^{(i)}$ is the $i$'th step, $c_1$ is a scalar parameter which controls the step halving, and $c_2$, $c_3$ are scalar parameters which control the regularization of the Hessian. Regularization is only enforced when the Hessian is not positive definite, so $c_2 = 1$ when $\min(\bm e^{(i)}) < \tau$ and zero otherwise, where $\tau$ is an appropriate tolerance. The choice of $c_3$ is to some extent arbitrary (though it must be positive) and the algorithm in \pkg{ordinal} sets $c_3 = 1$.

Step-halving is enforced when the full step $\bm h^{(i)}$ causes a decrease in the likelihood function, in which case $c_1$ is consecutively halved, $c_1 = \frac{1}{2}, \frac{1}{4}, \frac{1}{8}, \ldots$ until the step $c_1 \bm h^{(i)}$ is small enough to cause an increase in the likelihood or until the maximum allowed number of consecutive step-halvings has been reached.

The algorithm in \pkg{ordinal} also deals with a couple of numerical issues that may occur. For example, the likelihood function may be sufficiently flat that the change in log-likelihood is smaller than what can be represented in double precision, and so, while the new parameters may be closer to the true ML estimates and be associated with a smaller gradient, it is not possible to measure progress by the change in log-likelihood.

The NR algorithm in \pkg{ordinal} has two convergence criteria: (1) an absolute criterion requiring that $\max | \bm g(\bm\psi^{(i)}; \bm y) | < \tau_1$ and (2) a relative criterion requiring that $\max | \bm h^{(i)} | < \tau_2$, where the default thresholds are $\tau_1 = \tau_2 = 10^{-6}$. Here the first criterion attempts to establish closeness of $\bm\psi^{(i)}$ to the true ML estimates in absolute terms; the second criterion is an estimate of the relative closeness to the true ML estimates.
%
Both convergence criteria are needed if both small (e.g., $\approx 0.0001$) and large (e.g., $\approx 1000$) parameter estimates are to be determined accurately with an appropriate number of correct decimals as well as significant digits.

The NR algorithm in \pkg{ordinal} attempts to satisfy the absolute criterion first; it will only attempt to satisfy the relative criterion if it can take the full un-regularized NR step, and then only for a maximum of 5 steps.
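The following minimal sketch illustrates the structure of one such iteration for a generic smooth objective. It is not the internal \pkg{ordinal} implementation; \code{nll}, \code{grad} and \code{hess} are assumed to be user-supplied functions returning the negative log-likelihood, its gradient and its Hessian:
<<>>=
## A hedged sketch of one regularized NR update with step-halving
nr_step <- function(par, nll, grad, hess,
                    c3 = 1, tau = 1e-8, max_half = 10) {
  H <- hess(par)
  e <- eigen(H, symmetric = TRUE, only.values = TRUE)$values
  if (min(e) < tau)  # regularize when the Hessian is not positive definite
    H <- H + (c3 - min(e)) * diag(length(par))
  h <- solve(H, grad(par))  # the full (possibly regularized) NR step
  c1 <- 1
  for (i in seq_len(max_half)) {
    if (nll(par - c1 * h) < nll(par)) break
    c1 <- c1 / 2  # halve the step on overshoot
  }
  par - c1 * h
}
@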
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Convergence properties and parameter accuracy}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Convergence to a well-defined optimum is achieved when the gradient of the negative log-likelihood function with respect to the parameters is small and the Hessian is positive definite, i.e., has only positive eigenvalues bounded away from zero.
%
Identifiability problems occur when the likelihood function is flat in directions of one or more parameters (or linear functions of the parameters) while being well-defined, i.e., pointy, in other directions. It may happen that a parameter is exactly unidentifiable, and \code{clm} is in some cases (including rank-deficient design matrices) able to detect this and exclude the parameter from the optimization procedure. In other cases the likelihood is almost flat in one or more directions. These cases are not uncommon in practice, and it is then not possible to reduce the parameter space before optimizing the model.

To measure the degree of empirical identifiability \code{clm} reports the condition number of the Hessian, which is the ratio of the largest to the smallest eigenvalue. A large condition number of the Hessian does not necessarily mean there is a problem with the model, but it can be a sign of one. A small condition number of the Hessian, say smaller than about $10^4$ or $10^6$, on the other hand, provides good assurance that a well-defined optimum has been reached.

A key problem for optimization methods is when to stop iterating: when have the parameters that determine the optimum of the function been found with sufficient accuracy? The \emph{method independent error estimate} \citep{elden04} provides a way to approximate the error in the parameter estimates. Sufficiently close to the optimum the Newton-Raphson step provides this estimate:
\begin{equation*}
  |\hat{\bm\psi}^{(i)} - \bm\psi^*| \lesssim \bm h^{(i)}, \quad
  \bm h^{(i)} = \bm H(\bm\psi^{(i)}; \bm y)^{-1} \bm g(\bm\psi^{(i)}; \bm y)
\end{equation*}
where $\bm\psi^*$ is the exact (but unknown) value of the ML estimate, $\hat{\bm\psi}^{(i)}$ is the estimate of $\bm\psi$ at the $i$'th iteration and $\bm h^{(i)}$ is the full unregularized NR step at the $i$'th iteration.
%
Since the gradient and Hessian of the negative log-likelihood function with respect to the parameters are already evaluated and part of the model fit at convergence, it is essentially computationally cost-free to approximate the error in the parameter estimates. Based on the error estimate the number of correctly determined decimals and significant digits is determined for each parameter.

The assessment of the number of correctly determined decimals and significant digits is only reliable sufficiently close to the optimum and when the NR algorithm converges without regularization and step-halving. In practice we caution against this assessment if the algorithm did not converge successfully.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Properties of the log-likelihood function for extended CLMs}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\citet{pratt81} and \citet{burridge81} showed (seemingly independently of each other) that the log-likelihood function of the basic cumulative link model~\eqref{eq:BasicCLM} is concave. This means that there is a unique global optimum of the log-likelihood function and therefore no risk of convergence to a local optimum.
It also means that the Hessian matrix for the negative log-likelihood is strictly positive definite and therefore also that the Newton step is always in a direction of higher likelihood. The genuine Newton step may be too long to actually cause an increase in likelihood from one iteration to the next (this is called ``overshoot''). This is easily overcome by successively halving the length of the Newton step until an increase in likelihood is achieved.

Exceptions to the strict concavity of the log-likelihood function include models using the cauchit link, flexible link functions as well as models with scale effects. Notably, structured thresholds and nominal effects do not affect the linearity of the predictor $\eta_{ij}$, so models with these extensions are also guaranteed to have concave log-likelihoods.

The non-decreasing restriction on the threshold parameters $\{\theta_j\}$ is dealt with by defining $\ell(\bm\theta, \bm\beta; y) = -\infty$ when $\{\theta_j\}$ is not in a non-decreasing sequence. If the algorithm attempts evaluation at such illegal values, step-halving automatically brings the algorithm back on track.

Other implementations of CLMs re-parameterize $\{\theta_j\}$ such that the non-decreasing nature of $\{\theta_j\}$ is enforced by the parameterization; for example, \code{MASS::polr} (package version 7.3-49) optimizes the likelihood using
\begin{equation*}
  \tilde\theta_1 = \theta_1, ~\tilde{\theta}_2 = \log(\theta_2 -
  \theta_1),~\ldots, ~ \tilde{\theta}_{J-1} = \log(\theta_{J-1} -
  \theta_{J-2})
\end{equation*}
This is deliberately not used in \pkg{ordinal} because in our experience the log-likelihood function is generally closer to quadratic in the original parameterization, which facilitates faster convergence.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Starting values}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

For the basic CLMs~\eqref{eq:BasicCLM} the threshold parameters are initialized to an increasing sequence such that the probability mass of the standard logistic distribution between consecutive thresholds (and below the lowest and above the highest threshold) is the same. The regression parameters $\bm\beta$, the scale parameters $\bm\zeta$ as well as the nominal effects $\tilde{\bm\beta}$ are initialized to 0. If the model specifies a cauchit link or includes scale parameters, estimation starts at the parameter estimates of a model using the probit link and/or without the scale part of the model.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Estimation problems}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

With many nominal effects it may be difficult to find a model in which the threshold parameters are strictly increasing for all combinations of the parameters. Upon convergence of the NR algorithm the model evaluates the $\bm\Theta$-matrix and checks that each row of threshold estimates is increasing.

When a continuous variable is included among the nominal effects it is often helpful if the continuous variable is centered at an appropriate value (at least within the observed range of the data). This is because $\{\theta_j\}$ represent the thresholds when the continuous variable is zero and $\{\theta_j\}$ are enforced to be a non-decreasing sequence. Since the nominal effects represent different slopes for the continuous variable, the thresholds will necessarily be ordered differently at some other value of the continuous variable.
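A small simulated sketch of the centering advice (the data and variable names are invented for illustration, not taken from the paper's analyses):
<<>>=
## Sketch: centering a continuous variable used for nominal effects, so
## that {theta_j} represents the thresholds at the mean of x rather than
## at the (possibly unobserved) value x = 0.
library("ordinal")
set.seed(1)
d <- data.frame(x = rnorm(100, mean = 50, sd = 5))
d$y <- cut(d$x + rnorm(100, sd = 5), 3, labels = c("low", "mid", "high"),
           ordered_result = TRUE)
fm <- clm(y ~ 1, nominal = ~ I(x - mean(x)), data = d)
@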
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Convergence codes}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Irrespective of the fitting algorithm, \pkg{ordinal} reports the following convergence codes for CLMs, in which negative values indicate convergence failure:
%
\begin{description}
\item[-3] Not all thresholds are increasing. This is only possible with nominal effects and constitutes an invalid fit.
\item[-2] The Hessian has at least one negative eigenvalue. This means that the point at which the algorithm terminated does not represent an optimum.
\item[-1] Absolute convergence criterion (maximum absolute gradient) was not satisfied. This means that the algorithm could not get close enough to a stationary point of the log-likelihood function.
\item[0] Successful convergence.
\item[1] The Hessian is singular (i.e., at least one eigenvalue is zero). This means that some parameters are not uniquely determined.
\end{description}
%
Note that with convergence code \textbf{1} the optimum of the log-likelihood function has been found, although it is not a single point but a line (or, in general, a hyperplane); so while some parameters are not uniquely determined, the maximum value of the likelihood is well defined and can be compared to that of other models.

In addition to these convergence codes, the NR algorithm in \pkg{ordinal} reports the following messages:
\begin{description}
\item[0] Absolute and relative convergence criteria were met.
\item[1] Absolute convergence criterion was met, but relative criterion was not met.
\item[2] Iteration limit reached.
\item[3] Step factor reduced below minimum.
\item[4] Maximum number of consecutive Newton modifications reached.
\end{description}

Note that convergence is assessed in the same way irrespective of the message from the fitting algorithm and irrespective of whether the NR algorithm described above or a general-purpose quasi-Newton optimizer is used.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section[Fitting cumulative link models in ordinal with clm]{Fitting cumulative link models in \pkg{ordinal} with \code{clm}}
\label{sec:fitting-clms}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The \code{clm} function takes the following arguments:
%
<<echo=FALSE>>=
clm_args <- gsub("function ", "clm", deparse(args(clm)))
cat(paste(clm_args[-length(clm_args)], "\n"))
@
%
Several arguments are standard and well-known from \code{lm} and \code{glm} and will not be described in detail; \code{formula}, \code{data}, \code{weights}, \code{subset} and \code{na.action} are all parts of the standard model specification in \proglang{R}. \code{scale} and \code{nominal} are interpreted as \proglang{R}-formulae with no left-hand sides and specify the scale and nominal effects of the model, respectively; see sections~\ref{sec:scale-effects} and \ref{sec:nominal-effects} for details. \code{start} is an optional vector of starting values; \code{doFit} can be set to \code{FALSE} to prompt \code{clm} to return a model \emph{environment}, for details see section~\ref{sec:customized-modelling}; \code{model} controls whether the \code{model.frame} should be included in the returned model fit; \code{link} specifies the link function and \code{threshold} specifies an optional threshold structure, for details see section~\ref{sec:threshold-effects}. Note the absence of a separate \code{offset} argument.
Since \code{clm} allows for different offsets in \code{formula} and \code{scale}, offsets have to be specified within each formula, e.g., \verb!scale = ~ x1 + offset(x2)!.

Control parameters can be specified as a named list, among the optional \code{...} arguments, or directly as a call to \code{clm.control}; in the first two cases the arguments are passed on to \code{clm.control}. \code{clm.control} takes the following arguments:
%
<<echo=FALSE>>=
cc_args <- gsub("function ", "clm.control", deparse(args(clm.control)))
cat(paste(cc_args[-length(cc_args)], "\n"))
@
%
The \code{method} argument specifies the optimization and/or return method. The default estimation method (\code{Newton}) is the regularized Newton-Raphson estimation scheme described in section~\ref{sec:algorithm}; options \code{model.frame} and \code{design} prompt \code{clm} to return, respectively, the \code{model.frame} and a list of objects that constitute the internal representation of the model instead of fitting it; options \code{ucminf}, \code{nlminb} and \code{optim} represent different general-purpose optimizers which may be used to fit the model (the former from package \pkg{ucminf} \citep{ucminf}, the latter two from package \pkg{stats}). The \code{sign.location} and \code{sign.nominal} options allow the user to flip the signs on the location and nominal model terms. The \code{convergence} argument instructs \code{clm} how to alert the user of potential convergence problems; \code{...} are optional arguments passed on to the general-purpose optimizers; \code{trace} applies across all optimizers and positive values lead to printing of progress during iterations; the remaining arguments (\code{maxIter, gradTol, maxLineIter, relTol, tol}) control the behavior of the regularized NR algorithm described in section~\ref{sec:algorithm}.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection[Fitting a basic cumulative link model with clm]{Fitting a basic cumulative link model with \code{clm}}
\label{sec:fitting-basic-clm}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

In the following examples we will use the wine data from \citet{randall89} available in the object \code{wine} in package \pkg{ordinal}, cf., Table~\ref{tab:wineData}. The data represent a factorial experiment on factors determining the bitterness of wine with 1 = ``least bitter'' and 5 = ``most bitter''. Two treatment factors (temperature and contact) each have two levels. Temperature and contact between juice and skins can be controlled when crushing grapes during wine production. Nine judges each assessed wine from two bottles from each of the four treatment conditions; hence there are 72 observations in all. The main objective is to examine the effect of contact and temperature on the perceived bitterness of wine.

\begin{table}[t!]
\centering
\begin{tabular}{llrrrrr}
  \hline
  & & \multicolumn{5}{c}{Least---Most bitter} \\
  \cline{3-7}
<<echo=FALSE, results=tex>>=
## data(wine)
tab <- with(wine, table(temp:contact, rating))
mat <- cbind(rep(c("cold", "warm"), each = 2),
             rep(c("no", "yes"), 2), tab)
colnames(mat) <- c("Temperature", "Contact", paste("~~", 1:5, sep = ""))
xtab <- xtable(mat)
print(xtab, only.contents=TRUE, include.rownames=FALSE,
      sanitize.text.function = function(x) x)
@
\end{tabular}
\caption{The number of ratings from nine judges in bitterness categories 1 --- 5.
Wine data from \citet{randall89} aggregated over bottles and judges.%
  \label{tab:wineData}}
\end{table}%

Initially we consider the following cumulative link model for the wine data:
\begin{equation}
  \label{eq:CLM}
  \begin{array}{c}
    \textup{logit}(P(Y_i \leq j)) = \theta_j - \beta_1 (\mathtt{temp}_i)
    - \beta_2(\mathtt{contact}_i) \\
    i = 1,\ldots, n, \quad j = 1, \ldots, J-1
  \end{array}
\end{equation}%
%
where $\beta_1(\mathtt{temp}_i)$ attains the values $\beta_1(\mathtt{cold})$ and $\beta_1(\mathtt{warm})$, and $\beta_2(\mathtt{contact}_i)$ attains the values $\beta_2(\mathtt{no})$ and $\beta_2(\mathtt{yes})$. The effect of temperature in this model is illustrated in Figure~\ref{fig:standard_clm}. This is a model for the cumulative probability of the $i$'th rating falling in the $j$'th category or below, where $i$ indexes all observations ($n=72$), $j = 1, \ldots, J$ indexes the response categories ($J = 5$) and $\theta_j$ is the intercept or threshold for the $j$'th cumulative logit: $\textup{logit}(P(Y_i \leq j))$.

Fitting the model with \code{clm} we obtain:
<<>>=
library("ordinal")
fm1 <- clm(rating ~ temp + contact, data=wine)
summary(fm1)
@
The \code{summary} method prints basic information about the fitted model.
%
The primary result is the coefficient table with parameter estimates, standard errors and Wald-based $p$~values for tests of the parameters being zero. If one of the flexible link functions (\code{link = "log-gamma"} or \code{link = "Aranda-Ordaz"}) is used, a coefficient table for the link parameter $\lambda$ is also included.

The maximum likelihood estimates of the model coefficients are:%
%
\begin{equation}
  \label{eq:parameters}
  \begin{gathered}
    \hat\beta_1(\mathtt{warm} - \mathtt{cold})= 2.50,
    ~~\hat\beta_2(\mathtt{yes} - \mathtt{no}) = 1.53, \\
    \{\hat\theta_j\} = \{-1.34,~ 1.25,~ 3.47,~ 5.01\}.
  \end{gathered}
\end{equation}
%
The coefficients for \code{temp} and \code{contact} are positive, indicating that higher temperature and contact increase the bitterness of wine, i.e., rating in higher categories is more likely.
%
Because treatment contrasts, the default in \proglang{R}, were used, $\{\hat\theta_j\}$ refers to the thresholds at the setting with $\mathtt{temp}_i = \mathtt{cold}$ and $\mathtt{contact}_i = \mathtt{no}$.
%
Three natural and complementary interpretations of this model are
%
\begin{enumerate}
\item The thresholds $\{ \hat\theta_j \}$ at $\mathtt{contact}_i = \mathtt{yes}$ conditions have been shifted a constant amount $1.53$ relative to the thresholds $\{ \hat\theta_j \}$ at $\mathtt{contact}_i = \mathtt{no}$ conditions.
\item The location of the latent distribution has been shifted $+1.53 \sigma^*$ (scale units) at $\mathtt{contact}_i = \mathtt{yes}$ relative to $\mathtt{contact}_i = \mathtt{no}$.
\item The odds ratio of bitterness being rated in category $j$ or above ($\mathrm{OR}(Y \geq j)$) is $\exp(\hat\beta_2(\mathtt{yes} - \mathtt{no})) = 4.61$.
\end{enumerate}
%
Note that there are no $p$~values displayed for the threshold coefficients because it usually does not make sense to test if they equal zero.
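The odds ratio quoted in the third interpretation can be computed directly from the fit; a short usage sketch (the coefficient names follow from the default treatment contrasts):
<<>>=
## odds ratios for temp and contact with profile likelihood intervals
exp(coef(fm1)[c("tempwarm", "contactyes")])
exp(confint(fm1))
@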
\begin{figure}
  \centering
  \includegraphics[width=6cm]{./static_figs/fig-fig2}
  \caption{Illustration of the effect of temperature in the standard cumulative link model in Equation~\ref{eq:CLM} for the wine data in Table~\ref{tab:wineData} through a latent variable interpretation.\label{fig:standard_clm}}
\end{figure}

The number of Newton-Raphson iterations is given below \code{niter} with the number of step-halvings in parentheses. \code{max.grad} is the maximum absolute gradient of the log-likelihood function with respect to the parameters.
%
The condition number of the Hessian (\code{cond.H}) is well below $10^4$ and so does not indicate a problem with the model.

The \code{anova} method produces analysis of deviance (ANODE) tables, also based on Wald $\chi^2$-tests, and provides tables with type I, II and III hypothesis tests using the \proglang{SAS} definitions. A type I table, the \proglang{R} default for linear models fitted with \code{lm}, sequentially tests terms from first to last; type II tests attempt to respect the principle of marginality and test each term after all others while ignoring higher order interactions; and type III tables are based on orthogonalized contrasts, and tests of main effects or lower order terms can often be interpreted as averaged over higher order terms. Note that in this implementation any type of contrasts (e.g., \code{contr.treatment} or \code{contr.SAS} as well as \code{contr.sum}) can be used to produce type III tests. For further details on the interpretation and definition of type I, II and III tests, please see \citet{kuznetsova17} and \citet{SAStype}.

Here we illustrate with a type III ANODE table, which in this case is equivalent to type I and II tables since the variables are balanced:
<<>>=
anova(fm1, type="III")
@

Likelihood ratio tests, though asymptotically equivalent to the Wald tests, usually reflect the evidence in the data better. These tests can be obtained by comparing nested models with the \code{anova} method, for example, the likelihood ratio test of \code{contact} is
<<>>=
fm2 <- clm(rating ~ temp, data=wine)
anova(fm2, fm1)
@
which in this case produces a slightly lower $p$~value. Equivalently we can use \code{drop1} to obtain likelihood ratio tests of the explanatory variables while \emph{controlling} for the remaining variables:
<<>>=
drop1(fm1, test="Chi")
@
Likelihood ratio tests of the explanatory variables while \emph{ignoring} the remaining variables are provided by the \code{add1} method (outputs not shown):
<<results=hide>>=
fm0 <- clm(rating ~ 1, data=wine)
add1(fm0, scope = ~ temp + contact, test="Chi")
@
%
Confidence intervals of the parameter estimates are provided by the \code{confint} method, which by default computes the so-called profile likelihood confidence intervals:
<<>>=
confint(fm1)
@

The cumulative link model in Equation~\ref{eq:CLM} assumes that the thresholds, $\{\theta_j\}$, are constant for all values of the remaining explanatory variables, here \code{temp} and \code{contact}. This is generally referred to as the \emph{proportional odds assumption} or \emph{equal slopes assumption}. We can relax this assumption in two general ways: with nominal effects and with scale effects, examples of which we now present in turn.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Partial and non-proportional odds: nominal effects}
\label{sec:nominal-effects}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The CLM in \eqref{eq:CLM} specifies a structure in which the regression parameters, $\bm\beta$, are not allowed to vary with $j$, or equivalently that the threshold parameters $\{\theta_j\}$ are not allowed to depend on regression variables. In the following model we relax this assumption and allow the threshold parameters to depend on \code{contact}, which leads to the so-called partial proportional odds for \code{contact}:
%
\begin{equation}
  \label{eq:CLM_nominal}
  \begin{array}{c}
    \textup{logit}(P(Y_i \leq j)) = \theta_j + \tilde{\beta}_{j}
    (\mathtt{contact}_i) - \beta (\mathtt{temp}_i) \\
    i = 1,\ldots, n, \quad j = 1, \ldots, J-1
  \end{array}
\end{equation}
%
One way to view this model is to think of two sets of thresholds being applied at conditions with and without contact, as illustrated in Figure~\ref{fig:clm_nominal}. The model is specified as follows with \code{clm}:
<<>>=
fm.nom <- clm(rating ~ temp, nominal = ~ contact, data=wine)
summary(fm.nom)
@
As can be seen from the output of \code{summary} there is no regression coefficient estimated for \code{contact}, but there are additional threshold coefficients estimated instead.
%
The naming and meaning of the threshold coefficients depend on the contrast coding applied to \code{contact}; the \proglang{R} default treatment contrasts (\code{"contr.treatment"}) are used here. The coefficients translate to the following parameter functions:
\begin{equation}
  \label{eq:nom_parameters}
  \begin{gathered}
    \hat\beta(\mathtt{warm} - \mathtt{cold})= 2.52, \\
    \{\hat\theta_j\} = \{-1.32,~ 1.25,~ 3.55,~ 4.66\}, \\
    \{ \hat{\tilde{\beta}}_j(\mathtt{yes} - \mathtt{no}) \} =
    \{-1.62,~ -1.51,~ -1.67,~ -1.05\}.
  \end{gathered}
\end{equation}
%
Again $\{ \hat\theta_j \}$ refer to the thresholds at the $\mathtt{temp}_i = \mathtt{cold}$ and $\mathtt{contact}_i = \mathtt{no}$ settings, while the thresholds at $\mathtt{temp}_i = \mathtt{cold}$ and $\mathtt{contact}_i = \mathtt{yes}$ are $\{ \hat\theta_j + \hat{\tilde{\beta}}_j(\mathtt{yes} - \mathtt{no}) \}$.
%
The odds ratio of bitterness being rated in category $j$ or above ($\mathrm{OR}(Y \geq j)$) now depends on $j$: $\{\exp(-\hat{\tilde{\beta}}_j(\mathtt{yes} - \mathtt{no}))\} = \{ 5.03,~ 4.53,~ 5.34,~ 2.86\}$.
%
\begin{figure}
  \centering
  \includegraphics[width=6cm]{./static_figs/fig-figNom2}
  \caption{Illustration of nominal effects leading to different sets of thresholds being applied for each level of \code{contact} in a latent variable interpretation, cf., Equation~\ref{eq:CLM_nominal}.\label{fig:clm_nominal}}
\end{figure}

The resulting thresholds for each level of \code{contact}, i.e., the estimated $\bm\Theta$-matrix, can be extracted with:
<<>>=
fm.nom$Theta
@
As part of the convergence checks, \code{clm} checks the validity of $\bm\Theta$, i.e., that each row of the threshold matrix is non-decreasing.

We can perform a likelihood ratio test of the proportional odds assumption for \code{contact} by comparing the likelihoods of models \eqref{eq:CLM} and \eqref{eq:CLM_nominal} as follows:
<<>>=
anova(fm1, fm.nom)
@
There is only a small difference in the log-likelihoods of the two models and the test is non-significant. Thus there is no evidence that the proportional odds assumption is violated for \code{contact}.

It is not possible to estimate both $\beta_2(\mathtt{contact}_i)$ and $\tilde{\beta}_{j}(\mathtt{contact}_i)$ in the same model.
Consequently, variables that appear in \code{nominal} cannot also enter \code{formula}. For instance, not all parameters are identifiable in the following model:
<<>>=
fm.nom2 <- clm(rating ~ temp + contact, nominal = ~ contact, data=wine)
@
We are made aware of this when summarizing or printing the model, in which the coefficient for \code{contactyes} is \code{NA}:
<<>>=
fm.nom2
@

To test the proportional odds assumption for all variables, we can use
<<>>=
nominal_test(fm1)
@
This function moves each term in \code{formula}, and copies each term in \code{scale}, to \code{nominal} one at a time and produces an \code{add1}-like table with likelihood ratio tests of each term.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Modelling scale effects}
\label{sec:scale-effects}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
To allow the scale of the latent variable distribution to depend on explanatory variables we could, for instance, consider the following model where the scale is allowed to differ between cold and warm conditions. The location of the latent distribution is allowed to depend on both temperature and contact:
\begin{equation}
  \label{eq:CLM_scale_wine}
  \begin{gathered}
    \textup{logit}(P(Y_i \leq j)) = \frac{\theta_j - \beta_1
      (\mathtt{temp}_i) - \beta_{2} (\mathtt{contact}_i)}
    {\exp( \zeta (\mathtt{temp}_i))} \\
    i = 1,\ldots, n, \quad j = 1, \ldots, J-1
  \end{gathered}
\end{equation}

This model structure is illustrated in Figure~\ref{fig:clm_scale} and can be estimated with:
<<>>=
fm.sca <- clm(rating ~ temp + contact, scale = ~ temp, data=wine)
summary(fm.sca)
@
In a latent variable interpretation the location of the latent distribution is shifted $2.63\sigma^*$ (scale units) from cold to warm conditions and $1.59\sigma^*$ from absence to presence of contact. The scale of the latent distribution is $\sigma^*$ at cold conditions but $\sigma^* \exp(\zeta(\mathtt{warm} - \mathtt{cold})) = \sigma^*\exp(0.095) = 1.10 \sigma^*$, i.e., 10\% higher, at warm conditions. However, observe that the $p$~value for the scale effect in the summary output shows that the ratio of scales is not significantly different from 1 (or equivalently that the difference on the log scale is not significantly different from 0).

Scale effects offer an alternative to nominal effects (partial proportional odds) when non-proportional odds structures are encountered in the data. Using scale effects is often a better approach because the model is well-defined for all values of the explanatory variables irrespective of translation and scaling of covariates. Scale effects also use fewer parameters, which often leads to more sensitive tests than nominal effects.

Potential scale effects of variables already included in \code{formula} can be discovered using \code{scale_test}. This function adds each model term in \code{formula} to \code{scale} in turn and reports the likelihood ratio statistic in an \code{add1} fashion:
<<>>=
scale_test(fm1)
@

\code{confint} and \code{anova} methods apply with no change to models with scale and nominal parts, but \code{drop1}, \code{add1} and \code{step} methods will only drop or add terms to the (location) \code{formula}.
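For example, the estimated scale ratio and accompanying profile likelihood intervals can be extracted directly from the fit; a sketch assuming, in analogy with the \code{beta} component used later in the paper, that the scale coefficients are stored in the \code{zeta} component:
<<>>=
exp(fm.sca$zeta)      # ratio of scales: warm relative to cold
exp(confint(fm.sca))  # intervals for location and scale effects
@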
\begin{figure}
  \centering
  \includegraphics[width=6cm]{./static_figs/fig-figSca}
  \caption{Illustration of scale effects leading to different scales of the latent variable, cf., Equation~\ref{eq:CLM_scale_wine}.\label{fig:clm_scale}}
\end{figure}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Structured thresholds}
\label{sec:threshold-effects}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

In section~\ref{sec:nominal-effects} we described nominal effects where the assumption that regression parameters have the same effect across all thresholds was relaxed. In this section we will instead impose additional restrictions on the thresholds. In the following model we require that the thresholds $\{ \theta_j \}$ be equidistant, or equally spaced. This allows us to assess an assumption that judges are using the response scale in such a way that there is the same distance between adjacent response categories, i.e., that $\theta_j - \theta_{j-1} = \textup{constant}$ for $j = 2, \ldots, J-1$. The effect of equidistant thresholds is illustrated in Figure~\ref{fig:clm_structured_thresholds} and can be fitted with:
<<>>=
fm.equi <- clm(rating ~ temp + contact, data=wine,
               threshold="equidistant")
summary(fm.equi)
@
The parameters determining the thresholds are now the first threshold (\code{threshold.1}) and the spacing among consecutive thresholds (\code{spacing}). The mapping to this parameterization is stored in the transpose of the Jacobian matrix (\code{tJac}) component of the model fit. This makes it possible to extract the thresholds imposed by the equidistance structure with
<<>>=
drop(fm.equi$tJac %*% coef(fm.equi)[c("threshold.1", "spacing")])
@
These thresholds are in fact already stored in the \code{Theta} component of the model fit.
%
The following shows that the average distance between consecutive thresholds in \code{fm1}, which did not restrict the thresholds, is very close to the \code{spacing} parameter from \code{fm.equi}:
<<>>=
mean(diff(coef(fm1)[1:4]))
@

One advantage of imposing additional restrictions on the thresholds is the use of fewer parameters. Whether the restrictions are warranted by the data can be assessed in a likelihood ratio test:
<<>>=
anova(fm1, fm.equi)
@
In this case the test is non-significant, so there is no considerable loss of fit from saving two parameters; hence we may retain the model with equally spaced thresholds.

Note that the shape of the latent distribution (determined by the choice of link function) also affects the distances between the thresholds. If thresholds are equidistant under a normal distribution (i.e., with the probit link) they will in general\footnote{The exception is perfect fits such as CLMs with flexible thresholds and no predictors where models have the same likelihood irrespective of link function.} not be equidistant under a differently shaped latent distribution such as a skew latent distribution (e.g., with the log-log or clog-log link).
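This can be checked numerically; a small sketch comparing the spacing of the freely estimated thresholds under the logit link (\code{fm1}) with that under a skewed latent distribution:
<<>>=
## threshold spacing under a symmetric vs. a left-skewed latent distribution
fm.cll <- clm(rating ~ temp + contact, data = wine, link = "cloglog")
diff(coef(fm1)[1:4])     # logit: logistic latent distribution
diff(coef(fm.cll)[1:4])  # clog-log: Gumbel (min) latent distribution
@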
\begin{figure}
  \centering
  \includegraphics[width=6cm]{./static_figs/fig-figFlex}
  \includegraphics[width=6cm]{./static_figs/fig-figEqui}
  \caption{Illustration of flexible (left) and equidistant (right) thresholds being applied in a cumulative link model in a latent variable interpretation.\label{fig:clm_structured_thresholds}}
\end{figure}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Profile likelihood}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

In addition to facilitating the generally quite accurate profile likelihood confidence intervals which were illustrated in section~\ref{sec:fitting-basic-clm}, the profile likelihood function can also be used to illustrate the relative support for parameter values. As an example, the profile likelihood of the model coefficients for \code{temp} and \code{contact} in \code{fm1} can be obtained with
%
<<results=hide>>=
pr1 <- profile(fm1, alpha=1e-4)
plot(pr1)
@
The resulting plots are provided in Figure~\ref{fig:ProfileLikelihood}. The \code{alpha} argument controls how far from the maximum likelihood estimate the likelihood function should be profiled: the profile strays no further from the MLE than the limits of a $(1 - \code{alpha})$-level profile likelihood confidence interval.

From the relative profile likelihood in Figure~\ref{fig:ProfileLikelihood} for \code{tempwarm} we see that parameter values between 1 and 4 are reasonably well supported by the data, and values outside this range have little likelihood. Values between 2 and 3 are very well supported by the data and have high likelihood.

\setkeys{Gin}{width=.45\textwidth}
\begin{figure}
  \centering
<<echo=FALSE, fig=TRUE>>=
plot(pr1, which.par=1)
@
<<echo=FALSE, fig=TRUE>>=
plot(pr1, which.par=2)
@
\caption{Relative profile likelihoods for the regression parameters in \code{fm1} for the wine data. Horizontal lines indicate 95\% and 99\% confidence bounds.}
\label{fig:ProfileLikelihood}
\end{figure}

Profiling is implemented for regression ($\beta$) and scale ($\zeta$) parameters but not available for threshold, nominal and flexible link parameters.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Assessment of model convergence}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Likelihood slices}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The maximum likelihood estimates of the parameters in cumulative link models do not have closed form expressions, so iterative methods have to be applied to fit the models. Further, CLMs are non-linear models and in general the likelihood function is not guaranteed to be well-behaved or even unimodal. In addition, the special role of the threshold parameters and the restriction that they be ordered can affect the appearance of the likelihood function.

To confirm that an unequivocal optimum has been reached, and that the likelihood function is reasonably well-behaved around the reported optimum, we can inspect the likelihood function in a neighborhood around the reported optimum. For these purposes we can display slices of the likelihood function.

The following code produces the slices shown in Figure~\ref{fig:slice1}, which displays the shape of the log-likelihood function in a fairly wide neighborhood around the reported MLE (here we use $\lambda=5$ curvature units) together with its quadratic approximation.
<<>>=
slice.fm1 <- slice(fm1, lambda = 5)
par(mfrow = c(2, 3))
plot(slice.fm1)
@
Figure~\ref{fig:slice1} shows that the log-likelihood function is fairly well behaved and close to quadratic for most parameters.
\setkeys{Gin}{width=.32\textwidth}
\begin{figure}
  \centering
<<echo=FALSE, fig=TRUE>>=
plot(slice.fm1, parm = 1)
@
<<echo=FALSE, fig=TRUE>>=
plot(slice.fm1, parm = 2)
@
<<echo=FALSE, fig=TRUE>>=
plot(slice.fm1, parm = 3)
@
<<echo=FALSE, fig=TRUE>>=
plot(slice.fm1, parm = 4)
@
<<echo=FALSE, fig=TRUE>>=
plot(slice.fm1, parm = 5)
@
<<echo=FALSE, fig=TRUE>>=
plot(slice.fm1, parm = 6)
@
\caption{Slices of the (negative) log-likelihood function (solid) for parameters in \code{fm1} for the wine data. Dashed lines indicate quadratic approximations to the log-likelihood function and vertical bars indicate maximum likelihood estimates.}
\label{fig:slice1}
\end{figure}

Looking at the log-likelihood function much closer to the reported optimum (using $\lambda = 10^{-5}$) we can probe how accurately the parameter estimates are determined. The likelihood slices in Figure~\ref{fig:slice2}, which are produced with the following code, show that the parameters are determined accurately with at least 5 correct decimals. Slices are shown for two parameters; the slices for the remaining four parameters are very similar.
<<results=hide>>=
slice2.fm1 <- slice(fm1, parm = 4:5, lambda = 1e-5)
par(mfrow = c(1, 2))
plot(slice2.fm1)
@
\setkeys{Gin}{width=.45\textwidth}
\begin{figure}
  \centering
<<echo=FALSE, fig=TRUE>>=
plot(slice2.fm1, parm = 1)
@
<<echo=FALSE, fig=TRUE>>=
plot(slice2.fm1, parm = 2)
@
\caption{Slices of the (negative) log-likelihood function (solid) for parameters in \code{fm1} for the wine data very close to the MLEs. Dashed lines (indistinguishable from the solid lines) indicate quadratic approximations to the log-likelihood function and vertical bars indicate the maximum likelihood estimates.}
\label{fig:slice2}
\end{figure}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Parameter accuracy}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

As discussed in section~\ref{sec:algorithm} the method independent error estimate provides an assessment of the accuracy with which the ML estimates of the parameters have been determined by the fitting algorithm. This error estimate is implemented in the \code{convergence} method, which we now illustrate on a model fit:
<<>>=
convergence(fm1)
@
The most important information is the number of correct decimals (\code{Cor.Dec}) and the number of significant digits (\code{Sig.Dig}) with which the parameters are determined. In this case all parameters are very accurately determined, so there is no reason to lower the convergence tolerance. The \code{logLik.error} shows that the error in the reported value of the log-likelihood is below $10^{-10}$, which is by far small enough that likelihood ratio tests based on this model are accurate.

Note that the assessment of the number of correctly determined decimals and significant digits is only reliable sufficiently close to the optimum, so in practice we caution against this assessment if the algorithm did not converge successfully.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Fitted values and predictions}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Several types of fitted values and predictions can be extracted from a CLM depending on how it is viewed. By \emph{fitted values} we denote the values ($i=1, \ldots, n$)
\begin{equation*}
  \hat{\tilde\pi}_i = \tilde\pi_i(\hat{\bm\psi})
\end{equation*}
that is, the values of $\tilde\pi_i$, cf., Equation~\ref{eq:clm-log-likelihood}, evaluated at the ML estimates $\hat{\bm\psi}$. These are the values returned by the \code{fitted} and \code{fitted.values} extractor methods and stored in the \code{fitted.values} component of the model fit.
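For example, the first few fitted values of \code{fm1} can be extracted with:
<<>>=
head(fitted(fm1))
@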
The values of $\pi_{ij}$ (cf., Equation~\ref{eq:multinom_pmf}) evaluated at the ML estimates of the parameters (i.e., $\hat\pi_{ij}$) can also be thought of as fitted values for the multinomially distributed variable $\bm Y_i^*$. These values can be obtained from the model fit by use of the \code{predict} method:
<<>>=
head(pred <- predict(fm1, newdata = subset(wine, select = -rating))$fit)
@
Note that the original data set should be supplied in the \code{newdata} argument \emph{without} the response variable (here \code{rating}). If the response variable is \emph{present} in \code{newdata}, predictions are produced only for the observed rating categories, and we get back the fitted values:
<<>>=
stopifnot(isTRUE(all.equal(fitted(fm1), t(pred)[t(col(pred) == wine$rating)])),
          isTRUE(all.equal(fitted(fm1), predict(fm1, newdata=wine)$fit)))
@

Class predictions are also available and defined here as the response class with the highest probability, that is, for the $i$'th observation the class prediction is the mode of $\bm\pi_{i}$. To obtain class predictions use \code{type = "class"} as illustrated in the following small table:
<<>>=
newData <- expand.grid(temp = levels(wine$temp),
                       contact = levels(wine$contact))
cbind(newData, round(predict(fm1, newdata=newData)$fit, 3),
      "class"=predict(fm1, newdata=newData, type="class")$fit)
@

Other definitions of class predictions can be applied, e.g., nearest mean predictions:
<<>>=
head(apply(pred, 1, function(x) round(weighted.mean(1:5, x))))
@
which in this case happens to be identical to the default class predictions.
<<echo=FALSE>>=
p1 <- apply(predict(fm1, newdata = subset(wine, select=-rating))$fit, 1,
            function(x) round(weighted.mean(1:5, x)))
p2 <- as.numeric(as.character(predict(fm1, type="class")$fit))
stopifnot(isTRUE(all.equal(p1, p2, check.attributes = FALSE)))
@

Standard errors and confidence intervals of predictions are also available, for example:
<<>>=
predictions <- predict(fm1, se.fit=TRUE, interval=TRUE)
head(do.call("cbind", predictions))
@
where the default 95\% confidence level can be changed with the \code{level} argument.

Here the standard errors of fitted values or predictions, $\hat{\tilde{\pi}} = \tilde{\pi}(\hat{\bm\psi})$, are obtained by application of the delta method:
\begin{equation*}
  \mathsf{Var}(\hat{\tilde{\bm\pi}}) = \bm C
  \mathsf{Var}(\hat{\bm\psi}) \bm C^\top, \quad
  \bm C = \frac{\partial \tilde{\bm\pi}(\bm\psi)}{\partial \bm\psi}
  \Big|_{\bm\psi = \hat{\bm\psi}}
\end{equation*}
where $\mathsf{Var}(\hat{\bm\psi})$ is the estimated variance-covariance matrix of the parameters $\bm\psi$ evaluated at the ML estimates $\hat{\bm\psi}$, as given by the inverse of the observed Fisher information matrix, and finally the standard errors are extracted as the square root of the diagonal elements of $\mathsf{Var}(\hat{\tilde{\bm\pi}})$.

Since symmetric confidence intervals for probabilities are not appropriate (unless perhaps when the probabilities are close to one half), a more generally applicable approach is to form symmetric Wald intervals on the logit scale and subsequently transform the confidence bounds to the probability scale.
\code{predict.clm} takes this approach and computes the standard error of $\hat\kappa_i = \mathrm{logit}(\hat{\tilde{\pi}}_i)$ by yet another application of the delta method:
\begin{equation*}
  \mathrm{se}(\hat\kappa_i) = \frac{\partial
    g(\hat{\tilde{\pi}}_i)}{\partial \hat{\tilde{\pi}}_i}
  \mathrm{se}(\hat{\tilde{\pi}}_i) =
  \frac{\mathrm{se}(\hat{\tilde{\pi}}_i)}{%
    \hat{\tilde{\pi}}_i(1 - \hat{\tilde{\pi}}_i)}, \quad
  g(\hat{\tilde{\pi}}_i) = \log
  \frac{\hat{\tilde{\pi}}_i}{1 - \hat{\tilde{\pi}}_i}.
\end{equation*}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Model identifiability}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Unidentifiable models or unidentifiable parameters may occur in CLMs for several reasons, some of which are special to the model class. In this section we describe issues around model identifiability and how they are handled by \code{ordinal::clm}.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Complete separation}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

In binary logistic regression the issue of \emph{complete separation} is well known. This may happen, for example, if only ``success'' or only ``failure'' is observed for a level of a treatment factor. In CLMs the issue may appear even when outcomes are observed in more than one response category. This can be illustrated using the \code{wine} dataset if we combine the three central categories:
<<>>=
wine <- within(wine, {
  rating_comb3 <- rating
  levels(rating_comb3) <- c("1", "2-4", "2-4", "2-4", "5")
})
ftable(rating_comb3 ~ temp, data=wine)
fm.comb3 <- clm(rating_comb3 ~ temp, data=wine)
summary(fm.comb3)
@
Here the true ML estimates of the coefficients for \code{temp} and the second threshold are at infinity, but the algorithm in \code{clm} terminates when the likelihood function is sufficiently flat. This means that the reported values of the coefficients for \code{temp} and the second threshold are arbitrary and will change if the convergence criteria are changed or a different optimization method is used. The standard errors of the coefficients are not available because the Hessian is effectively singular and so cannot be inverted to produce the variance-covariance matrix of the parameters. The ill-determined nature of the Hessian is seen from the very large condition number of the Hessian, \code{cond.H}.

Note, however, that while the model parameters cannot be uniquely determined, the likelihood of the model is well defined and as such it can be compared to the likelihood of other models. For example, we could compare it to a model that excludes \code{temp}
<<>>=
fm.comb3_b <- clm(rating_comb3 ~ 1, data=wine)
anova(fm.comb3, fm.comb3_b)
@
The difference in log-likelihood is substantial; however, the criteria for the validity of the likelihood ratio test are not fulfilled, so the $p$~value should not be taken at face value.

The complete-separation issue may also appear in less obvious situations. If, for example, we consider the following model allowing for nominal effects of \code{temp}, the issue shows up:
<<>>=
fm.nom3 <- clm(rating ~ contact, nominal = ~ temp, data=wine)
summary(fm.nom3)
@
Analytical detection of which coefficients suffer from unidentifiability due to \emph{complete separation} is a topic for future research and therefore unavailable in current versions of \pkg{ordinal}.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Aliased coefficients}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Aliased coefficients can occur in all kinds of models that build on a design matrix, including linear models and generalized linear models.
\code{lm} and \code{glm} determine the rank deficiency of the design matrix using the rank-revealing implementation of the QR-decomposition in \code{LINPACK} and display the aliased coefficients as \code{NA}s\footnote{If \code{singular.ok = TRUE}, which is the default.}. Though the QR decomposition is not used during iterations in \code{clm}, it is used initially to determine aliased coefficients. An example is provided using the \code{soup} data available in the \pkg{ordinal} package:
<<>>=
fm.soup <- clm(SURENESS ~ PRODID * DAY, data=soup)
summary(fm.soup)
@
The source of the singularity is revealed in the following table:
<<>>=
with(soup, table(DAY, PRODID))
@
which shows that the third \code{PRODID} was not presented on the third day.

The issue of aliased coefficients extends in CLMs to nominal effects, since the joint design matrix for location and nominal effects will be singular if the same variables are included in both location and nominal formulae. \code{clm} handles this by not estimating the offending coefficients in the location formula, as illustrated with the \code{fm.nom2} model fit in section~\ref{sec:nominal-effects}.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Over-parameterization}
\label{sec:over-parameterization}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The scope of model structures allowed in \code{clm} makes it possible to specify models which are over-parameterized in ways that do not lead to rank-deficient design matrices and as such are not easily detected before fitting the model. An example is given here which includes both additive (location) and multiplicative (scale) effects of \code{contact} for a binary response variable, but the issue can also occur with more than two response categories:
<<>>=
wine <- within(wine, {
  rating_comb2 <- rating
  levels(rating_comb2) <- c("1-2", "1-2", "3-5", "3-5", "3-5")
})
ftable(rating_comb2 ~ contact, data=wine)
fm.comb2 <- clm(rating_comb2 ~ contact, scale = ~ contact, data=wine)
summary(fm.comb2)
@
<<echo=FALSE, results=hide>>=
## Example with unidentified parameters with 3 response categories
## not shown in paper:
wine <- within(wine, {
  rating_comb3b <- rating
  levels(rating_comb3b) <- c("1-2", "1-2", "3", "4-5", "4-5")
})
wine$rating_comb3b[1] <- "4-5" # Need to remove the zero here to avoid inf MLE
ftable(rating_comb3b ~ temp + contact, data=wine)
fm.comb3_c <- clm(rating_comb3b ~ contact * temp,
                  scale=~contact * temp, nominal=~contact, data=wine)
summary(fm.comb3_c)
convergence(fm.comb3_c)
@

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Customized modelling}
\label{sec:customized-modelling}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Using the \code{doFit} argument, \code{clm} can be instructed to return a \emph{model environment} that we denote \code{rho}:
<<>>=
rho <- update(fm1, doFit=FALSE)
names(rho)
@
This environment holds a complete specification of the cumulative link model including design matrices \code{B1}, \code{B2}, \code{S} and other components. The environment also contains the cumulative distribution function that defines the inverse link function \code{pfun} and its first and second derivatives, i.e., the corresponding density function \code{dfun} and gradient \code{gfun}. Of direct interest here is the parameter vector \code{par} and functions that readily evaluate the negative log-likelihood (\code{clm.nll}), its gradient with respect to the parameters (\code{clm.grad}) and the Hessian (\code{clm.hess}).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Aliased coefficients}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Aliased coefficients can occur in all kinds of models that build on a design matrix, including linear models as well as generalized linear models. \code{lm} and \code{glm} determine the rank deficiency of the design matrix using the rank-revealing implementation of the QR decomposition in \code{LINPACK} and display the aliased coefficients as \code{NA}s\footnote{if \code{singular.ok = TRUE}, which is the default}. Though the QR decomposition is not used during iterations in \code{clm}, it is used initially to determine aliased coefficients. An example is provided using the \code{soup} data available in the \pkg{ordinal} package:
<<>>=
fm.soup <- clm(SURENESS ~ PRODID * DAY, data=soup)
summary(fm.soup)
@
The source of the singularity is revealed in the following table:
<<>>=
with(soup, table(DAY, PRODID))
@
which shows that the third \code{PRODID} was not presented on the third day.

The issue of aliased coefficients extends in CLMs to nominal effects since the joint design matrix for location and nominal effects will be singular if the same variables are included in both the location and nominal formulae. \code{clm} handles this by not estimating the offending coefficients in the location formula, as illustrated with the \code{fm.nom2} model fit in section~\ref{sec:nominal-effects}.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Over-parameterization}
\label{sec:over-parameterization}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The scope of model structures allowed in \code{clm} makes it possible to specify models which are over-parameterized in ways that do not lead to rank-deficient design matrices and as such are not easily detected before fitting the model. An example is given here which includes both additive (location) and multiplicative (scale) effects of \code{contact} for a binomial response variable, but the issue can also occur with more than two response categories:
<<>>=
wine <- within(wine, {
  rating_comb2 <- rating
  levels(rating_comb2) <- c("1-2", "1-2", "3-5", "3-5", "3-5")
})
ftable(rating_comb2 ~ contact, data=wine)
fm.comb2 <- clm(rating_comb2 ~ contact, scale = ~ contact, data=wine)
summary(fm.comb2)
@
<>=
## Example with unidentified parameters with 3 response categories
## not shown in paper:
wine <- within(wine, {
  rating_comb3b <- rating
  levels(rating_comb3b) <- c("1-2", "1-2", "3", "4-5", "4-5")
})
wine$rating_comb3b[1] <- "4-5" # Need to remove the zero here to avoid inf MLE
ftable(rating_comb3b ~ temp + contact, data=wine)
fm.comb3_c <- clm(rating_comb3b ~ contact * temp, scale=~contact * temp,
                  nominal=~contact, data=wine)
summary(fm.comb3_c)
convergence(fm.comb3_c)
@

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Customized modelling}
\label{sec:customized-modelling}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Using the \code{doFit} argument, \code{clm} can be instructed to return a \emph{model environment} that we denote \code{rho}:
<<>>=
rho <- update(fm1, doFit=FALSE)
names(rho)
@
This environment holds a complete specification of the cumulative link model, including design matrices \code{B1}, \code{B2}, \code{S} and other components. The environment also contains the cumulative distribution function that defines the inverse link function, \code{pfun}, and its first and second derivatives, i.e., the corresponding density function \code{dfun} and gradient \code{gfun}. Of direct interest here are the parameter vector \code{par} and functions that readily evaluate the negative log-likelihood (\code{clm.nll}), its gradient with respect to the parameters (\code{clm.grad}) and the Hessian (\code{clm.hess}).

The negative log-likelihood and its gradient at the starting values are therefore
<<>>=
rho$clm.nll(rho)
c(rho$clm.grad(rho))
@
Similarly, at the MLE they are:
<<>>=
rho$clm.nll(rho, par=coef(fm1))
print(c(rho$clm.grad(rho)), digits=3)
@
Note that the gradient function \code{clm.grad} assumes that \code{clm.nll} has been evaluated at the current parameter values; similarly, \code{clm.hess} assumes that \code{clm.grad} has been evaluated at the current parameter values. The NR algorithm in \pkg{ordinal} takes advantage of this so as to minimize the computational load.

If interest is in fitting a \emph{custom} CLM with, say, restrictions on the parameter space, this can be achieved by a combination of a general-purpose optimizer and the functions \code{clm.nll} and, optionally, \code{clm.grad}. Assume, for instance, we know that the regression parameters can be no larger than 2; then the model can be fitted with the following code:
<<>>=
nll <- function(par, envir) {
  envir$par <- par
  envir$clm.nll(envir)
}
grad <- function(par, envir) {
  envir$par <- par
  envir$clm.nll(envir)
  envir$clm.grad(envir)
}
nlminb(rho$par, nll, grad, upper=c(rep(Inf, 4), 2, 2), envir=rho)$par
@
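For comparison, the following sketch (not part of the original text) stores the constrained solution and places it beside the unconstrained estimates from \code{fm1}; only coefficients whose unconstrained estimates exceed the bound of 2 should be affected:
<<>>=
## Sketch: constrained versus unconstrained estimates; coefficients
## whose unconstrained ML estimates exceed 2 end up at the bound.
cfit <- nlminb(rho$par, nll, grad, upper=c(rep(Inf, 4), 2, 2), envir=rho)
round(cbind(constrained=cfit$par, unconstrained=coef(fm1)), 3)
@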
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Constrained partial proportional odds}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

A type of model which is not implemented in full generality in \pkg{ordinal} is the so-called \emph{constrained} partial proportional odds model proposed by \citet{peterson90}. These models impose restrictions on the nominal effects considered in section~\ref{sec:nominal-effects} and are well suited to illustrate the customizable modelling options available in the \pkg{ordinal} package.

We consider an example from \citet{peterson90} in which disease status is tabulated by smoking status:
<<>>=
artery <- data.frame(disease = factor(rep(0:4, 2), ordered = TRUE),
                     smoker = factor(rep(c("no", "yes"), each=5)),
                     freq = c(334, 99, 117, 159, 30, 350, 307, 345, 481, 67))
addmargins(xtabs(freq ~ smoker + disease, data = artery), margin = 2)
@
The overall odds ratio of smoking is
<<>>=
fm <- clm(disease ~ smoker, weights=freq, data=artery)
exp(fm$beta)
@
showing that overall the odds of a worse disease rating are twice as high for smokers than for non-smokers.

Allowing for nominal effects, we see that the log odds-ratio for smoking clearly changes with disease status, and that it does so in an almost linearly decreasing manner:
<<>>=
fm.nom <- clm(disease ~ 1, nominal = ~ smoker, weights=freq, data=artery,
              sign.nominal = "negative")
coef(fm.nom)[5:8]
@
\citet{peterson90} suggested a model which restricts the log odds-ratios to be linearly decreasing with disease status, modelling only the intercept (first threshold) and slope of the log odds-ratios:
<<>>=
coef(fm.lm <- lm(I(coef(fm.nom)[5:8]) ~ I(0:3)))
@
We can implement the log-likelihood of this model as follows. As starting values we combine parameter estimates from \code{fm.nom} and the linear model \code{fm.lm}, and finally we optimize the log-likelihood utilising the \code{fm.nom} model environment:
<<>>=
nll2 <- function(par, envir) {
  envir$par <- c(par[1:4], par[5] + par[6] * (0:3))
  envir$clm.nll(envir)
}
start <- unname(c(coef(fm.nom)[1:4], coef(fm.lm)))
fit <- nlminb(start, nll2, envir=update(fm.nom, doFit=FALSE))
round(fit$par[5:6], 2)
@
Thus the log odds-ratio is 1.02 at the first threshold and decreases linearly by 0.3 per disease category.
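As a check (again a sketch, not part of the original text), the four log odds-ratios implied by the constrained fit can be reconstructed from the fitted intercept and slope and compared with the unrestricted nominal-effects estimates:
<<>>=
## Sketch: structured log odds-ratios implied by the constrained fit
## next to the unrestricted estimates from fm.nom.
rbind(constrained = fit$par[5] + fit$par[6] * (0:3),
      unrestricted = coef(fm.nom)[5:8])
@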
%% -- Illustrations ------------------------------------------------------------

%% - Virtually all JSS manuscripts list source code along with the generated
%%   output. The style files provide dedicated environments for this.
%% - In R, the environments {Sinput} and {Soutput} - as produced by Sweave() or
%%   knitr using the render_sweave() hook - are used (without the need to
%%   load Sweave.sty).
%% - Equivalently, {CodeInput} and {CodeOutput} can be used.
%% - The code input should use "the usual" command prompt in the respective
%%   software system.
%% - For R code, the prompt "R> " should be used with "+ " as the
%%   continuation prompt.
%% - Comments within the code chunks should be avoided - these should be made
%%   within the regular LaTeX text.

%% -- Summary/conclusions/discussion -------------------------------------------

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Conclusions}
\label{sec:conclusions}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Cumulative link models constitute a very rich model class positioned, in a sense, between perhaps the two most important model classes in statistics: linear models and logistic regression models. The greater flexibility of CLMs relative to binary logistic regression models also facilitates checking assumptions such as the partial proportional odds assumption. Non-linear structures such as scale effects arise naturally in a latent variable interpretation. In addition to nominal effects and the non-linear scale effects, the ordered nature of the thresholds gives rise to computational challenges which we have described and addressed in the \pkg{ordinal} package.

In addition to computational challenges, practical data analysis with CLMs can also be challenging. In our experience a top-down approach in which a ``full'' model is fitted and gradually simplified is often problematic, not only because this easily leads to unidentifiable models but also because there are many different ways in which models can be reduced or expanded. A more pragmatic approach is often preferred; understanding the data through plots, tables, and even linear models can aid in finding a suitable intermediate ordinal starting model. Attempts to identify a ``correct'' model will also often lead to frustration; the greater the model framework, the greater the risk that there are multiple models which fit the data (almost) equally well. It is well-known statistical wisdom that with enough data many goodness-of-fit tests become sensitive to even minor deviations of little practical relevance. This is particularly true for tests of partial proportional odds; in the author's experience almost all CLMs on real data show some evidence of non-proportional odds for one or more variables, while the most useful representation of the data is often a model that simply assumes proportional odds.

%% -- Optional special unnumbered sections -------------------------------------

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section*{Computational details}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%
% \begin{leftbar}
% If necessary or useful, information about certain computational details
% such as version numbers, operating systems, or compilers could be included
% in an unnumbered section. Also, auxiliary packages (say, for visualizations,
% maps, tables, \dots) that are not cited in the main text can be credited here.
% \end{leftbar}

The results in this paper were obtained using \proglang{R}~\Sexpr{paste(R.Version()[6:7], collapse = ".")} with \pkg{ordinal}, version~\Sexpr{packageVersion("ordinal")}. \proglang{R} itself and all packages used are available from the Comprehensive \proglang{R} Archive Network (CRAN) at \url{https://CRAN.R-project.org/}.

% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% \section*{Acknowledgments}
% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% \begin{leftbar}
%
% All acknowledgments (note the AE spelling) should be collected in this
% unnumbered section before the references. It may contain the usual information
% about funding and feedback from colleagues/reviewers/etc. Furthermore,
% information such as relative contributions of the authors may be added here
% (if any).
% \end{leftbar}

%% -- Bibliography -------------------------------------------------------------
%% - References need to be provided in a .bib BibTeX database.
%% - All references should be made with \cite, \citet, \citep, \citealp etc.
%%   (and never hard-coded). See the FAQ for details.
%% - JSS-specific markup (\proglang, \pkg, \code) should be used in the .bib.
%% - Titles in the .bib should be in title case.
%% - DOIs should be included where available.

\bibliography{clm_article_refs}

%% -- Appendix (if any) --------------------------------------------------------
%% - After the bibliography with page break.
%% - With proper section titles and _not_ just "Appendix".

% \newpage
% \begin{appendix}
%
% \section{More technical details} \label{app:technical}
%
% \begin{leftbar}
% Appendices can be included after the bibliography (with a page break). Each
% section within the appendix should have a proper section title (rather than
% just \emph{Appendix}).
%
% For more technical style details, please check out JSS's style FAQ at
% \url{https://www.jstatsoft.org/pages/view/style#frequently-asked-questions}
% which includes the following topics:
% \begin{itemize}
%   \item Title vs.\ sentence case.
%   \item Graphics formatting.
%   \item Naming conventions.
%   \item Turning JSS manuscripts into \proglang{R} package vignettes.
%   \item Trouble shooting.
%   \item Many other potentially helpful details\dots
% \end{itemize}
% \end{leftbar}
%
%
% \section[Using BibTeX]{Using \textsc{Bib}{\TeX}} \label{app:bibtex}
%
% \begin{leftbar}
% References need to be provided in a \textsc{Bib}{\TeX} file (\code{.bib}). All
% references should be made with \verb|\cite|, \verb|\citet|, \verb|\citep|,
% \verb|\citealp| etc.\ (and never hard-coded). These commands yield different
% formats of author-year citations and allow the inclusion of additional details
% (e.g., pages, chapters, \dots) in brackets. In case you are not familiar with
% these commands see the JSS style FAQ for details.
%
% Cleaning up \textsc{Bib}{\TeX} files is a somewhat tedious task -- especially
% when acquiring the entries automatically from mixed online sources. However,
% it is important that information is complete and presented in a consistent
% style to avoid confusion. JSS requires the following format.
% \begin{itemize}
%   \item JSS-specific markup (\verb|\proglang|, \verb|\pkg|, \verb|\code|) should
%     be used in the references.
%   \item Titles should be in title case.
%   \item Journal titles should not be abbreviated and in title case.
%   \item DOIs should be included where available.
%   \item Software should be properly cited as well. For \proglang{R} packages
%     \code{citation("pkgname")} typically provides a good starting point.
% \end{itemize} % \end{leftbar} % % \end{appendix} %% ----------------------------------------------------------------------------- \end{document} ordinal/inst/doc/clmm2_tutorial.Rnw0000644000176200001440000004375212431104052017054 0ustar liggesusers\documentclass[a4paper]{article} \usepackage{amsmath}%the AMS math extension of LaTeX. \usepackage{amssymb}%the extended AMS math symbols. %% \usepackage{amsthm} \usepackage{bm}%Use 'bm.sty' to get `bold math' symbols \usepackage{natbib} \usepackage[T1]{fontenc} \usepackage[utf8]{inputenc} \usepackage{Sweave} \usepackage{url} \usepackage{float}%Use `float.sty' \usepackage[left=3.5cm,right=3.5cm]{geometry} \usepackage{algorithmic} \usepackage[amsmath,thmmarks,standard,thref]{ntheorem} %%\VignetteIndexEntry{clmm2 tutorial} %%\VignetteDepends{ordinal, xtable} \title{A Tutorial on fitting Cumulative Link Mixed Models with \texttt{clmm2} from the \textsf{ordinal} Package} \author{Rune Haubo B Christensen} %% \numberwithin{equation}{section} \setlength{\parskip}{2mm}%.8\baselineskip} \setlength{\parindent}{0in} %% \DefineVerbatimEnvironment{Sinput}{Verbatim}%{} %% {fontshape=sl, xleftmargin=1em} %% \DefineVerbatimEnvironment{Soutput}{Verbatim}%{} %% {xleftmargin=1em} %% \DefineVerbatimEnvironment{Scode}{Verbatim}%{} %% {fontshape=sl, xleftmargin=1em} \fvset{listparameters={\setlength{\topsep}{0pt}}} %% \fvset{listparameters={\setlength{\botsep}{0pt}}} \renewenvironment{Schunk}{\vspace{-1mm}}{\vspace{-1mm}} %RE-DEFINE marginpar \setlength{\marginparwidth}{1in} \let\oldmarginpar\marginpar \renewcommand\marginpar[1]{\oldmarginpar[\-\raggedleft\tiny #1]% {\tiny #1}} %uncomment to _HIDE_MARGINPAR_: %\renewcommand\marginpar[1]{} \newcommand{\var}{\textup{var}} \newcommand{\I}{\mathcal{I}} \newcommand{\bta}{\bm \theta} \newcommand{\ta}{\theta} \newcommand{\tah}{\hat \theta} \newcommand{\di}{~\textup{d}} \newcommand{\td}{\textup{d}} \newcommand{\Si}{\Sigma} \newcommand{\si}{\sigma} \newcommand{\bpi}{\bm \pi} \newcommand{\bmeta}{\bm \eta} \newcommand{\tdots}{\hspace{10mm} \texttt{....}} \newcommand{\FL}[1]{\fvset{firstline= #1}} \newcommand{\LL}[1]{\fvset{lastline= #1}} \newcommand{\s}{\square} \newcommand{\bs}{\blacksquare} % figurer bagerst i artikel %% \usepackage[tablesfirst, nolists]{endfloat} %% \renewcommand{\efloatseparator}{\vspace{.5cm}} \theoremstyle{plain} %% {break} \theoremseparator{:} \theoremsymbol{{\tiny $\square$}} %%\theoremstyle{plain} \theorembodyfont{\small} \theoremindent5mm \renewtheorem{example}{Example} %% \newtheoremstyle{example}{\topsep}{\topsep}% %% {}% Body font %% {}% Indent amount (empty = no indent, \parindent = para indent) %% {\bfseries}% Thm head font %% {}% Punctuation after thm head %% {\newline}% Space after thm head (\newline = linebreak) %% {\thmname{#1}\thmnumber{ #2}\thmnote{ #3}}% Thm head spec %% %% \theoremstyle{example} %% %% \newtheorem{example}{Example}[subsection] %% \newtheorem{example}{Example}[section] \usepackage{lineno} % \linenumbers \newcommand*\patchAmsMathEnvironmentForLineno[1]{% \expandafter\let\csname old#1\expandafter\endcsname\csname #1\endcsname \expandafter\let\csname oldend#1\expandafter\endcsname\csname end#1\endcsname \renewenvironment{#1}% {\linenomath\csname old#1\endcsname}% {\csname oldend#1\endcsname\endlinenomath}}% \newcommand*\patchBothAmsMathEnvironmentsForLineno[1]{% \patchAmsMathEnvironmentForLineno{#1}% \patchAmsMathEnvironmentForLineno{#1*}}% \AtBeginDocument{% \patchBothAmsMathEnvironmentsForLineno{equation}% \patchBothAmsMathEnvironmentsForLineno{align}% 
\patchBothAmsMathEnvironmentsForLineno{flalign}%
\patchBothAmsMathEnvironmentsForLineno{alignat}%
\patchBothAmsMathEnvironmentsForLineno{gather}%
\patchBothAmsMathEnvironmentsForLineno{multline}%
}

\begin{document}
\bibliographystyle{chicago}
\maketitle
\begin{abstract}
  It is shown by example how a cumulative link mixed model is fitted with the \texttt{clmm2} function in package \textsf{ordinal}. Model interpretation and inference are briefly discussed. A tutorial for the more recent \texttt{clmm} function is work in progress.
\end{abstract}
%% \newpage
%% \tableofcontents
%% \newpage

\SweaveOpts{echo=TRUE, results=verb, width=4.5, height=4.5}
\SweaveOpts{prefix.string=figs}
\fvset{listparameters={\setlength{\topsep}{0pt}}, gobble=0, fontsize=\small}
%% \fvset{gobble=0, fontsize=\small}
\setkeys{Gin}{width=.49\textwidth}

<>=
## Load common packages, functions and set settings:
library(ordinal)
library(xtable)
## RUN <- FALSE #redo computations and write .RData files
## Change options:
op <- options() ## To be able to reset settings
options("digits" = 7)
options(help_type = "html")
## options("width" = 75)
options("SweaveHooks" = list(fig=function() par(mar=c(4,4,.5,0)+.5)))
options(continue=" ")
@

We will consider the data on the bitterness of wine from \citet{randall89} presented in Table~\ref{tab:winedata} and available as the object \texttt{wine} in package \textsf{ordinal}. The data were also analyzed with mixed effects models by \citet{tutz96}. The following gives an impression of the wine data object:
<<>>=
data(wine)
head(wine)
str(wine)
@
The data represent a factorial experiment on factors determining the bitterness of wine with 1 = ``least bitter'' and 5 = ``most bitter''. Two treatment factors (temperature and contact) each have two levels. Temperature and contact between juice and skins can be controlled when crushing grapes during wine production. Nine judges each assessed wine from two bottles from each of the four treatment conditions, hence there are 72 observations in all. For more information see the manual entry for the wine data: \texttt{help(wine)}.

\begin{table}
  \centering
  \caption{Ratings of the bitterness of some white wines. Data are adopted from \citet{randall89}.}
  \label{tab:winedata}
  \begin{tabular}{lllrrrrrrrrr}
    \hline
    & & & \multicolumn{9}{c}{Judge} \\
    \cline{4-12}
<>=
data(wine)
temp.contact.bottle <- with(wine, temp:contact:bottle)[drop=TRUE]
tab <- xtabs(as.numeric(rating) ~ temp.contact.bottle + judge, data=wine)
class(tab) <- "matrix"
attr(tab, "call") <- NULL
mat <- cbind(rep(c("cold", "warm"), each = 4),
             rep(rep(c("no", "yes"), each=2), 2),
             1:8, tab)
colnames(mat) <- c("Temperature", "Contact", "Bottle", 1:9)
xtab <- xtable(mat)
print(xtab, only.contents=TRUE, include.rownames=FALSE,
      sanitize.text.function = function(x) x)
@
  \end{tabular}
\end{table}

We will fit the following cumulative link mixed model to the wine data:
\begin{equation}
  \label{eq:mixedModel}
  \begin{array}{c}
    \textup{logit}(P(Y_i \leq j)) = \theta_j - \beta_1 (\mathtt{temp}_i) -
    \beta_2(\mathtt{contact}_i) - u(\mathtt{judge}_i) \\
    i = 1,\ldots, n, \quad j = 1, \ldots, J-1
  \end{array}
\end{equation}
This is a model for the cumulative probability of the $i$th rating falling in the $j$th category or below, where $i$ indexes all observations and $j = 1, \ldots, J$ indexes the response categories ($J = 5$). $\{\theta_j\}$ are known as threshold parameters or cut-points.
We take the judge effects to be random, and assume that the judge effects are IID normal: $u(\mathtt{judge}_i) \sim N(0, \sigma_u^2)$.

We fit this model with the \texttt{clmm2} function in package \textsf{ordinal}. Here we save the fitted \texttt{clmm2} model in the object \texttt{fm1} (short for \texttt{f}itted \texttt{m}odel \texttt{1}) and \texttt{print} the model by simply typing its name:
<<>>=
fm1 <- clmm2(rating ~ temp + contact, random=judge, data=wine)
fm1
@
Maximum likelihood estimates of the parameters are provided using the Laplace approximation to compute the likelihood function. A more accurate approximation is provided by the adaptive Gauss-Hermite quadrature method. Here we use 10 quadrature nodes and use the \texttt{summary} method to display additional information:
<<>>=
fm2 <- clmm2(rating ~ temp + contact, random=judge, data=wine,
             Hess=TRUE, nAGQ=10)
summary(fm2)
@
The small changes in the parameter estimates show that the Laplace approximation was in fact rather accurate in this case. Observe that we set the option \texttt{Hess = TRUE}. This is needed if we want to use the \texttt{summary} method since the Hessian is needed to compute standard errors of the model coefficients.

The results contain the maximum likelihood estimates of the parameters:
\begin{equation}
  \label{eq:parameters}
  \hat\beta_1 = 3.06, ~~\hat\beta_2 = 1.83, ~~\hat\sigma_u^2 = 1.29 = 1.13^2,
  ~~\{\hat\theta_j\} = [-1.62,~ 1.51,~ 4.23,~ 6.09].
\end{equation}
Observe that the number under \texttt{Std.Dev} for the random effect is \textbf{not} the standard error of the random effects variance, \texttt{Var}. Rather, it is the standard deviation of the random effects, i.e., it is the square root of the variance. In our example $\sqrt{1.29} \simeq 1.13$.

The condition number of the Hessian measures the empirical identifiability of the model. High numbers, say larger than $10^4$ or $10^6$, indicate that the model is ill defined. This would indicate that the model can be simplified, that possibly some parameters are not identifiable, and that optimization of the model can be difficult. In this case the condition number of the Hessian does not indicate a problem with the model.

The coefficients for \texttt{temp} and \texttt{contact} are positive, indicating that higher temperature and more contact increase the bitterness of wine, i.e., rating in higher categories is more likely. The odds ratio of the event $Y \geq j$ is $\exp(\beta_{\textup{treatment}})$, thus the odds ratio of bitterness being rated in category $j$ or above at warm relative to cold temperatures is
<<>>=
exp(coef(fm2)[5])
@
The $p$-values for the location coefficients provided by the \texttt{summary} method are based on the so-called Wald statistic. More accurate tests are provided by likelihood ratio tests. These can be obtained with the \texttt{anova} method; for example, the likelihood ratio test of \texttt{contact} is
<<>>=
fm3 <- clmm2(rating ~ temp, random=judge, data=wine, nAGQ=10)
anova(fm3, fm2)
@
which in this case is slightly more significant. The Wald test is not reliable for variance parameters, so the \texttt{summary} method does not provide a test of $\sigma_u$, but a likelihood ratio test can be obtained with \texttt{anova}:
<<>>=
fm4 <- clm2(rating ~ temp + contact, data=wine)
anova(fm4, fm2)
@
showing that the judge term is significant. Since this test of $\sigma_u = 0$ is on the boundary of the parameter space (a variance cannot be negative), it is often argued that a more correct $p$-value is obtained by halving the $p$-value produced by the conventional likelihood ratio test. In this case halving the $p$-value is of little relevance.
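The halved $p$-value is easily computed by hand; the following sketch (not part of the original text) assumes that the fitted \texttt{clm2}/\texttt{clmm2} objects store the maximized log-likelihood in their \texttt{logLik} components:
<<>>=
## Sketch: halved boundary-corrected p-value for the test of sigma_u = 0,
## assuming the fits store the log-likelihood in their logLik components.
LR <- 2 * (fm2$logLik - fm4$logLik)
pchisq(LR, df=1, lower.tail=FALSE) / 2
@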
A profile likelihood confidence interval of $\sigma_u$ is obtained with:
<<>>=
pr2 <- profile(fm2, range=c(.1, 4), nSteps=30, trace=0)
confint(pr2)
@
The profile likelihood can also be plotted:
<>=
plot(pr2)
@
The result is shown in Fig.~\ref{fig:PRsigma_u} where horizontal lines indicate 95\% and 99\% confidence bounds. Clearly the profile likelihood function is asymmetric, and symmetric confidence intervals would be inaccurate.
\begin{figure}
  \centering
<>=
<>
@
\caption{Profile likelihood of $\sigma_u$.}
\label{fig:PRsigma_u}
\end{figure}

The judge effects, $u(\mathtt{judge}_i)$, are not parameters, so they cannot be \emph{estimated} in the conventional sense, but a ``best guess'' is provided by the \emph{conditional modes}. Similarly the \emph{conditional variance} provides an uncertainty measure of the conditional modes. These quantities are included in \texttt{clmm2} objects as the \texttt{ranef} and \texttt{condVar} components. The following code generates the plot in Fig.~\ref{fig:ranef} illustrating judge effects via conditional modes with 95\% confidence intervals based on the conditional variance:
<>=
ci <- fm2$ranef + qnorm(0.975) * sqrt(fm2$condVar) %o% c(-1, 1)
ord.re <- order(fm2$ranef)
ci <- ci[order(fm2$ranef),]
plot(1:9, fm2$ranef[ord.re], axes=FALSE, ylim=range(ci),
     xlab="Judge", ylab="Judge effect")
axis(1, at=1:9, labels = ord.re)
axis(2)
for(i in 1:9) segments(i, ci[i,1], i, ci[i, 2])
abline(h = 0, lty=2)
@
The seventh judge gave the lowest ratings of bitterness while the first judge gave the highest ratings of bitterness. The significant judge effect indicates that judges perceived the bitterness of the wines differently. Two natural interpretations are that either a bitterness of, say, 3 means different things to different judges, or the judges actually perceived the bitterness of the wines differently. Possibly both effects play their part.
\begin{figure}
  \centering
<>=
<>
@
\caption{Judge effects given by conditional modes with 95\% confidence intervals based on the conditional variance.}
\label{fig:ranef}
\end{figure}

The fitted or predicted probabilities can be obtained with the judge effects at their conditional modes or for an average judge ($u = 0$). The former are available with \texttt{fitted(fm)} or with \texttt{predict(fm)}, where \texttt{fm} is a \texttt{f}itted \texttt{m}odel object. In our example we get
<<>>=
head(cbind(wine, fitted(fm2)))
@
Predicted probabilities for an average judge can be obtained by including the data used to fit the model in the \texttt{newdata} argument of \texttt{predict}:
<<>>=
head(cbind(wine, pred=predict(fm2, newdata=wine)))
@
Model~\eqref{eq:mixedModel} says that for an average judge at cold temperature the cumulative probability of a bitterness rating in category $j$ or below is
\begin{equation*}
  P(Y_i \leq j) = \textup{logit}^{-1} [ \theta_j - \beta_2(\mathtt{contact}_i) ]
\end{equation*}
since $u$ is set to zero and $\beta_1(\mathtt{temp}_i) = 0$ at cold conditions. Further, $\textup{logit}^{-1}(\eta) = 1 / [1 + \exp(-\eta)]$ is the cumulative distribution function of the logistic distribution available as the \texttt{plogis} function.
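The identity between the inverse logit and \texttt{plogis} can be verified numerically; the following sketch is not part of the original text:
<<>>=
## Sketch verifying the inverse logit identity numerically:
eta <- seq(-2, 2, by=0.5)
all.equal(plogis(eta), 1 / (1 + exp(-eta)))
@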
The (non-cumulative) probability of a bitterness rating in category $j$ is $\pi_j = P(Y_i \leq j) - P(Y_i \leq j-1)$; for instance, the probability of a bitterness rating in the third category at these conditions can be computed as
<<>>=
plogis(fm2$Theta[3] - fm2$beta[2]) - plogis(fm2$Theta[2] - fm2$beta[2])
@
This corresponds to the third entry of \texttt{predict(fm2, newdata=wine)} given above.

Judge effects are random and normally distributed, so an average judge effect is 0. Extreme judge effects, say the 5th and 95th percentile judge effects, are given by
<<>>=
qnorm(0.95) * c(-1, 1) * fm2$stDev
@
At the baseline experimental conditions (cold and no contact) the probabilities of bitterness ratings in the five categories for a 5th percentile judge are
<<>>=
pred <- function(eta, theta, cat = 1:(length(theta)+1), inv.link = plogis)
{
  Theta <- c(-1e3, theta, 1e3)
  sapply(cat, function(j)
    inv.link(Theta[j+1] - eta) - inv.link(Theta[j] - eta) )
}
pred(qnorm(0.05) * fm2$stDev, fm2$Theta)
@
We can compute these probabilities for average, 5th and 95th percentile judges at the four experimental conditions. The following code plots these probabilities and the results are shown in Fig.~\ref{fig:ratingProb}.
<>=
mat <- expand.grid(judge = qnorm(0.95) * c(-1, 0, 1) * fm2$stDev,
                   contact = c(0, fm2$beta[2]),
                   temp = c(0, fm2$beta[1]))
pred.mat <- pred(eta=rowSums(mat), theta=fm2$Theta)
lab <- paste("contact=", rep(levels(wine$contact), 2), ", ",
             "temp=", rep(levels(wine$temp), each=2), sep="")
par(mfrow=c(2, 2))
for(k in c(1, 4, 7, 10)) {
  plot(1:5, pred.mat[k,], lty=2, type = "l", ylim=c(0,1),
       xlab="Bitterness rating scale", axes=FALSE,
       ylab="Probability", main=lab[ceiling(k/3)], las=1)
  axis(1); axis(2)
  lines(1:5, pred.mat[k+1, ], lty=1)
  lines(1:5, pred.mat[k+2, ], lty=3)
  legend("topright", c("avg. judge", "5th %-tile judge",
                       "95th %-tile judge"), lty=1:3, bty="n")
}
@
\begin{figure}
  \centering
<>=
k <- 1
plot(1:5, pred.mat[k,], lty=2, type = "l", ylim=c(0,1),
     xlab="Bitterness rating scale", axes=FALSE,
     ylab="Probability", main=lab[ceiling(k/3)], las=1)
axis(1); axis(2)
lines(1:5, pred.mat[k+1, ], lty=1)
lines(1:5, pred.mat[k+2, ], lty=3)
legend("topright", c("avg. judge", "5th %-tile judge",
                     "95th %-tile judge"), lty=1:3, bty="n")
@
<>=
k <- 4
plot(1:5, pred.mat[k,], lty=2, type = "l", ylim=c(0,1),
     xlab="Bitterness rating scale", axes=FALSE,
     ylab="Probability", main=lab[ceiling(k/3)], las=1)
axis(1); axis(2)
lines(1:5, pred.mat[k+1, ], lty=1)
lines(1:5, pred.mat[k+2, ], lty=3)
legend("topright", c("avg. judge", "5th %-tile judge",
                     "95th %-tile judge"), lty=1:3, bty="n")
@
<>=
k <- 7
plot(1:5, pred.mat[k,], lty=2, type = "l", ylim=c(0,1),
     xlab="Bitterness rating scale", axes=FALSE,
     ylab="Probability", main=lab[ceiling(k/3)], las=1)
axis(1); axis(2)
lines(1:5, pred.mat[k+1, ], lty=1)
lines(1:5, pred.mat[k+2, ], lty=3)
legend("topright", c("avg. judge", "5th %-tile judge",
                     "95th %-tile judge"), lty=1:3, bty="n")
@
<>=
k <- 10
plot(1:5, pred.mat[k,], lty=2, type = "l", ylim=c(0,1),
     xlab="Bitterness rating scale", axes=FALSE,
     ylab="Probability", main=lab[ceiling(k/3)], las=1)
axis(1); axis(2)
lines(1:5, pred.mat[k+1, ], lty=1)
lines(1:5, pred.mat[k+2, ], lty=3)
legend("topright", c("avg.
judge", "5th %-tile judge", "95th %-tile judge"), lty=1:3, bty="n") @ \caption{Rating probabilities for average and extreme judges at different experimental conditions.} \label{fig:ratingProb} \end{figure} At constant experimental conditions the odds ratio for a bitterness rating in category $j$ or above for a 95th percentile judge relative to a 5th percentile judge is <<>>= exp(2*qnorm(0.95) * fm2$stDev) @ The differences between judges can also be expressed in terms of the interquartile range: the odds ratio for a bitterness rating in category $j$ or above for a third quartile judge relative to a first quartile judge is <<>>= exp(2*qnorm(0.75) * fm2$stDev) @ \newpage \bibliography{ordinal} %% \newpage \end{document} <>= @ ordinal/inst/doc/clm_article.R0000644000176200001440000003357113575515117016050 0ustar liggesusers### R code from vignette source 'clm_article.Rnw' ################################################### ### code chunk number 1: preliminaries ################################################### options(prompt = "R> ", continue = "+ ", width = 70, useFancyQuotes = FALSE) library("ordinal") library("xtable") ################################################### ### code chunk number 2: clm_article.Rnw:792-794 ################################################### clm_args <- gsub("function ", "clm", deparse(args(clm))) cat(paste(clm_args[-length(clm_args)], "\n")) ################################################### ### code chunk number 3: clm_article.Rnw:807-809 ################################################### cc_args <- gsub("function ", "clm.control", deparse(args(clm.control))) cat(paste(cc_args[-length(cc_args)], "\n")) ################################################### ### code chunk number 4: clm_article.Rnw:840-850 ################################################### ## data(wine) tab <- with(wine, table(temp:contact, rating)) mat <- cbind(rep(c("cold", "warm"), each = 2), rep(c("no", "yes"), 2), tab) colnames(mat) <- c("Temperature", "Contact", paste("~~", 1:5, sep = "")) xtab <- xtable(mat) print(xtab, only.contents=TRUE, include.rownames=FALSE, sanitize.text.function = function(x) x) ################################################### ### code chunk number 5: clm_article.Rnw:878-881 ################################################### library("ordinal") fm1 <- clm(rating ~ temp + contact, data=wine) summary(fm1) ################################################### ### code chunk number 6: clm_article.Rnw:932-933 ################################################### anova(fm1, type="III") ################################################### ### code chunk number 7: clm_article.Rnw:937-939 ################################################### fm2 <- clm(rating ~ temp, data=wine) anova(fm2, fm1) ################################################### ### code chunk number 8: clm_article.Rnw:945-946 ################################################### drop1(fm1, test="Chi") ################################################### ### code chunk number 9: clm_article.Rnw:951-953 ################################################### fm0 <- clm(rating ~ 1, data=wine) add1(fm0, scope = ~ temp + contact, test="Chi") ################################################### ### code chunk number 10: clm_article.Rnw:957-958 ################################################### confint(fm1) ################################################### ### code chunk number 11: clm_article.Rnw:994-996 ################################################### fm.nom <- clm(rating ~ temp, nominal = ~ contact, data=wine) 
summary(fm.nom) ################################################### ### code chunk number 12: clm_article.Rnw:1026-1027 ################################################### fm.nom$Theta ################################################### ### code chunk number 13: clm_article.Rnw:1036-1037 ################################################### anova(fm1, fm.nom) ################################################### ### code chunk number 14: clm_article.Rnw:1048-1049 ################################################### fm.nom2 <- clm(rating ~ temp + contact, nominal = ~ contact, data=wine) ################################################### ### code chunk number 15: clm_article.Rnw:1052-1053 ################################################### fm.nom2 ################################################### ### code chunk number 16: clm_article.Rnw:1057-1058 ################################################### nominal_test(fm1) ################################################### ### code chunk number 17: clm_article.Rnw:1077-1079 ################################################### fm.sca <- clm(rating ~ temp + contact, scale = ~ temp, data=wine) summary(fm.sca) ################################################### ### code chunk number 18: clm_article.Rnw:1084-1085 ################################################### scale_test(fm1) ################################################### ### code chunk number 19: clm_article.Rnw:1107-1110 ################################################### fm.equi <- clm(rating ~ temp + contact, data=wine, threshold="equidistant") summary(fm.equi) ################################################### ### code chunk number 20: clm_article.Rnw:1117-1118 ################################################### drop(fm.equi$tJac %*% coef(fm.equi)[c("threshold.1", "spacing")]) ################################################### ### code chunk number 21: clm_article.Rnw:1125-1126 ################################################### mean(diff(coef(fm1)[1:4])) ################################################### ### code chunk number 22: clm_article.Rnw:1132-1133 ################################################### anova(fm1, fm.equi) ################################################### ### code chunk number 23: profileLikelihood ################################################### pr1 <- profile(fm1, alpha=1e-4) plot(pr1) ################################################### ### code chunk number 24: prof1 ################################################### plot(pr1, which.par=1) ################################################### ### code chunk number 25: prof2 ################################################### plot(pr1, which.par=2) ################################################### ### code chunk number 26: clm_article.Rnw:1204-1207 ################################################### slice.fm1 <- slice(fm1, lambda = 5) par(mfrow = c(2, 3)) plot(slice.fm1) ################################################### ### code chunk number 27: slice11 ################################################### plot(slice.fm1, parm = 1) ################################################### ### code chunk number 28: slice12 ################################################### plot(slice.fm1, parm = 2) ################################################### ### code chunk number 29: slice13 ################################################### plot(slice.fm1, parm = 3) ################################################### ### code chunk number 30: slice14 ################################################### plot(slice.fm1, parm 
= 4) ################################################### ### code chunk number 31: slice15 ################################################### plot(slice.fm1, parm = 5) ################################################### ### code chunk number 32: slice16 ################################################### plot(slice.fm1, parm = 6) ################################################### ### code chunk number 33: slice2 ################################################### slice2.fm1 <- slice(fm1, parm = 4:5, lambda = 1e-5) par(mfrow = c(1, 2)) plot(slice2.fm1) ################################################### ### code chunk number 34: slice24 ################################################### plot(slice2.fm1, parm = 1) ################################################### ### code chunk number 35: slice25 ################################################### plot(slice2.fm1, parm = 2) ################################################### ### code chunk number 36: clm_article.Rnw:1268-1269 ################################################### convergence(fm1) ################################################### ### code chunk number 37: clm_article.Rnw:1295-1296 ################################################### head(pred <- predict(fm1, newdata = subset(wine, select = -rating))$fit) ################################################### ### code chunk number 38: clm_article.Rnw:1300-1302 ################################################### stopifnot(isTRUE(all.equal(fitted(fm1), t(pred)[t(col(pred) == wine$rating)])), isTRUE(all.equal(fitted(fm1), predict(fm1, newdata=wine)$fit))) ################################################### ### code chunk number 39: clm_article.Rnw:1305-1309 ################################################### newData <- expand.grid(temp = levels(wine$temp), contact = levels(wine$contact)) cbind(newData, round(predict(fm1, newdata=newData)$fit, 3), "class"=predict(fm1, newdata=newData, type="class")$fit) ################################################### ### code chunk number 40: clm_article.Rnw:1312-1313 ################################################### head(apply(pred, 1, function(x) round(weighted.mean(1:5, x)))) ################################################### ### code chunk number 41: clm_article.Rnw:1316-1320 ################################################### p1 <- apply(predict(fm1, newdata = subset(wine, select=-rating))$fit, 1, function(x) round(weighted.mean(1:5, x))) p2 <- as.numeric(as.character(predict(fm1, type="class")$fit)) stopifnot(isTRUE(all.equal(p1, p2, check.attributes = FALSE))) ################################################### ### code chunk number 42: clm_article.Rnw:1325-1327 ################################################### predictions <- predict(fm1, se.fit=TRUE, interval=TRUE) head(do.call("cbind", predictions)) ################################################### ### code chunk number 43: clm_article.Rnw:1361-1368 ################################################### wine <- within(wine, { rating_comb3 <- rating levels(rating_comb3) <- c("1", "2-4", "2-4", "2-4", "5") }) ftable(rating_comb3 ~ temp, data=wine) fm.comb3 <- clm(rating_comb3 ~ temp, data=wine) summary(fm.comb3) ################################################### ### code chunk number 44: clm_article.Rnw:1373-1375 ################################################### fm.comb3_b <- clm(rating_comb3 ~ 1, data=wine) anova(fm.comb3, fm.comb3_b) ################################################### ### code chunk number 45: clm_article.Rnw:1380-1382 
################################################### fm.nom2 <- clm(rating ~ contact, nominal = ~ temp, data=wine) summary(fm.nom2) ################################################### ### code chunk number 46: clm_article.Rnw:1393-1395 ################################################### fm.soup <- clm(SURENESS ~ PRODID * DAY, data=soup) summary(fm.soup) ################################################### ### code chunk number 47: clm_article.Rnw:1398-1399 ################################################### with(soup, table(DAY, PRODID)) ################################################### ### code chunk number 48: clm_article.Rnw:1411-1418 ################################################### wine <- within(wine, { rating_comb2 <- rating levels(rating_comb2) <- c("1-2", "1-2", "3-5", "3-5", "3-5") }) ftable(rating_comb2 ~ contact, data=wine) fm.comb2 <- clm(rating_comb2 ~ contact, scale = ~ contact, data=wine) summary(fm.comb2) ################################################### ### code chunk number 49: clm_article.Rnw:1421-1434 ################################################### ## Example with unidentified parameters with 3 response categories ## not shown in paper: wine <- within(wine, { rating_comb3b <- rating levels(rating_comb3b) <- c("1-2", "1-2", "3", "4-5", "4-5") }) wine$rating_comb3b[1] <- "4-5" # Need to remove the zero here to avoid inf MLE ftable(rating_comb3b ~ temp + contact, data=wine) fm.comb3_c <- clm(rating_comb3b ~ contact * temp, scale=~contact * temp, nominal=~contact, data=wine) summary(fm.comb3_c) convergence(fm.comb3_c) ################################################### ### code chunk number 50: clm_article.Rnw:1443-1445 ################################################### rho <- update(fm1, doFit=FALSE) names(rho) ################################################### ### code chunk number 51: clm_article.Rnw:1448-1450 ################################################### rho$clm.nll(rho) c(rho$clm.grad(rho)) ################################################### ### code chunk number 52: clm_article.Rnw:1453-1455 ################################################### rho$clm.nll(rho, par=coef(fm1)) print(c(rho$clm.grad(rho)), digits=3) ################################################### ### code chunk number 53: clm_article.Rnw:1460-1470 ################################################### nll <- function(par, envir) { envir$par <- par envir$clm.nll(envir) } grad <- function(par, envir) { envir$par <- par envir$clm.nll(envir) envir$clm.grad(envir) } nlminb(rho$par, nll, grad, upper=c(rep(Inf, 4), 2, 2), envir=rho)$par ################################################### ### code chunk number 54: clm_article.Rnw:1479-1483 ################################################### artery <- data.frame(disease = factor(rep(0:4, 2), ordered = TRUE), smoker = factor(rep(c("no", "yes"), each=5)), freq = c(334, 99, 117, 159, 30, 350, 307, 345, 481, 67)) addmargins(xtabs(freq ~ smoker + disease, data = artery), margin = 2) ################################################### ### code chunk number 55: clm_article.Rnw:1487-1489 ################################################### fm <- clm(disease ~ smoker, weights=freq, data=artery) exp(fm$beta) ################################################### ### code chunk number 56: clm_article.Rnw:1494-1497 ################################################### fm.nom <- clm(disease ~ 1, nominal = ~ smoker, weights=freq, data=artery, sign.nominal = "negative") coef(fm.nom)[5:8] ################################################### ### code chunk number 57: 
clm_article.Rnw:1500-1501
###################################################
coef(fm.lm <- lm(I(coef(fm.nom)[5:8]) ~ I(0:3)))


###################################################
### code chunk number 58: clm_article.Rnw:1504-1511
###################################################
nll2 <- function(par, envir) {
  envir$par <- c(par[1:4], par[5] + par[6] * (0:3))
  envir$clm.nll(envir)
}
start <- unname(c(coef(fm.nom)[1:4], coef(fm.lm)))
fit <- nlminb(start, nll2, envir=update(fm.nom, doFit=FALSE))
round(fit$par[5:6], 2)
ordinal/inst/doc/clmm2_tutorial.pdf0000644000176200001440000030471613575515131017075 0ustar liggesusers
o3G-5o 2T&P+M1dAsʨh@7i5P5 "TZ'm*J u R,»b R൬C 20],yS9uA4+1[5F•(hW 5FF)/1Xc\~D Haז/Xckg@vEX_&/1Fk2DziQ.ce{\j5H0̘Ǥ_ Bx{DT @Mmж%MaL3[xh:CΎ]}1k[@127RI!v.ĬXF&8CjlmE:m꣭FJfrm*Ds$xtm 9mlN7յ’O"6BLJKм|6*ʠ[kiċ gckS]Jt|moV|ݐ[iĶ Rq:> 4(ɞ!%۳<.T['@)͞cgX E2,ior"^1òƖ)C|WD j5~D#cU?+-~\ X A0h_Q\e V9h,.|rWυD^o[spAn7O7Jۅ,٫':)0?LO\= "|LJz`ې6QsȀ!H /C 2Ge 1*YlCDGk)^q6b䏠)뿴/"w0gjks* R(\7x#|] g$+O|˛w/|| tw\#nş^v_kDs4ue%AG* KDЁ)Tk~B?aO~*,\7+B맨a ")חӿ\#.^ROxa-5z@6r- {V^} 1OX ɦrf[qG ђ]̤zv+$si\G;1"TUxInKVh8'n@ωaQ{kiuq잢ÉaK O)SMj%14F_Oؖ :n93i-]J;}} 'N@k|꧷m|oѼąq ]R]CH@[7yҩ{e0ٙCOx+z堊)x:}i}9m!{CT =ݨ"B & 78ܼFC8uڤqX/g|]Wʣ,~Bm>K5rnd]ggYnzo^Oz`_`ee,=+]5~:A, g,l [=Htwƣ7 #n5i> V%]ה z:ݓ>4~Jp\t៧*d At?5Ik@E׋ ߼Fˮ+î\+{X 0mxe ::z|~vĭHП&f edDN뼘j1|Q~dny.h 3a#d)`a[SC\re ^U &)Bƶ!thaUjQҡ"E @fRBJY(<іǍ0(Cp ?#Y>EOэ$5-H?;~qHA\<7?M}[+$dߏX}dkUqB[BG\ ~҅2&)-$ "ڋXMW.bVq] ?w& Bo괦1nVMrqky1{>zPBeUq/tYJ&.}&R)foΰ]9C\^@iYdżwü%jUx=j=xv{;6[;JO!:1pN'n z# k[N-}d)Erz_+i@$1׽S!]J*P[N^YmͶu}}F)5ڻv{@$0V?o{}s/m ,mοts.!x1dW؝jXI}!摻u:^W"BVBc~xtgݤ5EtJxi'xH"I GR^LyC*tߎ oJTR/B˃˷} l>:`wOh~1]xyS/͠eAsE< ;7j&m;ӪܶAƑm;ڱb=P~!6;3 :y9?isEl^̳T`saƞ)ֿnvaûi]1:[jw#X,.|8 8|Ozt21ט4 Xv0֣n񫟞t`{)LjI, ĩ>讄k:0%'&v??yw:}L8vw b |W3Ͽo|=?7 s /|5?pRHd!ڢ>0?y~q~?OPS~~xc.z~xв39Zxqbsirc~5???JCYf{vGꐞ?؎E7װo#x  YTgG-C)2BEN~>?o}Tet\4އXT۱Ym ϺEzu8`*YʑЧZsq/چW~YxzznnLoRHa}Ț5ކx~A#C+Q:Go9UninpP?e'߰@eYavό LsN?a kf@BGʣy;֎,!衃rZ,,떖| z/ ~R]˒wӌ;~a+i =_gc.5zOZz_:EͧL,̵\ӕO6x>.P)s3OɵG}"V[_[۸^_rf.DEk=GG1nb|f{ѨKcwGr5vt۱\X|v.{,΄=[4R>xS]JP^Dt-;%?j=ZVxȏ+D.&[^ %UcvcMKLGﲾ_e662V+>h){Q*ˮfQs|&z~-9nYNw6`m$NY.HNyeQnp.fs}Fו5XγԾuΪ˯W;2.lZ[Z߲Vuu W}Hd)^oe m_xְ- 8ë\։ZG<9~,1M{H5r^9 zo2qNVS)Gֆ6Ix`OKlEݝxZ]VߦMt,pu7r5A_zܛ7ˣ/$vr;3)=%>h;9(uhO~͠˥1([q;?ɠ_ qpzsLDE>L/~][u^@J!-`Uo)jEhlxF8GxjEZoX\W]*N6jU֓i}jꦛ{ռ$(rEnK t_Xrw ~Lbꉃ/4裌^v] WDvvox?,O=%B{em2_'_釷#8c_{g˭o:?^n7aS&+GC7Kwƍ,v誐F=xR?A Y6sYxo'S?5"g락Iգ=U/=纑ߏ|ؑ뮽]JԪ).^a)lj{?j׷>o׻?r8 ģeѯ|\wѸ=z!8oovz}.wۗ\??6ZFY1++2lo,>|3|Io$u۴Kzڎ,h/YZY ӣ]}w3N<.D"\Z$wsW o,ۉnܠ^ߟ4yDћi~.vOB54;jns{> stream xUViXWR#e%EPqa " B@MEA8&jTѠƠFQ(KT !@"3:-k|uwyhJo E8Hݯ91-#}(Iï7ˑ `'ADX%iO7gmHS o*g5&5>.I9,c7'$'ߚLJUz)b&DK>JrIvMIMۚ>3*:&7n_M vg[εomc`rEfRlʟ PTeNPP!+F-ܩ<MMS uHM )L ʈ)a#u4C;cLD3N/$7zfd[Ɔ90lgcǖ3v7 ZZ6 zPY*PQqyGRj9 'p}< ݰ۰Ss t3Me%T5.ꋪ[ғKWj[cD_ q'9|7( Ee%nHٖ'|䁟 ,*t}HbOk,5ԪepN* vn}3c0N sx `ü~]# ǢnA7ݯfhdS ^ d@:FX ̰v4óDxNۦReD37sl{Vv|nߴ֞}Yi([jB-<%!x`h QcVa:%<ętΓ{O~Y0xBjq0뎈3ωKOtNyIIRA\Jkg=28B-YvxZ%$љS)?ߕqq畊:[u>V#~>E*,_w]yBb#)xvTusѵ8B צ@T d+%{/m\Bǰ-<Ny,ZwsLe:``*FIVtNȤ彩/C_".鳓Ejܫ5:NA\/skk,ne2wܷiϖ0Y<^ǵu ֜OCбEG8<{{JZ|BsNI x~n N#@%ywʰjF|hi}b^b "0pX|7ZbAso {/낉0nW?~=9_;.|{3ߖCְJ&x-a*b":+]L`Rg+L1YE"n^@+|Fu5Wwj!Q%&̆Z"gXc/n1 &T_xT);R0pw6x6|b;ԵKk>&{α2쌝L{) 8Ӗy@~6"➋5FP^=kXaIzȑlkB'#Tv3WO*;reV4rw8E8n)T=r*(4?$# @L./Zq-Y \>QHz5k. \kc '5$ F ɔpdaE+1oe6++Ų:UU<6X45nBY0tJLv,Sjg< :.n&W={rftsI{Y6{H뒾*̼ˍ&~םi+CpՅ nh$=BF=Qш@^l8lVJUC&C# )Ho%qwaƹ:6"ڔVHpzg׎Kf%;mAÿN[dːpR+ҋldh AmڻRyg@܋_ ,Dٙ0x u.på7 /Vȕ N)@n*W`D@WH0,l0$ciRyNzODzq- ͒(_x|(2BQ 81v!Nk+4ޮh7ZB4z:A;}mB!smˁB&`|f[ޗ쉅>":0&?:{{1;Ah]YV#By6|l6 4< 9!Cš !ٰ "T4kvagҲaPSw2/ ,za?{|r`I 63~Es33wq+183131}H`IC! :Mu,nP)R|Tc@SFՕz 6x6w-J'K.r}/ ]Wo s_Q;׋E"1#kd/؄)db#WF,ϰEX@XS5 d+a| 6ň9\5znFΑ@xVkasbWhl#Vl.Ǧ8{AF?-siRI;Pӌ/=rȡC=Y9|XE%k=endstream endobj 117 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3403 >> stream xWyXSg>1BsEV[VXQ@T aI!%!  
AY\AjVmLΗ>=}//J3h7 RHeFcqEzథУZhv@e4,3y& yI9.zmHTTQ[G'jV@9F" tjϣD}'ƔnR7 elTjuמS>9dG2=CFP'we}p-:U.!^b8;T˅h*v'Fa~w dX/M+^k䶒M7w@)Ku2j}1jAU:4ڛd??7w}#|>;Ms4Jw~o^Y$}#S29 ez0=*ꭹ y(>e(=4٨hy`S` Snl yobg=^w,D0"I"={s{.Rg>ܕ"ύ%%O{K,s=a2٠ 7-(+lLװt*KiD r:+@QJOn_W/yzЊ;h unCK+ &hqYLU$#z-y+_{?<>݈W2 T[voQT|؜6{GUՇ<3EEIL (hRM/Pdj!5VL8{mry"z`;(Lx\}d1;`~r6|@ eChڿE_¸^q>Dώ1^w[xSRE*[ J LAD|VyR ZZ;v~+gs~T^Ho~af8Zts-6 yC'NW V.hkHy7| RS|Z1U^WP\X\\nWd{%ƒtG9rdΛV [%+1rr m1Cv 1>;J/_CUF2t4!w.ͦK腫?BoEd'qkwj{ċ5d33sL@uZL+L_ L&=89yپ|3HezW撵uS8#ƶ=wVK3+TƥS88z;R}WGzTGA'Y}+PUFP)J#HJ'ԝ5+r0qt!Sn>NrmTl;3w Ht@I9 Qgbt!޾̳-Z :j@'ՅeR?>?S29}8sMS}DjF,G+'p_OZFON'U sHvip%Qbe/Go{h[˜|im$WnN>>DJjR3+F۾zg2uh)]ڂ^4Rm'H+8ӃC"wV~BCYCk@7.5mwDx7g^=)i2k\hZ_2d\"C$eHhv)UY|IKg3K(3dcwP 6Kfs3};^H:# t"9iY7nTF"Q|H| oZ"pz:"N& >*{5Z)A 2_ةv]ŢgDk+WGĒI"^/!z291NZc6,]OYGW|!dt<5}BCVH%T28MXqB>1-O"?B juAРm/ 9[ۤaMc=GDhy4uzשZ0OZF<$ܓ \1GuTA*nw[>q\aNr5,v\> stream x[[odq/ ~p qExA0I@74lHx#1.ɪbFr#{vsˇߞ(fJ+lFg7/6=9Bz"7O,ӪsvSZYo VFmN?-F=K7N9*9'?g}iAbhgg6ΚY7XOKl[p5Dh9;i`A2lm|A#= `A,$۸hhAZR AWnd4fvZA5S(e 361L_//O?!MRR g%P'8&l.>~(c813٬b6K |1L$_FO}Rq*w*!dê# +z3KϱHT*# K 8VaXThͱrHUGJz\ z r~I9)60fm_]ېl@TjNZ9|H:RQ%CYF~ֆ$3p2PHޮۨT/%i(HC),2NY ld(p8td;[S umIt^ӰlJ A˜GqŮ\Ԭ\gNfλ0άz֑5 Uň'5 "tf('vV판bj[6\l'gU{!QgΒhj/qFvV !\fG2,wc+UXG|ZOz0L3a(Iڥ(]"58]%P/Q:;}Ƹ$ticqdaRHa&)vS2Nle"<6LJI ai8ĨL+P"[E:"6#ýL[4ܻ[":k^" vA&2O(nh!Lgʜ”z!wR[VQDž)3Et7"/ $ЄU:Nl6!:#<`v)­Ԑ@qA7ZP @Ҙ`s8t%GABAV 8CGCeqRDn!bɘϴd1njҴrփ!dR@ZkE/k`eHY@-3tnH/ۣ$Yl(9+\ דӿ{GZ;PVV4Dr(v~nMuF:ReԒ.P t9 Sq2\DC0|VH:&7UWXuhaY];m7CT3%!H Um30OJpVɸg6Nwغ{Ux*)`qͬ媅f C?}L{dwi4XuaG>9nb/_gYv.W>WB 1Fܸ"-eZWxS 9:MusX=mbg >1E  5𱁅l0U%lC"ÝCiQx:O##!YC_a9Z,S.8ӵl,N~.8>/Cu-Y :Ev%=S%9M>-VZ5gC% 56%~ &ªjաѧߐW ^m7Drb& 8ԇߗÅ:-bf͐vaw\,1ka ̚W0+z(#]Sի/8Óم?U:߀E4["v):iuNj2.J>kgUcY3We&.C.TLWj232]$&NI2jjS"mt(zcf БڲY2Z,t1bT$GYv]:^ziNTֱv=[t:uۅmHJ6 T̗rY$zrijKձ/S#B7$Om/BJKEuoE`M"(Vo_T+\Ắ+ש]@F\}`l(q ]uK?kZ;)Z5e/(~E4zhmn?-&1XT?WevuXVG*egMTyJ"5 䶺EM맏eaf絨g,QDd=4Go_|E^.cyF*O1Mq(z*Ðv]'eE/Ձ"ٸhWX0 mx\F*)ٓ$%HʅoVNW$^t]TA?4>t ;S|FBjEζPin︝'0Suô]vS2Hb0:^WGj wNB3.nNA'9YvS~*k̰z(| MIe@A8Oe))Y:ݦrwQl .\sWU:!-ݟ3M[zi>N؎G߷F{h=9wV 2K'"UQ0wgZz(0[0bqjY|߷a"zjar/X޳.%]Er7Le(^%/w$}ND(5q?W&9UOǖ-KsKk R@_]}iqS 7kv{XXEPybLϞ\-dqH٥FD }w`W㚗U+b>Bi¾*9^.ӤaeBwU]03٬!p5 n9?@I1#$~ؔ"kTKڳKK蜰  k^\w#CpHwXsMEy[Oڞ@EϠj~4aXs,JظIidhQMg,A]__Q[lC|Y%f0֖ 3 3o !SsA!P=(~PQKqwX8R?H ~xþ]Ñk̮_Jޕxf 9t*iZ[eD,y-)Acc/2yJsף{ }Ok,_TyWfr-^{r1 mG.pA!÷SC˞$J§ uli e%䙍DyNjj]Ԥ%>7Y:aOcG~ZRa7?+EDVw!ݛQ:-MW./geam0/b5YscoRңw?a Q {*}ܲ  ?boT+CvY샣7=Law-o y̅]~car=+s,$̐ISi`ީ@=hh3Q@@9U)VMr;V#iu=`̗MV}rwyEi!e> stream x]O10 D'BVUAp$6|/`,@L4[ub,,O\r)x@w|MWBi )(ItUwXI%0`ѴxO%ES&qMsT2Χ_lSendstream endobj 120 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 269 >> stream xcd`ab`ddds 4T~H3a!s<<, w }=D19(3=DA#YS\GR17(391O7$#57QOL-Tа())///K-/JQ(,PJ-N-*KMQp+QKMU;ML:)槤10002&FF|aӂg2ً/)鮒󂭺hQy?헰ξk<<@/\endstream endobj 121 0 obj << /Filter /FlateDecode /Length 3741 >> stream x\Ks3,UR5hx 0pT!qX)$WҒzcf{$-|P4ݍE+E/{tnl'^4A}z^P{*Ӣ?{| ̢Ӯkn Խ9^,Ey/mל,ZV9]-!#b4on"j#o6 Y͈7iEu +eE*ϖ)a \*a+vRKie 7e$ĢҊV031י-ip8׎fdd3Y#4 wF`:i eiP͛ɞI4J3msQ9zKDw{{2c2d'$"$v+Јv4Z}MZWwQV[ %Ido]LR:}Q!F*4 8@kZb+1:jt}W \+d')]E׼HQi\ƧČiYyX]߻Dj^1Q$#NLvf_ ҽ(y0f)ALk@,OiT$Jei)h#ąu5j՜ U"r?ﺈ0䇯|=EKkb7M{=k~ dw_Ni^ ^p]*࿛Ÿg#t]PY= 9-3 n xSoIڪvZ% g,Gy@}- v,/î-(v1KCɩT5)ĥ̘go<2yP%9cgcbp3{u(XA^bl%8$@6 DvZ,!T V !`[szTqlUre Ւ!\=2dIǩx2+`Ue-!&ŊA010E, T_@qzW[LG~)ߡ짬ygR{t,76dy?bp PA[̇$M)`ʀG^2~n²w89x\BQT;u ) !eiB ~{YCh[fݵex;[.p=dGZLVdOlH>+bT\qv L0l+hY5v= )ہfîa]} Q`@%p̣>FG%p!458 nb膊_Lbc__@¿_7*`hAڡ%i;ԛ%RpXFC0a%f:6]MVp7i϶xf73FS8ei4T̕5㋱I:M0j᭦yM)]\jM~=Bo_)(V7$z?;,qjh&#uڦ[2!cHRlAJ]AɌ23@f +XfǞa& ٟ]&)sU'(2.Nu l87b %°#Z#v -ei"%=ֱy#sHr MB>W R\S"~8MF y 
^LuCF$^+;DLb$4sV=["dl!9O$4TgLchB ՚LUQgl5N F9Huv=dQA( 9n`t[Bz*`Cy|%3gmG<%[p:/0mɆ:Px'j G灥_1.b A'wKxӀϦtZ{ N#vaGa]RFglYU3 m`./jt>RKraoO7+x^iwj'.i^Bmvsa | Sv2đ)ζ`0k,938~V*8idnqPuJuFI8Z?p53P #GGa@XG~>wcc@߳s!H]ާ8[1x5OcG.7,&sNa+>O&|F c?@~_@(*298+V{罊$_A^uvОAgXyao<^9AkGIb<"s`;plńO7uḾz mg_.{Zog(Cty+`U W3V;o.-+-qݑ`Ug2d uO:Zt.'u|`oZ916"Q.>o[l`me:Gs\ZwuCV'oSf|Sv38!>f0uO&$s<g8ӭ!8 bw 'ɵНxvGgb!dFRVTq|xaCA^|v b:rU֊޵|l']=#1)_|52\ѾS|6&y1n+{ ㋚~x?^DTTzViEg,w`a̮~ oaU7ޱ_S =ϝR+8v>EB$7;{$E:)9g|68P=}#d=~/BLUIuT+51+8I5 ;U.ך,F6@txbr0fDҍ x71q&*1DCw9whu}^u%ic~ CTj@<Ýw#9)ժŹTT@ٞ{Ԫ89FoV>VY.7c3M뵻_qFOOl`qEKX5k9 :F +j-AM &&ԩ[Ed099x#xJ~1ɏ ud *’Ai[A1t[>^DU'⏻仜rZZ5\NےbOuer)4%Fė $oA<endstream endobj 122 0 obj << /Filter /FlateDecode /Length 2038 >> stream xXnc3?q7A.HYdY H2ıϩKQSa HOw=Nqϋ_q.ZΛP:R5>uWΑ6P=>E?\+}W.MX7]񹋉)YyD`B%śAPI8 Wb`i%L9l>.n: (&XRwj\ƆХhM;]w?w\%)tx}.&3xj8<痫׻ wWúZɆ~vw߬؈3!9"WCpiz 9BJ진ު8S, yDikIQʦq,P^AMjfFܽEqXy(TN-g0%~{Fj4)ޞL&=IR"E:,PU&<5j,PFSFMJ.c]d!JYdșwSgԠAQi6t_l8noaG9-(_dG97{;vZppYeCcoky9H4/[x5@]ul|Nf dap5Z|7T.at;Nc|9׈l,%?dLGɲsSsQzsÈbї2v6vП"`ŨզT1[p9ǤX0ݟ TWY~TUaI4ـ8ar|endstream endobj 123 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1392 >> stream xUTiPGYVakEfG [QJ &BY AEY"DCR! R7&BT}潯$!a$ŤGEzj͐Zp`")Q,Rɘ-l`X,f&CmrfJ|l\*r'!ΫWE]Νߛ MմHI&'$'=5I:~;o?!dMNѥGfFE$$zcAh E( y!ov@!k [/*E3.L+3!RE[,1'>*>+߃f(evx x;ŕAC9Ac2KM82Ne`?LXϰQbÏ2"$ $LݠJQMM|` @ +Hc=>![|;@E`Q)B{cZ| V1]+V3E}~1TsRodv;]ɂj#8G&xSTT+1`*R(Dc }o8a8! iꌪ,bߖc+2#Gni %6ΖΖ=wūxGrp-bn.xLM088D>m+.;Mʾѡ?^TYR!?L #%M 3$,n&PPqʪں;bRϩMiټjx{Rjzed;X`!(+Td4[WTɮUeSse'+s5 kihݍ-8/ޔH1H} Jg|Y=D(#~V@ ֳU)9z})wħa,fHSp)=N" mƅGdۯ'GUfED(yUz!z٢ m>ʡMFf؜(Sy~s3 sm?7p"w;߮[٫i"08*=\z` ,QdPk3ZoefjfVY ZI"_mtPMWvD,86yjq QuR*U%z}i`oJ b+B+9BGg֎endstream endobj 124 0 obj << /Filter /FlateDecode /Length 142 >> stream xE @})u7`cl'VB/1pL13 dLǀi%D ^PXsW AfzvVMNK sC5, a_v#kb ,u@L7Ր}jt.endstream endobj 125 0 obj << /Filter /FlateDecode /Length 1003 >> stream xVMo#E?1'ԃUGhŅe$,c;!8fi?+}gl>WoRw{Zv~Ee7}lݫ *vx'S9풷vayӯQ`gɛ>?dy~D8:Ea fs/xm| 8.t:(;h bt㏫!W'A.ǣ*q4{I8S'~ա9jO &SφTm_Ғ%e2cad@ Vi"̇JT 7nxE9T\lǫk%Ar$E1 q%y5 cd8mO p/8%HQ!eQSs!ckcQmIcn'pN~bW8n}C4 yP_%Za?Հh\L2<$nMH*w 'JULKUKTj[I59W:#IMSt+ے$`Z"W`<%r.Sc/#EOrm^NPM f2!p){(Tsy4?8Om_K;:x`aY\^2UyY<'|PBYVex,,`HeGdd^ tnŴFv7gT*$o?N?[w'8|Da[wR9!mdtNX͛W{L&Xv=sn!]9cts_9f6\0<7o~n2f"mZ7yb2oekeUEO"c-3 hB%ubVVR g]yӄ?̺< dw'=endstream endobj 126 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1025 >> stream xmmLSW N!1ۋL'fZFq@],J)oۮbu `G98gř,îm1wN$ItN;۞n7IIA!D_󣑊BK+s pd-A$_kmR\LtYnf)6qFd5)/f42foI-v3wTi9L%/s&nniKo-l\._bA*M'DQ@"zb`QO$?&v*S򪗣QDIa<,9- %~lD%Z@oa9?T/G`{ɾ],Z-XU---CA Ti lt2Eb=s,+yLs7JN-@%vizPnANoNJců"FX[5^shyچL9 zij^Wڵk3G]PϬBJX}A[4a n?Бׁ0G}@SBLjЪ%;,%)P>$3hBML 4zXWMtd>H|GA681N MaߋL &Ȫvw F [$dOJ* @ /P^K[V:Ӊx]1y"Eo#Qߥp/j*QF DR5KviR4E]b_h 3u;)G)f݆ؔx].m!MS $[p.$ɣdrĿrA#00I'\84o*_N5{B?bYֆ h=GO9xt~> /W [ 1 3 1 ] /Info 3 0 R /Root 2 0 R /Size 128 /ID [<6c162fc8f196369dc25882cc9602fe66><5b9be343a534ccc80fc794e2095be4c0>] >> stream xcb&F~0 $8JP?@6[3(m<@$0DrM! / "e` Rd= )`]`5"1`5@_3DD-LT2? 
ordinal/inst/doc/clmm2_tutorial.R0000644000176200001440000002164713575515125016527 0ustar liggesusers### R code from vignette source 'clmm2_tutorial.Rnw'

###################################################
### code chunk number 1: Initialize
###################################################
## Load common packages, functions and set settings:
library(ordinal)
library(xtable)
## RUN <- FALSE #redo computations and write .RData files
## Change options:
op <- options() ## To be able to reset settings
options("digits" = 7)
options(help_type = "html")
## options("width" = 75)
options("SweaveHooks" = list(fig=function() par(mar=c(4,4,.5,0)+.5)))
options(continue=" ")


###################################################
### code chunk number 2: clmm2_tutorial.Rnw:152-155
###################################################
data(wine)
head(wine)
str(wine)


###################################################
### code chunk number 3: clmm2_tutorial.Rnw:176-190
###################################################
data(wine)
temp.contact.bottle <- with(wine, temp:contact:bottle)[drop=TRUE]
tab <- xtabs(as.numeric(rating) ~ temp.contact.bottle + judge,
             data=wine)
class(tab) <- "matrix"
attr(tab, "call") <- NULL
mat <- cbind(rep(c("cold", "warm"), each = 4),
             rep(rep(c("no", "yes"), each=2), 2),
             1:8, tab)
colnames(mat) <- c("Temperature", "Contact", "Bottle", 1:9)
xtab <- xtable(mat)
print(xtab, only.contents=TRUE, include.rownames=FALSE,
      sanitize.text.function = function(x) x)


###################################################
### code chunk number 4: clmm2_tutorial.Rnw:217-219
###################################################
fm1 <- clmm2(rating ~ temp + contact, random=judge, data=wine)
fm1


###################################################
### code chunk number 5: clmm2_tutorial.Rnw:226-229
###################################################
fm2 <- clmm2(rating ~ temp + contact, random=judge, data=wine,
             Hess=TRUE, nAGQ=10)
summary(fm2)


###################################################
### code chunk number 6: clmm2_tutorial.Rnw:265-266
###################################################
exp(coef(fm2)[5])


###################################################
### code chunk number 7: clmm2_tutorial.Rnw:274-276
###################################################
fm3 <- clmm2(rating ~ temp, random=judge, data=wine, nAGQ=10)
anova(fm3, fm2)


###################################################
### code chunk number 8: clmm2_tutorial.Rnw:282-284
###################################################
fm4 <- clm2(rating ~ temp + contact, data=wine)
anova(fm4, fm2)


###################################################
### code chunk number 9: clmm2_tutorial.Rnw:295-297
###################################################
pr2 <- profile(fm2, range=c(.1, 4), nSteps=30, trace=0)
confint(pr2)


###################################################
### code chunk number 10: profilePlot
###################################################
getOption("SweaveHooks")[["fig"]]()
plot(pr2)


###################################################
### code chunk number 11: profileFig
###################################################
getOption("SweaveHooks")[["fig"]]()
plot(pr2)


###################################################
### code chunk number 12: ranefPlot
###################################################
getOption("SweaveHooks")[["fig"]]()
ci <- fm2$ranef + qnorm(0.975) * sqrt(fm2$condVar) %o% c(-1, 1)
ord.re <- order(fm2$ranef)
ci <- ci[order(fm2$ranef),]
plot(1:9, fm2$ranef[ord.re], axes=FALSE, ylim=range(ci),
     xlab="Judge", ylab="Judge effect")
axis(1, at=1:9, labels = ord.re)
axis(2)
for(i in 1:9) segments(i, ci[i,1], i, ci[i, 2])
abline(h = 0, lty=2)
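
###################################################
### Added example (not in the original vignette): a sketch that
### tabulates the conditional modes and 95% intervals computed in the
### chunk above; assumes 'fm2', 'ci' and 'ord.re' as defined there.
###################################################
round(cbind(estimate = fm2$ranef[ord.re],
            lower = ci[, 1], upper = ci[, 2]), 3)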

###################################################
### code chunk number 13: clmm2_tutorial.Rnw:348-349
###################################################
getOption("SweaveHooks")[["fig"]]()
ci <- fm2$ranef + qnorm(0.975) * sqrt(fm2$condVar) %o% c(-1, 1)
ord.re <- order(fm2$ranef)
ci <- ci[order(fm2$ranef),]
plot(1:9, fm2$ranef[ord.re], axes=FALSE, ylim=range(ci),
     xlab="Judge", ylab="Judge effect")
axis(1, at=1:9, labels = ord.re)
axis(2)
for(i in 1:9) segments(i, ci[i,1], i, ci[i, 2])
abline(h = 0, lty=2)


###################################################
### code chunk number 14: clmm2_tutorial.Rnw:361-362
###################################################
head(cbind(wine, fitted(fm2)))


###################################################
### code chunk number 15: clmm2_tutorial.Rnw:367-368
###################################################
head(cbind(wine, pred=predict(fm2, newdata=wine)))


###################################################
### code chunk number 16: clmm2_tutorial.Rnw:386-388
###################################################
plogis(fm2$Theta[3] - fm2$beta[2]) -
  plogis(fm2$Theta[2] - fm2$beta[2])


###################################################
### code chunk number 17: clmm2_tutorial.Rnw:396-397
###################################################
qnorm(0.95) * c(-1, 1) * fm2$stDev


###################################################
### code chunk number 18: clmm2_tutorial.Rnw:402-410
###################################################
pred <- function(eta, theta, cat = 1:(length(theta)+1), inv.link = plogis) {
  Theta <- c(-1e3, theta, 1e3)
  sapply(cat, function(j)
    inv.link(Theta[j+1] - eta) - inv.link(Theta[j] - eta))
}
pred(qnorm(0.05) * fm2$stDev, fm2$Theta)


###################################################
### code chunk number 19: clmm2_tutorial.Rnw:416-434
###################################################
mat <- expand.grid(judge = qnorm(0.95) * c(-1, 0, 1) * fm2$stDev,
                   contact = c(0, fm2$beta[2]),
                   temp = c(0, fm2$beta[1]))
pred.mat <- pred(eta=rowSums(mat), theta=fm2$Theta)
lab <- paste("contact=", rep(levels(wine$contact), 2), ", ",
             "temp=", rep(levels(wine$temp), each=2), sep="")
par(mfrow=c(2, 2))
for(k in c(1, 4, 7, 10)) {
  plot(1:5, pred.mat[k,], lty=2, type = "l", ylim=c(0,1),
       xlab="Bitterness rating scale", axes=FALSE,
       ylab="Probability", main=lab[ceiling(k/3)], las=1)
  axis(1); axis(2)
  lines(1:5, pred.mat[k+1, ], lty=1)
  lines(1:5, pred.mat[k+2, ], lty=3)
  legend("topright", c("avg. judge", "5th %-tile judge",
                       "95th %-tile judge"), lty=1:3, bty="n")
}


###################################################
### code chunk number 20: clmm2_tutorial.Rnw:439-449
###################################################
getOption("SweaveHooks")[["fig"]]()
k <- 1
plot(1:5, pred.mat[k,], lty=2, type = "l", ylim=c(0,1),
     xlab="Bitterness rating scale", axes=FALSE,
     ylab="Probability", main=lab[ceiling(k/3)], las=1)
axis(1); axis(2)
lines(1:5, pred.mat[k+1, ], lty=1)
lines(1:5, pred.mat[k+2, ], lty=3)
legend("topright", c("avg. judge", "5th %-tile judge",
                     "95th %-tile judge"), lty=1:3, bty="n")
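
###################################################
### Added check (not in the original vignette): each row of 'pred.mat'
### holds the five rating-category probabilities for one
### judge/contact/temp combination, so every row should sum to one.
### A minimal sketch assuming 'pred.mat' from code chunk 19 above.
###################################################
stopifnot(isTRUE(all.equal(unname(rowSums(pred.mat)),
                           rep(1, nrow(pred.mat)))))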
judge", "5th %-tile judge", "95th %-tile judge"), lty=1:3, bty="n") ################################################### ### code chunk number 21: clmm2_tutorial.Rnw:451-461 ################################################### getOption("SweaveHooks")[["fig"]]() k <- 4 plot(1:5, pred.mat[k,], lty=2, type = "l", ylim=c(0,1), xlab="Bitterness rating scale", axes=FALSE, ylab="Probability", main=lab[ceiling(k/3)], las=1) axis(1); axis(2) lines(1:5, pred.mat[k+1, ], lty=1) lines(1:5, pred.mat[k+2, ], lty=3) legend("topright", c("avg. judge", "5th %-tile judge", "95th %-tile judge"), lty=1:3, bty="n") ################################################### ### code chunk number 22: clmm2_tutorial.Rnw:463-473 ################################################### getOption("SweaveHooks")[["fig"]]() k <- 7 plot(1:5, pred.mat[k,], lty=2, type = "l", ylim=c(0,1), xlab="Bitterness rating scale", axes=FALSE, ylab="Probability", main=lab[ceiling(k/3)], las=1) axis(1); axis(2) lines(1:5, pred.mat[k+1, ], lty=1) lines(1:5, pred.mat[k+2, ], lty=3) legend("topright", c("avg. judge", "5th %-tile judge", "95th %-tile judge"), lty=1:3, bty="n") ################################################### ### code chunk number 23: clmm2_tutorial.Rnw:475-485 ################################################### getOption("SweaveHooks")[["fig"]]() k <- 10 plot(1:5, pred.mat[k,], lty=2, type = "l", ylim=c(0,1), xlab="Bitterness rating scale", axes=FALSE, ylab="Probability", main=lab[ceiling(k/3)], las=1) axis(1); axis(2) lines(1:5, pred.mat[k+1, ], lty=1) lines(1:5, pred.mat[k+2, ], lty=3) legend("topright", c("avg. judge", "5th %-tile judge", "95th %-tile judge"), lty=1:3, bty="n") ################################################### ### code chunk number 24: clmm2_tutorial.Rnw:495-496 ################################################### exp(2*qnorm(0.95) * fm2$stDev) ################################################### ### code chunk number 25: clmm2_tutorial.Rnw:502-503 ################################################### exp(2*qnorm(0.75) * fm2$stDev) ################################################### ### code chunk number 26: misc (eval = FALSE) ################################################### ## ordinal/inst/CITATION0000755000176200001440000000111613574245411014024 0ustar liggesuserscitHeader("To cite the ordinal-package in publications use:") year <- sub(".*(2[[:digit:]]{3})-.*", "\\1", meta$Date) vers <- paste0("R package version ", meta$Version, ". https://CRAN.R-project.org/package=ordinal") citEntry(entry="misc", title = "ordinal---Regression Models for Ordinal Data ", author = personList(as.person("R. H. B. Christensen")), year = year, note = vers, textVersion = paste("Christensen, R. H. B. (", year, "). ordinal - Regression Models for Ordinal Data. ", vers, ".", sep="") )